diff --git a/.travis.yml b/.travis.yml
index 998a0aa31db24f65d3e26cd4c5d95e5221ed0cea..ac922fd809a8c0a12641b310f6c5ee63e5a955e7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,81 +10,95 @@ env:
     TEST_ID=$TRAVIS_JOB_NUMBER
     CONTAINER_ENGINE=docker
     PRIVATE_KEY=$GCE_PRIVATE_KEY
+    GS_ACCESS_KEY_ID=$GS_KEY
+    GS_SECRET_ACCESS_KEY=$GS_SECRET
     ANSIBLE_KEEP_REMOTE_FILES=1
+    CLUSTER_MODE=default
   matrix:
     # Debian Jessie
     - >-
       KUBE_NETWORK_PLUGIN=flannel
       CLOUD_IMAGE=debian-8-kubespray
       CLOUD_REGION=europe-west1-b
+      CLUSTER_MODE=default
     - >-
       KUBE_NETWORK_PLUGIN=calico
       CLOUD_IMAGE=debian-8-kubespray
       CLOUD_REGION=us-central1-c
+      CLUSTER_MODE=default
     - >-
       KUBE_NETWORK_PLUGIN=weave
       CLOUD_IMAGE=debian-8-kubespray
       CLOUD_REGION=us-east1-d
+      CLUSTER_MODE=default
 
     # Centos 7
     - >-
       KUBE_NETWORK_PLUGIN=flannel
       CLOUD_IMAGE=centos-7-sudo
       CLOUD_REGION=asia-east1-c
-
+      CLUSTER_MODE=default
     - >-
       KUBE_NETWORK_PLUGIN=calico
       CLOUD_IMAGE=centos-7-sudo
       CLOUD_REGION=europe-west1-b
-
+      CLUSTER_MODE=default
     - >-
       KUBE_NETWORK_PLUGIN=weave
       CLOUD_IMAGE=centos-7-sudo
       CLOUD_REGION=us-central1-c
+      CLUSTER_MODE=default
 
    # Redhat 7
     - >-
       KUBE_NETWORK_PLUGIN=flannel
       CLOUD_IMAGE=rhel-7-sudo
       CLOUD_REGION=us-east1-d
-
+      CLUSTER_MODE=default
     - >-
       KUBE_NETWORK_PLUGIN=calico
       CLOUD_IMAGE=rhel-7-sudo
       CLOUD_REGION=asia-east1-c
-
+      CLUSTER_MODE=default
     - >-
       KUBE_NETWORK_PLUGIN=weave
       CLOUD_IMAGE=rhel-7-sudo
       CLOUD_REGION=europe-west1-b
+      CLUSTER_MODE=default
 
     # Ubuntu 16.04
     - >-
       KUBE_NETWORK_PLUGIN=flannel
       CLOUD_IMAGE=ubuntu-1604-xenial
       CLOUD_REGION=us-central1-c
+      CLUSTER_MODE=default
     - >-
       KUBE_NETWORK_PLUGIN=calico
       CLOUD_IMAGE=ubuntu-1604-xenial
       CLOUD_REGION=us-east1-d
+      CLUSTER_MODE=default
     - >-
       KUBE_NETWORK_PLUGIN=weave
       CLOUD_IMAGE=ubuntu-1604-xenial
       CLOUD_REGION=asia-east1-c
+      CLUSTER_MODE=default
 
-    # Ubuntu 15.10
+    # Extra cases for separated roles
     - >-
       KUBE_NETWORK_PLUGIN=flannel
-      CLOUD_IMAGE=ubuntu-1510-wily
+      CLOUD_IMAGE=rhel-7-sudo
       CLOUD_REGION=europe-west1-b
+      CLUSTER_MODE=separate
     - >-
       KUBE_NETWORK_PLUGIN=calico
-      CLOUD_IMAGE=ubuntu-1510-wily
+      CLOUD_IMAGE=ubuntu-1604-xenial
       CLOUD_REGION=us-central1-a
+      CLUSTER_MODE=separate
     - >-
       KUBE_NETWORK_PLUGIN=weave
-      CLOUD_IMAGE=ubuntu-1510-wily
+      CLOUD_IMAGE=debian-8-kubespray
       CLOUD_REGION=us-east1-d
+      CLUSTER_MODE=separate
 
 
 before_install:
@@ -92,7 +106,8 @@ before_install:
   - pip install --user boto -U
   - pip install --user ansible
   - pip install --user netaddr
-  - pip install --user apache-libcloud
+  # Workaround for https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
+  - pip install --user apache-libcloud==0.20.1
 
 cache:
   - directories:
@@ -109,12 +124,11 @@ before_script:
   - $HOME/.local/bin/ansible-playbook --version
   - cp tests/ansible.cfg .
 #  - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}'  $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
-    ## Configure ansible deployment logs to be collected as an artifact. Enable when GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
-#  - $HOME/.local/bin/ansible-playbook -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/configure-logs.yaml
 
 script:
   - >
-    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts -c local $LOG_LEVEL
+    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
+    -e mode=${CLUSTER_MODE}
     -e test_id=${TEST_ID}
     -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
     -e gce_project_id=${GCE_PROJECT_ID}
@@ -133,8 +147,15 @@ script:
   - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
    ## Ping between 2 pods
   - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
-    ## Collect env info, enable it once GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
-#  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/collect-info.yaml
+
+after_failure:
+  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scripts/collect-info.yaml >/dev/null
+  - >
+    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/upload-logs-gcs.yml -i "localhost," -c local
+    -e test_id=${TEST_ID}
+    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+    -e gs_key=${GS_ACCESS_KEY_ID}
+    -e gs_skey=${GS_SECRET_ACCESS_KEY}
 
 after_script:
   - >
diff --git a/OWNERS b/OWNERS
index 583a0314b3fd9e32c8151809ee19c93978ea098c..6ecbee5c9286c1dd7d674aedf4a624e24faab139 100644
--- a/OWNERS
+++ b/OWNERS
@@ -4,3 +4,6 @@
 owners:
   - Smana
   - ant31
+  - bogdando
+  - mattymo
+  - rsmitty
diff --git a/README.md b/README.md
index 48da75f7ff28a31fb9664b26c137f3c068e4d855..f050bb462394f68402b51e1dc1aaf5071f1d88b2 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ If you have questions, you can [invite yourself](https://slack.kubespray.io/) to
 
 To deploy the cluster you can use :
 
-[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
+[**kargo-cli**](https://github.com/kubespray/kargo-cli) (deprecated; a newer [Go](https://github.com/Smana/kargo-cli/tree/kargogo) version is coming soon)<br>
 **Ansible** usual commands <br>
 **vagrant** by simply running `vagrant up` (for tests purposes) <br>
 
@@ -41,10 +41,10 @@ Supported Linux distributions
 Versions
 --------------
 
-[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.3.0 <br>
+[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.3 <br>
 [etcd](https://github.com/coreos/etcd/releases) v3.0.1 <br>
-[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.20.0 <br>
-[flanneld](https://github.com/coreos/flannel/releases) v0.5.5 <br>
+[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
+[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.22.0 <br>
 [weave](http://weave.works/) v1.6.1 <br>
 [docker](https://www.docker.com/) v1.10.3 <br>
 
diff --git a/Vagrantfile b/Vagrantfile
index 44f80db8c4885e523aa6b41ffa8bb299c153287b..73f812bdfe3cd3af336c7345684b47c1517241f1 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -16,7 +16,7 @@ $vm_cpus = 1
 $shared_folders = {}
 $forwarded_ports = {}
 $subnet = "172.17.8"
-$box = "bento/ubuntu-14.04"
+$box = "bento/ubuntu-16.04"
 
 host_vars = {}
 
@@ -38,6 +38,13 @@ if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
   end
 end
 
+if Vagrant.has_plugin?("vagrant-proxyconf")
+    $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
+    (1..$num_instances).each do |i|
+        $no_proxy += ",#{$subnet}.#{i+100}"
+    end
+end
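+# Usage sketch (assumes the vagrant-proxyconf plugin is installed; proxy URL is hypothetical):
+#   HTTP_PROXY=http://proxy.example.com:3128 vagrant up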
+
 Vagrant.configure("2") do |config|
   # always use Vagrants insecure key
   config.ssh.insert_key = false
@@ -52,6 +59,12 @@ Vagrant.configure("2") do |config|
     config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
       config.vm.hostname = vm_name
 
+      if Vagrant.has_plugin?("vagrant-proxyconf")
+        config.proxy.http     = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
+        config.proxy.https    = ENV['HTTPS_PROXY'] || ENV['https_proxy'] ||  ""
+        config.proxy.no_proxy = $no_proxy
+      end
+
       if $expose_docker_tcp
         config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
       end
diff --git a/ansible.cfg b/ansible.cfg
index 2be6f4d02155c89fe873d9008b03fa07c074c5e0..f0e4ef6523518dbb1add11e4535fbbcb996e847e 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -1,4 +1,7 @@
 [ssh_connection]
 pipelining=True
-[defaults] 
+[defaults]
 host_key_checking=False
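+# Cache gathered facts as JSON under /tmp; 'smart' gathering then skips hosts with valid cached facts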
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = /tmp
diff --git a/cluster.yml b/cluster.yml
index 2083823547fe0126e1545326b61a954ee7dc798a..295bb668a6ae554ef274db691d64f7faa5598790 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -10,21 +10,22 @@
 - hosts: all
   gather_facts: true
 
-- hosts: etcd:!k8s-cluster
+- hosts: all
   roles:
     - { role: kubernetes/preinstall, tags: preinstall }
+
+- hosts: etcd:!k8s-cluster
+  roles:
     - { role: etcd, tags: etcd }
 
 - hosts: k8s-cluster
   roles:
-    - { role: kubernetes/preinstall, tags: preinstall }
     - { role: etcd, tags: etcd }
     - { role: kubernetes/node, tags: node }
     - { role: network_plugin, tags: network }
 
 - hosts: kube-master
   roles:
-    - { role: kubernetes/preinstall, tags: preinstall }
     - { role: kubernetes/master, tags: master }
 
 - hosts: k8s-cluster
diff --git a/docs/figures/loadbalancer_localhost.png b/docs/figures/loadbalancer_localhost.png
new file mode 100644
index 0000000000000000000000000000000000000000..0732d5489a919007e65f58f15ac0e181b612e7ee
Binary files /dev/null and b/docs/figures/loadbalancer_localhost.png differ
diff --git a/docs/ha-mode.md b/docs/ha-mode.md
index 587d5ea4620524b9283a9d09ac04d48bb261b1d4..792c18a19fb6f1a4d1b1855c96c3815046ae66ea 100644
--- a/docs/ha-mode.md
+++ b/docs/ha-mode.md
@@ -33,15 +33,29 @@ Kube-apiserver
 --------------
 
 K8s components require a loadbalancer to access the apiservers via a reverse
-proxy. A kube-proxy does not support multiple apiservers for the time being so
+proxy. Kargo includes support for an nginx-based proxy that resides on each
+non-master Kubernetes node. This is referred to as localhost loadbalancing. It
+is less efficient than a dedicated load balancer because it creates extra
+health checks on the Kubernetes apiserver, but is more practical for scenarios
+where an external LB or virtual IP management is inconvenient.
+
+This option is configured by the variable `loadbalancer_apiserver_localhost`.
+If you disable it,
 you will need to configure your own loadbalancer to achieve HA. Note that
 deploying a loadbalancer is up to a user and is not covered by ansible roles
 in Kargo. By default, it only configures a non-HA endpoint, which points to
 the `access_ip` or IP address of the first server node in the `kube-master`
 group. It can also configure clients to use endpoints for a given loadbalancer
-type.
+type. The following diagram shows how traffic to the apiserver is directed.
+
+![Image](figures/loadbalancer_localhost.png?raw=true)
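+
+A minimal group vars snippet to enable it (it defaults to `true`):
+
+```
+loadbalancer_apiserver_localhost: true
+```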
 
-A loadbalancer (LB) may be an external or internal one. An external LB
+  Note: Kubernetes master nodes still use insecure localhost access because
+  of bugs in Kubernetes <1.5.0 with TLS auth on master role services. As a
+  result, backends receive unencrypted traffic, which may be a security issue
+  when interconnecting different nodes, though not if those nodes belong to an
+  isolated management network without external access.
+
+A user may opt to use an external loadbalancer (LB) instead. An external LB
 provides access for external clients, while the internal LB accepts client
 connections only to the localhost, similarly to the etcd-proxy HA endpoints.
 Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
@@ -69,47 +83,18 @@ loadbalancer_apiserver:
 This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
 into the `/etc/hosts` file of all servers in the `k8s-cluster` group. Note that
 the HAProxy service should as well be HA and requires a VIP management, which
-is out of scope of this doc.
-
-The internal LB may be the case if you do not want to operate a VIP management
-HA stack and require no external and no secure access to the K8s API. The group
-var `loadbalancer_apiserver_localhost` (defaults to `false`) controls that
-deployment layout. When enabled, it is expected each node in the `k8s-cluster`
-group to run a loadbalancer that listens the localhost frontend and has all
-of the apiservers as backends. Here is an example configuration for a HAProxy
- service acting as an internal LB:
-
-```
-listen kubernetes-apiserver-http
-  bind localhost:8080
-  mode tcp
-  timeout client 3h
-  timeout server 3h
-  server master1 <IP1>:8080
-  server master2 <IP2>:8080
-  balance leastconn
-```
-
-And the corresponding example global vars config:
-```
-loadbalancer_apiserver_localhost: true
-```
-
-This var overrides an external LB configuration, if any. Note that for this
-example, the `kubernetes-apiserver-http` endpoint has backends receiving
-unencrypted traffic, which may be a security issue when interconnecting
-different nodes, or may be not, if those belong to the isolated management
-network without external access.
+is out of scope of this doc. Specifying an external LB overrides any internal
+localhost LB configuration.
 
-In order to achieve HA for HAProxy instances, those must be running on the
-each node in the `k8s-cluster` group as well, but require no VIP, thus
-no VIP management.
+  Note: In order to achieve HA for HAProxy instances, those must be running on
+  each node in the `k8s-cluster` group as well, but they require no VIP, thus
+  no VIP management.
 
 Access endpoints are evaluated automagically, as the following:
 
 | Endpoint type                | kube-master   | non-master          |
 |------------------------------|---------------|---------------------|
-| Local LB (overrides ext)     | http://lc:p   | http://lc:p         |
+| Local LB                     | http://lc:p   | https://lc:sp       |
 | External LB, no internal     | https://lb:lp | https://lb:lp       |
 | No ext/int LB (default)      | http://lc:p   | https://m[0].aip:sp |
 
diff --git a/docs/roadmap.md b/docs/roadmap.md
index 298750493cb693e9a92148b18de29c7ae332203e..1714ade0579e33a8e6459343d5613fc58e0a4749 100644
--- a/docs/roadmap.md
+++ b/docs/roadmap.md
@@ -1,6 +1,10 @@
 Kargo's roadmap
 =================
 
+### Kubeadm
+- Propose kubeadm as an option for setting up the Kubernetes cluster.
+That would probably improve deployment speed and certificate management [#553](https://github.com/kubespray/kargo/issues/553)
+
 ### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
 - the playbook would install and configure docker/rkt and the etcd cluster
 - the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
@@ -26,13 +30,14 @@ Kargo's roadmap
 - single test with the Ansible version n-1 per day
 - Test idempotency on on single OS but for all network plugins/container engines
 - single test on AWS per day
-- test different achitectures : 
+- test different architectures:
            - 3 instances, 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
            - 5 instances, 3 are etcd and nodes, 2 are masters only
            - 7 instances, 3 etcd only, 2 masters, 2 nodes
 - test scale up cluster:  +1 etcd, +1 master, +1 node
 
 ### Lifecycle
+- Adopt the kubeadm tool by delegating to it the CM tasks it can accomplish well [#553](https://github.com/kubespray/kargo/issues/553)
 - Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
 - Drain worker node when shutting down/deleting an instance
 
@@ -56,7 +61,7 @@ While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kub
 ### Kargo API
 - Perform all actions through an **API**
 - Store inventories / configurations of mulltiple clusters
-- make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory 
+- make sure that the state of the cluster is completely saved in no more than one config file beyond the hosts inventory
 
 ### Addons (with kpm)
 Include optionals deployments to init the cluster:
@@ -65,7 +70,7 @@ Include optionals deployments to init the cluster:
 - **Prometheus**
 
 ##### Others
- 
+
 ##### Dashboards:
  - kubernetes-dashboard
  - Fabric8
diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml
index 05a7293a72f6cf247776e6f890f8f54a2f5f21a9..772938a6d14f732dcf79f3e066d9724ff3ca1dfb 100644
--- a/inventory/group_vars/all.yml
+++ b/inventory/group_vars/all.yml
@@ -64,8 +64,9 @@ ndots: 5
 # This may be the case if clients support and loadbalance multiple etcd servers  natively.
 etcd_multiaccess: false
 
-# Assume there are no internal loadbalancers for apiservers exist
-loadbalancer_apiserver_localhost: false
+# Deploy an internal localhost loadbalancer for the apiservers, listening on
+# kube_apiserver_port (default 443)
+loadbalancer_apiserver_localhost: true
 
 # Choose network plugin (calico, weave or flannel)
 kube_network_plugin: flannel
@@ -108,9 +109,9 @@ kube_apiserver_insecure_port: 8080 # (http)
 # Do not install additional dnsmasq
 skip_dnsmasq: false
 # Upstream dns servers used by dnsmasq
-upstream_dns_servers:
-  - 8.8.8.8
-  - 8.8.4.4
+#upstream_dns_servers:
+#  - 8.8.8.8
+#  - 8.8.4.4
 #
 # # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
 dns_setup: true
diff --git a/roles/dnsmasq/defaults/main.yml b/roles/dnsmasq/defaults/main.yml
index 48b52c121361508fdc4d019e7a03f69e9e4258d7..89ab02ab887d20f2c06e66198efd31d6f0a88270 100644
--- a/roles/dnsmasq/defaults/main.yml
+++ b/roles/dnsmasq/defaults/main.yml
@@ -10,3 +10,16 @@
 # Max of 2 is allowed here (a 1 is reserved for the dns_server)
 #nameservers:
 #  - 127.0.0.1
+
+# Versions
+dnsmasq_version: 2.72
+
+# Images
+dnsmasq_image_repo: "andyshinn/dnsmasq"
+dnsmasq_image_tag: "{{ dnsmasq_version }}"
+
+# Skip dnsmasq setup
+skip_dnsmasq: false
+
+# Skip setting up dnsmasq daemonset
+skip_dnsmasq_k8s: "{{ skip_dnsmasq }}"
diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml
index 46c1604f6e15913c3a86d934b572a207efa9cd18..6b271a1e2644cc3bc3cbb6bc7d9a16c420c909d0 100644
--- a/roles/dnsmasq/tasks/main.yml
+++ b/roles/dnsmasq/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
 - include: dnsmasq.yml
-  when: "{{ not skip_dnsmasq|bool }}"
+  when: "{{ not skip_dnsmasq_k8s|bool }}"
 
 - include: resolvconf.yml
diff --git a/roles/dnsmasq/templates/01-kube-dns.conf.j2 b/roles/dnsmasq/templates/01-kube-dns.conf.j2
index cad7f8ea3dffb6f875b3eff37d7e1e81833d4166..4d73eebdb99af3ce4ee57b0f2770c0ffb789f524 100644
--- a/roles/dnsmasq/templates/01-kube-dns.conf.j2
+++ b/roles/dnsmasq/templates/01-kube-dns.conf.j2
@@ -13,6 +13,8 @@ server=/{{ dns_domain }}/{{ skydns_server }}
 {% for srv in upstream_dns_servers %}
 server={{ srv }}
 {% endfor %}
+{% elif cloud_provider is defined and cloud_provider == "gce" %}
+server=169.254.169.254
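+# 169.254.169.254 is the GCE metadata server, which also serves internal DNS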
 {% else %}
  server=8.8.8.8
  server=8.8.4.4
diff --git a/roles/dnsmasq/templates/dnsmasq-ds.yml b/roles/dnsmasq/templates/dnsmasq-ds.yml
index f1f622bbd81a8fdc71708bea9425af415724f7a3..49223124e65e65f240287cc1236996154b43f4eb 100644
--- a/roles/dnsmasq/templates/dnsmasq-ds.yml
+++ b/roles/dnsmasq/templates/dnsmasq-ds.yml
@@ -14,7 +14,7 @@ spec:
     spec:
       containers:
         - name: dnsmasq
-          image: andyshinn/dnsmasq:2.72
+          image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}"
           command:
             - dnsmasq
           args:
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index 6f54f33d533ea38f7cde03de02d96a6d3613b7a3..04d76179670a2ef54cf438170167d376f8a52aa7 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -3,6 +3,7 @@
   command: /bin/true
   notify:
     - Docker | reload systemd
+    - Docker | reload docker.socket
     - Docker | reload docker
     - Docker | pause while Docker restarts
     - Docker | wait for docker
@@ -16,6 +17,12 @@
     name: docker
     state: restarted
 
+- name: Docker | reload docker.socket
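+  # CoreOS socket-activates Docker, so the socket unit must be restarted too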
+  service:
+    name: docker.socket
+    state: restarted
+  when: ansible_os_family == 'CoreOS'
+
 - name: Docker | pause while Docker restarts
   pause: seconds=10 prompt="Waiting for docker restart"
 
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 51f0b02fd3b34395f6696e9687d59b36ea8105cc..cbe053fa0a4c877af85a2a65f713e0e76ced9b8e 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -5,16 +5,17 @@ local_release_dir: /tmp
 download_run_once: False
 
 # Versions
-include_vars: kube_versions.yml
+kube_version: v1.4.3
 
 etcd_version: v3.0.6
 #TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
-calico_version: v0.20.0
-calico_cni_version: v1.3.1
+calico_version: v0.22.0
+calico_cni_version: v1.4.2
 weave_version: v1.6.1
-flannel_version: 0.5.5
+flannel_version: v0.6.2
 flannel_server_helper_version: 0.1
+pod_infra_version: 3.0
 
 # Download URL's
 etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
@@ -23,8 +24,8 @@ calico_cni_ipam_download_url: "https://storage.googleapis.com/kargo/{{calico_cni
 weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"
 
 # Checksums
-calico_cni_checksum: "ac05cb9254b5aaa5822cf10325983431bd25489147f2edf9dec7e43d99c43e77"
-calico_cni_ipam_checksum: "3df6951a30749c279229e7e318e74ac4e41263996125be65257db7cd25097273"
+calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
+calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
 weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
 etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
 
@@ -43,6 +44,8 @@ calico_node_image_repo: "calico/node"
 calico_node_image_tag: "{{ calico_version }}"
 hyperkube_image_repo: "quay.io/coreos/hyperkube"
 hyperkube_image_tag: "{{ kube_version }}_coreos.0"
+pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
+pod_infra_image_tag: "{{ pod_infra_version }}"
 
 downloads:
   calico_cni_plugin:
@@ -108,6 +111,10 @@ downloads:
     repo: "{{ calico_node_image_repo }}"
     tag: "{{ calico_node_image_tag }}"
     enabled: "{{ kube_network_plugin == 'calico' }}"
+  pod_infra:
+    container: true
+    repo: "{{ pod_infra_image_repo }}"
+    tag: "{{ pod_infra_image_tag }}"
 
 download:
   container: "{{ file.container|default('false') }}"
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index 528e449b19e9e73e559667ec91a9f2ecf1c667b4..e715f380d339c286b52046d80b256c916b0558f2 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -1,6 +1,4 @@
 ---
-- include_vars: kube_versions.yml
-
 - name: downloading...
   debug:
     msg: "{{ download.url }}"
@@ -63,11 +61,22 @@
 - set_fact:
     fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|regex_replace('/|\0|:', '_')}}.tar"
 
+- name: "Set default value for 'container_changed' to false"
+  set_fact:
+    container_changed: false
+
+- name: "Update the 'container_changed' fact"
+  set_fact:
+    container_changed: "{{ not 'up to date' in pull_task_result.stdout }}"
+  when: "{{ download.enabled|bool and download.container|bool }}"
+  delegate_to: "{{ groups['kube-master'][0] if download_run_once|bool else inventory_hostname }}"
+  run_once: "{{ download_run_once|bool }}"
+
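+# The save/sync/load tasks below are skipped when the pulled image was already up to date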
 - name: Download | save container images
   shell: docker save "{{ download.repo }}:{{ download.tag }}" > "{{ fname }}"
   delegate_to: "{{groups['kube-master'][0]}}"
   run_once: true
-  when: ansible_os_family != "CoreOS" and download_run_once|bool and download.enabled|bool and download.container|bool
+  when: ansible_os_family != "CoreOS" and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool
 
 - name: Download | get container images
   synchronize:
@@ -78,8 +87,8 @@
   until: get_task|success
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool
+  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool
 
 - name: Download | load container images
   shell: docker load < "{{ fname }}"
-  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool
+  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool
diff --git a/roles/download/vars/kube_versions.yml b/roles/download/vars/kube_versions.yml
deleted file mode 100644
index 1ea3eb24f0863d7aef07932a53d6baf01e5a501a..0000000000000000000000000000000000000000
--- a/roles/download/vars/kube_versions.yml
+++ /dev/null
@@ -1 +0,0 @@
-kube_version: v1.3.0
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index 56b01da3fdee42061d329cb71f83993efcc0cb83..514a79d731515bb8db0facbf04847969fbb86558 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -1,6 +1,6 @@
 ---
 - name: Configure | Check if member is in cluster
-  shell: "etcdctl --no-sync --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+  shell: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
   register: etcd_member_in_cluster
   ignore_errors: true
   changed_when: false
@@ -8,7 +8,7 @@
 
 - name: Configure | Add member to the cluster if it is not there
   when: is_etcd_master and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
-  shell: "etcdctl --peers={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
+  shell: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
 
 - name: Configure | Copy etcd.service systemd file
   template:
diff --git a/roles/etcd/tasks/set_cluster_health.yml b/roles/etcd/tasks/set_cluster_health.yml
index be0d938ddbfcd40ecc8d9383c4c1977627b6fc59..1a27e4dcfcd41fde8b40844d63a9846957a97afe 100644
--- a/roles/etcd/tasks/set_cluster_health.yml
+++ b/roles/etcd/tasks/set_cluster_health.yml
@@ -1,6 +1,6 @@
 ---
 - name: Configure | Check if cluster is healthy
-  shell: "etcdctl --peers={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
+  shell: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
   register: etcd_cluster_is_healthy
   ignore_errors: true
   changed_when: false
diff --git a/roles/etcd/templates/etcd-proxy.j2 b/roles/etcd/templates/etcd-proxy.j2
index 90d6f64701c1bd790f37812b3ae60221555b946f..0a1492a379eabb6c7c9ce1fad0a5c5d32b938951 100644
--- a/roles/etcd/templates/etcd-proxy.j2
+++ b/roles/etcd/templates/etcd-proxy.j2
@@ -2,4 +2,4 @@ ETCD_DATA_DIR=/var/lib/etcd-proxy
 ETCD_PROXY=on
 ETCD_LISTEN_CLIENT_URLS={{ etcd_access_endpoint }}
 ETCD_NAME={{ etcd_proxy_member_name | default("etcd-proxy") }}
-ETCD_INITIAL_CLUSTER={% for host in groups['etcd'] %}etcd{{ loop.index|string }}={{ hostvars[host]['etcd_peer_url'] }}{% if not loop.last %},{% endif %}{% endfor %}
+ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
diff --git a/roles/etcd/templates/etcd.j2 b/roles/etcd/templates/etcd.j2
index 1f738593954efe63b1d3764e341820dba69c5329..b82116612ac8e02a76d1c43d2a3e17ed3d482857 100644
--- a/roles/etcd/templates/etcd.j2
+++ b/roles/etcd/templates/etcd.j2
@@ -13,4 +13,4 @@ ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
 ETCD_LISTEN_PEER_URLS=http://{{ etcd_address }}:2380
 ETCD_NAME={{ etcd_member_name }}
 ETCD_PROXY=off
-ETCD_INITIAL_CLUSTER={% for host in groups['etcd'] %}etcd{{ loop.index|string }}={{ hostvars[host]['etcd_peer_url'] }}{% if not loop.last %},{% endif %}{% endfor %}
+ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b1086aa0d04753dbc7db9bdb129df56ee28fe605
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/defaults/main.yml
@@ -0,0 +1,12 @@
+# Versions
+kubedns_version: 1.7
+kubednsmasq_version: 1.3
+exechealthz_version: 1.1
+
+# Images
+kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
+kubedns_image_tag: "{{ kubedns_version }}"
+kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64"
+kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
+exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
+exechealthz_image_tag: "{{ exechealthz_version }}"
\ No newline at end of file
diff --git a/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f4ac65aeb91844f4cc6cdf35a4c24aed1cdc777c
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml
@@ -0,0 +1,10 @@
+- name: Write calico-policy-controller yaml
+  template: src=calico-policy-controller.yml.j2 dest=/etc/kubernetes/calico-policy-controller.yml
+  when: inventory_hostname == groups['kube-master'][0]
+
+
+- name: Start of Calico policy controller
+  kube:
+    kubectl: "{{bin_dir}}/kubectl"
+    filename: /etc/kubernetes/calico-policy-controller.yml
+  when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yaml b/roles/kubernetes-apps/ansible/tasks/main.yaml
index aadd9c1a57854b2bc424defd11705627a7e9b0da..f31eb442bc4fd89e100c5cbb610ad4f7dcd99c55 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yaml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yaml
@@ -17,3 +17,7 @@
     state: "{{item.changed | ternary('latest','present') }}"
   with_items: "{{ manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0]
+
+
+- include: tasks/calico-policy-controller.yml
+  when: enable_network_policy is defined and enable_network_policy == True
diff --git a/roles/kubernetes-apps/ansible/templates/calico-policy-controller.yml.j2 b/roles/kubernetes-apps/ansible/templates/calico-policy-controller.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..7c0a21cfa4cd66b1bd5abb7e15a4536cdfb4e926
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/calico-policy-controller.yml.j2
@@ -0,0 +1,40 @@
+apiVersion: extensions/v1beta1
+kind: ReplicaSet
+metadata:
+  name: calico-policy-controller
+  namespace: kube-system
+  labels:
+    k8s-app: calico-policy
+    kubernetes.io/cluster-service: "true"
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      kubernetes.io/cluster-service: "true"
+      k8s-app: calico-policy
+  template:
+    metadata:
+      name: calico-policy-controller
+      namespace: kube-system
+      labels:
+        kubernetes.io/cluster-service: "true"
+        k8s-app: calico-policy
+    spec:
+      hostNetwork: true
+      containers:
+        - name: calico-policy-controller
+          image: calico/kube-policy-controller:latest
+          env:
+            - name: ETCD_ENDPOINTS
+              value: "{{ etcd_endpoint }}"
+            # Location of the Kubernetes API - this shouldn't need to be
+            # changed so long as it is used in conjunction with
+            # CONFIGURE_ETC_HOSTS="true".
+            - name: K8S_API
+              value: "https://kubernetes.default:443"
+            # Configure /etc/hosts within the container to resolve
+            # the kubernetes.default Service to the correct clusterIP
+            # using the environment provided by the kubelet.
+            # This removes the need for KubeDNS to resolve the Service.
+            - name: CONFIGURE_ETC_HOSTS
+              value: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml b/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml
index 3d193d1dc82407440b7eb055e66ebabdc5ccb02f..ed38d671d89f8d32e15ac47bb3bcead6971f3387 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml
@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
       - name: kubedns
-        image: gcr.io/google_containers/kubedns-amd64:1.7
+        image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -63,7 +63,7 @@ spec:
           name: dns-tcp-local
           protocol: TCP
       - name: dnsmasq
-        image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3
+        image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
         args:
         - --log-facility=-
         - --cache-size=1000
@@ -77,7 +77,7 @@ spec:
           name: dns-tcp
           protocol: TCP
       - name: healthz
-        image: gcr.io/google_containers/exechealthz-amd64:1.1
+        image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
         resources:
           # keep request = limit to keep this container in guaranteed class
           limits:
diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml
index d0be14d647210ab45b5fc9085bb247c5f7907ab4..ee32ccf57ecb4b35fd877190e16a9c23a1dc31f0 100644
--- a/roles/kubernetes/master/defaults/main.yml
+++ b/roles/kubernetes/master/defaults/main.yml
@@ -10,3 +10,21 @@ kube_users_dir: "{{ kube_config_dir }}/users"
 # An experimental dev/test only dynamic volumes provisioner,
 # for PetSets. Works for kube>=v1.3 only.
 kube_hostpath_dynamic_provisioner: "false"
+
+# This is where you can drop yaml/json files and the kubelet will run those
+# pods on startup
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+
+# This directory is where all the additional config stuff goes
+# that Kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location.
+# Editing this value will almost surely break something. Don't
+# change it. Things like the systemd scripts are hard coded to
+# look in here. Don't do it.
+kube_config_dir: /etc/kubernetes
+
+# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
+kube_apiserver_insecure_bind_address: 127.0.0.1
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index 56023b997ce3ce543b61eec88ba213290dd84565..4100e8a3414c00bde7ec61d5a55e53b9762ccb2d 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -16,7 +16,7 @@ spec:
     - --etcd-quorum-read=true
     - --insecure-bind-address={{ kube_apiserver_insecure_bind_address }}
     - --apiserver-count={{ kube_apiserver_count }}
-    - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota
+    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
     - --service-cluster-ip-range={{ kube_service_addresses }}
     - --client-ca-file={{ kube_cert_dir }}/ca.pem
     - --basic-auth-file={{ kube_users_dir }}/known_users.csv
@@ -30,6 +30,9 @@ spec:
 {%   for conf in kube_api_runtime_config %}
     - --runtime-config={{ conf }}
 {%   endfor %}
+{% endif %}
+{% if enable_network_policy is defined and enable_network_policy == True %}
+    - --runtime-config=extensions/v1beta1/networkpolicies=true
 {% endif %}
     - --v={{ kube_log_level | default('2') }}
     - --allow-privileged=true
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 2c173837060f85fc8cdb60d7d694ed800d0415b5..8c4ce38a51e82700a78ff6e95db899c5210681e3 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -1,6 +1,13 @@
 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"
 
+# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
+kube_apiserver_insecure_bind_address: 127.0.0.1
+
+# This is where you can drop yaml/json files and the kubelet will run those
+# pods on startup
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+
 dns_domain: "{{ cluster_name }}"
 
 # resolv.conf to base dns config
@@ -14,3 +21,17 @@ kube_proxy_masquerade_all: true
 # kube_api_runtime_config:
 #   - extensions/v1beta1/daemonsets=true
 #   - extensions/v1beta1/deployments=true
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This directory is where all the additional config stuff goes
+# that Kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location.
+# Editing this value will almost surely break something. Don't
+# change it. Things like the systemd scripts are hard coded to
+# look in here. Don't do it.
+kube_config_dir: /etc/kubernetes
+
+nginx_image_repo: nginx
+nginx_image_tag: 1.11.4-alpine
diff --git a/roles/kubernetes/node/meta/main.yml b/roles/kubernetes/node/meta/main.yml
index b9cbbd9ff59b47645ae69dc46e0db6013de82f84..9c52b2d800dc4bbfa4196f9902385c94d854f812 100644
--- a/roles/kubernetes/node/meta/main.yml
+++ b/roles/kubernetes/node/meta/main.yml
@@ -2,4 +2,6 @@
 dependencies:
   - role: download
     file: "{{ downloads.hyperkube }}"
+  - role: download
+    file: "{{ downloads.pod_infra }}"
   - role: kubernetes/secrets
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 9c14e7a4c9284ed7520ec9494c851ed09d9a5130..a8cb6ce5aad9959adb38d26b009b992c0585036c 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -1,6 +1,9 @@
 ---
 - include: install.yml
 
+- include: nginx-proxy.yml
+  when: is_kube_master == false and loadbalancer_apiserver_localhost|default(false)
+
 - name: Write Calico cni config
   template:
     src: "cni-calico.conf.j2"
diff --git a/roles/kubernetes/node/tasks/nginx-proxy.yml b/roles/kubernetes/node/tasks/nginx-proxy.yml
new file mode 100644
index 0000000000000000000000000000000000000000..056c55a93659e723e381858285702bcfba638844
--- /dev/null
+++ b/roles/kubernetes/node/tasks/nginx-proxy.yml
@@ -0,0 +1,9 @@
+---
+- name: nginx-proxy | Write static pod
+  template: src=manifests/nginx-proxy.manifest.j2 dest=/etc/kubernetes/manifests/nginx-proxy.yml
+
+- name: nginx-proxy | Make nginx directory
+  file: path=/etc/nginx state=directory mode=0700 owner=root
+
+- name: nginx-proxy | Write nginx-proxy configuration
+  template: src=nginx.conf.j2 dest="/etc/nginx/nginx.conf" owner=root mode=0755 backup=yes
diff --git a/roles/kubernetes/node/templates/cni-calico.conf.j2 b/roles/kubernetes/node/templates/cni-calico.conf.j2
index c48b084a53727e44ce8cb37de83dd8de6113bce5..4615cdabddb31ba5aa830a56460336a90fe7c103 100644
--- a/roles/kubernetes/node/templates/cni-calico.conf.j2
+++ b/roles/kubernetes/node/templates/cni-calico.conf.j2
@@ -1,9 +1,16 @@
 {
   "name": "calico-k8s-network",
   "type": "calico",
-  "etcd_authority": "{{ etcd_authority }}",
   "log_level": "info",
   "ipam": {
     "type": "calico-ipam"
+  },
+{% if enable_network_policy is defined and enable_network_policy == True %}
+  "policy": {
+    "type": "k8s"
+  },
+{% endif %}
+  "kubernetes": {
+    "kubeconfig": "{{ kube_config_dir }}/node-kubeconfig.yaml"
   }
 }
diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2
index f55feefa9e1a3cc76c7c7fdd3f344d626b980c76..53f2915d9a4cc5143685b8bc9cf9b0317c25fbb5 100644
--- a/roles/kubernetes/node/templates/kubelet.j2
+++ b/roles/kubernetes/node/templates/kubelet.j2
@@ -20,11 +20,11 @@ KUBELET_REGISTER_NODE="--register-node=false"
 {% endif %}
 # location of the api-server
 {% if dns_setup|bool and skip_dnsmasq|bool %}
-KUBELET_ARGS="--cluster_dns={{ skydns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }}"
+KUBELET_ARGS="--cluster_dns={{ skydns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
 {% elif dns_setup|bool %}
-KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }}"
+KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
 {% else %}
-KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
+KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
 {% endif %}
 {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave"] %}
 KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d"
diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
index f0c4bc211de4b0df7270d5dd085588cad6a5418d..7abffe053526d3f11386402a46d626bc9266841d 100644
--- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
@@ -17,6 +17,7 @@ spec:
     - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml
 {% endif %}
     - --bind-address={{ ip | default(ansible_default_ipv4.address) }}
+    - --cluster-cidr={{ kube_pods_subnet }}
     - --proxy-mode={{ kube_proxy_mode }}
 {% if kube_proxy_masquerade_all and kube_proxy_mode == "iptables" %}
     - --masquerade-all
diff --git a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
new file mode 100644
index 0000000000000000000000000000000000000000..50e054268088fe3b8213b141fde0244a1e0ef331
--- /dev/null
+++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
@@ -0,0 +1,20 @@
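+# Static pod: the kubelet on non-master nodes runs this localhost apiserver proxy directly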
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-proxy
+  namespace: kube-system
+spec:
+  hostNetwork: true
+  containers:
+  - name: nginx-proxy
+    image: {{ nginx_image_repo }}:{{ nginx_image_tag }}
+    securityContext:
+      privileged: true
+    volumeMounts:
+    - mountPath: /etc/nginx
+      name: etc-nginx
+      readOnly: true
+  volumes:
+  - name: etc-nginx
+    hostPath:
+      path: /etc/nginx
diff --git a/roles/kubernetes/node/templates/nginx.conf.j2 b/roles/kubernetes/node/templates/nginx.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..edcee08a939f4fe293ecba041f2b82777ab86fbe
--- /dev/null
+++ b/roles/kubernetes/node/templates/nginx.conf.j2
@@ -0,0 +1,26 @@
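+# Local TCP proxy balancing apiserver traffic across all masters (least_conn)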
+error_log stderr notice;
+
+worker_processes auto;
+events {
+  multi_accept on;
+  use epoll;
+  worker_connections 1024;
+}
+
+stream {
+        upstream kube_apiserver {
+            least_conn;
+            {% for host in groups['kube-master'] -%}
+            server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address'])) }}:{{ kube_apiserver_port }};
+            {% endfor %}
+        }
+
+        server {
+            listen        {{ kube_apiserver_port }};
+            proxy_pass    kube_apiserver;
+            proxy_timeout 3s;
+            proxy_connect_timeout 1s;
+
+        }
+
+}
diff --git a/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 b/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2
index d21b8eef3bd4a1762c2e964e55d5eebf17a490d5..e1593303d1fdd5ddf78e95bf07e450d9e8002b6c 100644
--- a/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2
+++ b/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2
@@ -4,6 +4,7 @@ clusters:
 - name: local
   cluster:
     certificate-authority: {{ kube_cert_dir }}/ca.pem
+    server: {{ kube_apiserver_endpoint }}
 users:
 - name: kubelet
   user:
diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml
index 343ba57077dbac6ace03c286ee3e49746c5c38c8..3eae9757d9d464fdc47f1062a8cacd023398d45f 100644
--- a/roles/kubernetes/preinstall/defaults/main.yml
+++ b/roles/kubernetes/preinstall/defaults/main.yml
@@ -21,6 +21,7 @@ kube_log_dir: "/var/log/kubernetes"
 # pods on startup
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
 
+epel_rpm_download_url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"
 
 common_required_pkgs:
   - python-httplib2
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index 8c2aecec5e49f0d49bedafa09f7a3d558e003b2a..49e69a9079dc700feb03be1fdb6b284fdcf780fa 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -91,7 +91,7 @@
   changed_when: False
 
 - name: Install epel-release on RedHat/CentOS
-  shell: rpm -qa | grep epel-release || rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+  shell: rpm -qa | grep epel-release || rpm -ivh {{ epel_rpm_download_url }}
   when: ansible_distribution in ["CentOS","RedHat"] and
         ansible_distribution_major_version >= 7
   changed_when: False
diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml
index e3f4757a764a6629f492a2e51089c09192e93ff2..2dd947dda89dcb8932d26ace7f551aaa56cebefe 100644
--- a/roles/kubernetes/preinstall/tasks/set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/set_facts.yml
@@ -5,12 +5,12 @@
 - set_fact: is_kube_master="{{ inventory_hostname in groups['kube-master'] }}"
 - set_fact: first_kube_master="{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
 - set_fact:
-    kube_apiserver_insecure_bind_address: |-
-      {% if loadbalancer_apiserver_localhost %}{{ kube_apiserver_address }}{% else %}127.0.0.1{% endif %}
+    loadbalancer_apiserver_localhost: false
+  when: loadbalancer_apiserver is defined
 - set_fact:
     kube_apiserver_endpoint: |-
-      {% if loadbalancer_apiserver_localhost -%}
-           http://127.0.0.1:{{ kube_apiserver_insecure_port }}
+      {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
+           https://localhost:{{ kube_apiserver_port }}
       {%- elif is_kube_master and loadbalancer_apiserver is not defined -%}
            http://127.0.0.1:{{ kube_apiserver_insecure_port }}
       {%- else -%}
@@ -30,7 +30,7 @@
 - set_fact:
     etcd_access_addresses: |-
       {% for item in groups['etcd'] -%}
-        http://{{ hostvars[item].etcd_access_address }}:2379{% if not loop.last %},{% endif %}
+        http://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2379{% if not loop.last %},{% endif %}
       {%- endfor %}
 - set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
 - set_fact:
@@ -38,6 +38,11 @@
       {% for host in groups['etcd'] %}
       {%   if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
       {% endfor %}
+- set_fact:
+    etcd_peer_addresses: |-
+      {% for item in groups['etcd'] -%}
+        {{ "etcd"+loop.index|string }}=http://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
+      {%- endfor %}
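+# etcd_peer_addresses renders like "etcd1=http://10.0.0.1:2380,etcd2=http://10.0.0.2:2380" (illustrative IPs)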
 - set_fact:
     etcd_proxy_member_name: |-
       {% for host in groups['k8s-cluster'] %}
diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml
index a5b88d7ac58a5dfc1a13bc2849fc7c14bfdc4aac..c6011a9bf331e546512bb1ee0d63533bf5020930 100644
--- a/roles/kubernetes/secrets/defaults/main.yml
+++ b/roles/kubernetes/secrets/defaults/main.yml
@@ -6,3 +6,16 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
 
 # This is where to save basic auth file
 kube_users_dir: "{{ kube_config_dir }}/users"
+
+# This directory is where all the additional config stuff goes
+# that Kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location.
+# Editing this value will almost surely break something. Don't
+# change it. Things like the systemd scripts are hard coded to
+# look in here. Don't do it.
+kube_config_dir: /etc/kubernetes
+
+# This directory is where all the additional scripts go
+# that Kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh
index a2f698541e7530ed882a1d5fd5836a8747c205cf..f90fb7e8bffe2d34a01673bb91e3e31d3e1f5266 100755
--- a/roles/kubernetes/secrets/files/make-ssl.sh
+++ b/roles/kubernetes/secrets/files/make-ssl.sh
@@ -26,8 +26,8 @@ Usage : $(basename $0) -f <config> [-d <ssldir>]
       -h | --help         : Show this message
       -f | --config       : Openssl configuration file
       -d | --ssldir       : Directory where the certificates will be installed
-               
-               ex : 
+
+               ex :
                $(basename $0) -f openssl.conf -d /srv/ssl
 EOF
 }
@@ -37,7 +37,7 @@ while (($#)); do
     case "$1" in
         -h | --help)   usage;   exit 0;;
         -f | --config) CONFIG=${2}; shift 2;;
-        -d | --ssldir) SSLDIR="${2}"; shift 2;; 
+        -d | --ssldir) SSLDIR="${2}"; shift 2;;
         *)
             usage
             echo "ERROR : Unknown option"
@@ -68,6 +68,7 @@ openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN
 openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
 openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
 openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
+cat ca.pem >> apiserver.pem
 
 # Nodes and Admin
 for i in node admin; do
diff --git a/roles/kubernetes/secrets/tasks/check-tokens.yml b/roles/kubernetes/secrets/tasks/check-tokens.yml
index 1ecaa70060bcd213e77e69de3dc6088b6deb52c8..14cfbb12439f50e219d55b9563a085c550f50614 100644
--- a/roles/kubernetes/secrets/tasks/check-tokens.yml
+++ b/roles/kubernetes/secrets/tasks/check-tokens.yml
@@ -27,7 +27,7 @@
     sync_tokens: true
   when: >-
       {%- set tokens = {'sync': False} -%}
-      {%- for server in groups['kube-master']
+      {%- for server in groups['kube-master'] | intersect(play_hosts)
          if (not hostvars[server].known_tokens.stat.exists) or
          (hostvars[server].known_tokens.stat.checksum != known_tokens_master.stat.checksum|default('')) -%}
          {%- set _ = tokens.update({'sync': True}) -%}
diff --git a/roles/kubernetes/secrets/tasks/gen_certs.yml b/roles/kubernetes/secrets/tasks/gen_certs.yml
index 7178bce0c9d7fe340e13d4b9861efb34bd1914f5..bec1d9f16165f08203694da752c89fe53cc06fe9 100644
--- a/roles/kubernetes/secrets/tasks/gen_certs.yml
+++ b/roles/kubernetes/secrets/tasks/gen_certs.yml
@@ -27,31 +27,30 @@
     master_certs: ['ca-key.pem', 'admin.pem', 'admin-key.pem', 'apiserver-key.pem', 'apiserver.pem']
     node_certs: ['ca.pem', 'node.pem', 'node-key.pem']
 
-- name: Gen_certs | Get the certs from first master
-  slurp:
-    src: "{{ kube_cert_dir }}/{{ item }}"
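+# Shipping all certs as one base64-encoded tarball avoids a per-file slurp/copy round trip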
+- name: Gen_certs | Gather master certs
+  shell: "tar cfz - -C {{ kube_cert_dir }} {{ master_certs|join(' ') }} {{ node_certs|join(' ') }} | base64 --wrap=0"
+  register: master_cert_data
   delegate_to: "{{groups['kube-master'][0]}}"
-  register: slurp_certs
-  with_items: '{{ master_certs + node_certs }}'
+  run_once: true
   when: sync_certs|default(false)
+
+- name: Gen_certs | Gather node certs
+  shell: "tar cfz - -C {{ kube_cert_dir }} {{ node_certs|join(' ') }} | base64 --wrap=0"
+  register: node_cert_data
+  delegate_to: "{{groups['kube-master'][0]}}"
   run_once: true
-  notify: set secret_changed
+  when: sync_certs|default(false)
 
 - name: Gen_certs | Copy certs on masters
-  copy:
-    content: "{{ item.content|b64decode }}"
-    dest: "{{ item.source }}"
-  with_items: '{{slurp_certs.results}}'
+  shell: "echo '{{master_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
+  changed_when: false
   when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
         inventory_hostname != groups['kube-master'][0]
 
 - name: Gen_certs | Copy certs on nodes
-  copy:
-    content: "{{ item.content|b64decode }}"
-    dest: "{{ item.source }}"
-  with_items: '{{slurp_certs.results}}'
-  when: item.item in node_certs and
-        inventory_hostname in groups['kube-node'] and sync_certs|default(false) and
+  shell: "echo '{{node_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
+  changed_when: false
+  when: inventory_hostname in groups['kube-node'] and sync_certs|default(false) and
         inventory_hostname != groups['kube-master'][0]
 
 - name: Gen_certs | check certificate permissions
@@ -65,3 +64,30 @@
   shell: chmod 0600 {{ kube_cert_dir}}/*key.pem
   when: inventory_hostname in groups['kube-master']
   changed_when: false
+
+- name: Gen_certs | target ca-certificates directory
+  set_fact:
+    ca_cert_dir: |-
+      {% if ansible_os_family == "Debian" -%}
+      /usr/local/share/ca-certificates
+      {%- elif ansible_os_family == "RedHat" -%}
+      /etc/pki/ca-trust/source/anchors
+      {%- elif ansible_os_family == "CoreOS" -%}
+      /etc/ssl/certs
+      {%- endif %}
+
+- name: Gen_certs | add CA to trusted CA dir
+  copy:
+    src: "{{ kube_cert_dir }}/ca.pem"
+    dest: "{{ ca_cert_dir }}/kube-ca.crt"
+    remote_src: true
+  register: kube_ca_cert
+
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/CoreOS)
+  command: update-ca-certificates
+  when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS"]
+
+- name: Gen_certs | update ca-certificates (RedHat)
+  command: update-ca-trust extract
+  when: kube_ca_cert.changed and ansible_os_family == "RedHat"
diff --git a/roles/kubernetes/secrets/tasks/gen_tokens.yml b/roles/kubernetes/secrets/tasks/gen_tokens.yml
index 796657f650a23a9a6ef8a27b98b9e53164590269..dbe35811b7670ae35838406962093aebdbb9bdf6 100644
--- a/roles/kubernetes/secrets/tasks/gen_tokens.yml
+++ b/roles/kubernetes/secrets/tasks/gen_tokens.yml
@@ -43,20 +43,15 @@
   delegate_to: "{{groups['kube-master'][0]}}"
   when: sync_tokens|default(false)
 
-- name: Gen_tokens | Get the tokens from first master
-  slurp:
-    src: "{{ item }}"
-  register: slurp_tokens
-  with_items: '{{tokens_list.stdout_lines}}'
-  run_once: true
+- name: Gen_tokens | Gather tokens
+  shell: "tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0"
+  register: tokens_data
   delegate_to: "{{groups['kube-master'][0]}}"
+  run_once: true
   when: sync_tokens|default(false)
-  notify: set secret_changed
 
 - name: Gen_tokens | Copy tokens on masters
-  copy:
-    content: "{{ item.content|b64decode }}"
-    dest: "{{ item.source }}"
-  with_items: '{{slurp_tokens.results}}'
+  shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /"
+  changed_when: false
   when: inventory_hostname in groups['kube-master'] and sync_tokens|default(false) and
         inventory_hostname != groups['kube-master'][0]
diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2
index fa00163a3a28edd569ce50d43be6b08249500a7c..ac94b6800ca0fbc6c69c7e996a07404a3d903a53 100644
--- a/roles/kubernetes/secrets/templates/openssl.conf.j2
+++ b/roles/kubernetes/secrets/templates/openssl.conf.j2
@@ -11,12 +11,18 @@ DNS.1 = kubernetes
 DNS.2 = kubernetes.default
 DNS.3 = kubernetes.default.svc
 DNS.4 = kubernetes.default.svc.{{ dns_domain }}
+DNS.5 = localhost
+{% for host in groups['kube-master'] %}
+DNS.{{ 5 + loop.index }} = {{ host }}
+{% endfor %}
 {% if loadbalancer_apiserver is defined  and apiserver_loadbalancer_domain_name is defined %}
-DNS.5 = {{ apiserver_loadbalancer_domain_name }}
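+{# the loadbalancer entry must follow DNS.5 and the per-master names, hence length + 6 #}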
+{% set idx = groups['kube-master'] | length | int + 6 %}
+DNS.{{ idx }} = {{ apiserver_loadbalancer_domain_name }}
 {% endif %}
 {% for host in groups['kube-master'] %}
 IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
 IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
 {% endfor %}
 {% set idx =  groups['kube-master'] | length | int * 2 + 1 %}
-IP.{{ idx | string }} = {{ kube_apiserver_ip }}
+IP.{{ idx }} = {{ kube_apiserver_ip }}
+IP.{{ idx + 1 }} = 127.0.0.1
diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml
index 45b04c8a9c4dbb3bd68574d23309d90b087f3244..aec7a5e15c78ec3ab7f6aa84f039350299723aa9 100644
--- a/roles/network_plugin/calico/defaults/main.yml
+++ b/roles/network_plugin/calico/defaults/main.yml
@@ -7,4 +7,4 @@ ipip: false
 
 # Set to true if you want your calico cni binaries to overwrite the
 # ones from hyperkube while leaving other cni plugins intact.
-overwrite_hyperkube_cni: false
+overwrite_hyperkube_cni: true
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml
index 2ce342b20ea560af331075b44e59a49663737d2c..46f7298833b27df0c5b12b8a70cc8c4c86a7d3da 100644
--- a/roles/network_plugin/calico/tasks/main.yml
+++ b/roles/network_plugin/calico/tasks/main.yml
@@ -22,16 +22,6 @@
   changed_when: false
   notify: restart calico-node
 
-- name: Calico | Do not use hyperkube cni if kube_version under v1.3.4
-  set_fact:
-    use_hyperkube_cni: false
-  when: kube_version | version_compare('v1.3.4','<')
-
-- name: Calico | Use hyperkube cni if kube_version above v1.3.4
-  set_fact:
-    use_hyperkube_cni: true
-  when: kube_version | version_compare('v1.3.4','>=')
-
 - name: Calico | Copy cni plugins from hyperkube
   command: "/usr/bin/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/rsync -a /opt/cni/bin/ /cnibindir/"
   register: cni_task_result
@@ -39,17 +29,16 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   changed_when: false
-  when: "{{ use_hyperkube_cni|bool }}"
 
 - name: Calico | Install calico cni bin
   command: rsync -pi "{{ local_release_dir }}/calico/bin/calico" "/opt/cni/bin/calico"
   changed_when: false
-  when: "{{ not use_hyperkube_cni|bool or overwrite_hyperkube_cni|bool }}"
+  when: "{{ overwrite_hyperkube_cni|bool }}"
 
 - name: Calico | Install calico-ipam cni bin
   command: rsync -pi "{{ local_release_dir }}/calico/bin/calico-ipam" "/opt/cni/bin/calico-ipam"
   changed_when: false
-  when: "{{ not use_hyperkube_cni|bool or overwrite_hyperkube_cni|bool }}"
+  when: "{{ overwrite_hyperkube_cni|bool }}"
 
 - name: Calico | wait for etcd
   uri: url=http://localhost:2379/health
@@ -90,7 +79,7 @@
   environment:
     NO_DEFAULT_POOLS: true
   run_once: true
-  when: calico_conf.status == 404
+  when: calico_conf.status == 404 or "nodes" not in calico_conf.content
 
 - name: Calico | Get calico configuration from etcd
   uri:
diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml
index 25a9837dbf448b020f9a8a5239fac26509575e92..59cc1bf377f632c1da1de5c7a2476c022dfae6eb 100644
--- a/roles/network_plugin/weave/tasks/main.yml
+++ b/roles/network_plugin/weave/tasks/main.yml
@@ -9,17 +9,6 @@
   notify:
     - restart docker
 
-- name: Weave | Determine hyperkube cni to use depending of the version of kube
-  set_fact:
-    use_hyperkube_cni: >
-      {%- if kube_version | version_compare('v1.3.4','>=') -%}
-        true
-      {%- elif kube_version | version_compare('v1.3.4','<') -%}
-        false
-      {%- else -%}
-        {{ ErrorCannotRecognizeVersion }}
-      {%- endif -%}
-
 - name: Weave | Copy cni plugins from hyperkube
   command: "/usr/bin/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
   register: cni_task_result
@@ -27,7 +16,6 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   changed_when: false
-  when: "{{ use_hyperkube_cni|bool }}"
 
 - name: Weave | Install weave
   command: rsync -piu "{{ local_release_dir }}/weave/bin/weave" "{{ bin_dir }}/weave"
diff --git a/roles/uploads/defaults/main.yml b/roles/uploads/defaults/main.yml
index ad6865da2a1cc9529b7246576ab9c33a4cd6e200..0774d324c1d327fbe8b190feffec5b47b6532e0c 100644
--- a/roles/uploads/defaults/main.yml
+++ b/roles/uploads/defaults/main.yml
@@ -2,11 +2,11 @@
 local_release_dir: /tmp
 
 # Versions
-include_vars: kube_versions.yml
+kube_version: v1.4.3
 
 etcd_version: v3.0.6
-calico_version: v0.20.0
-calico_cni_version: v1.3.1
+calico_version: v0.22.0
+calico_cni_version: v1.4.2
 weave_version: v1.6.1
 
 # Download URL's
@@ -16,8 +16,8 @@ calico_cni_ipam_download_url: "https://github.com/projectcalico/calico-cni/relea
 weave_download_url: "https://github.com/weaveworks/weave/releases/download/{{weave_version}}/weave"
 
 # Checksums
-calico_cni_checksum: "ac05cb9254b5aaa5822cf10325983431bd25489147f2edf9dec7e43d99c43e77"
-calico_cni_ipam_checksum: "3df6951a30749c279229e7e318e74ac4e41263996125be65257db7cd25097273"
+calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
+calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
 weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
 etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
 
diff --git a/roles/uploads/tasks/main.yml b/roles/uploads/tasks/main.yml
index 68fcd432004f15ea7a5d09d38574fc583f133d9b..2d600059940f9649c4dd14748addc16c9022cf5f 100644
--- a/roles/uploads/tasks/main.yml
+++ b/roles/uploads/tasks/main.yml
@@ -1,6 +1,4 @@
 ---
-- include_vars: "kube_versions.yml"
-
 - name: Create dest directories
   file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes
   with_items: '{{downloads}}'
diff --git a/roles/uploads/vars/kube_versions.yml b/roles/uploads/vars/kube_versions.yml
deleted file mode 100644
index 1ea3eb24f0863d7aef07932a53d6baf01e5a501a..0000000000000000000000000000000000000000
--- a/roles/uploads/vars/kube_versions.yml
+++ /dev/null
@@ -1 +0,0 @@
-kube_version: v1.3.0
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 67d4c8b3540455be4b873a68eb6f35f23217d20a..877b5bf36189e75b46377e746c62726975f638d0 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -6,16 +6,10 @@
   vars:
     debug: false
     commands:
-      - name: git_info
-        cmd: find . -type d -name .git -execdir sh -c 'gen-gitinfos.sh global|head -12' \;
       - name: timedate_info
         cmd: timedatectl status
-      - name: space_info
-        cmd: df -h
       - name: kernel_info
         cmd: uname -r
-      - name: distro_info
-        cmd: cat /etc/issue.net
       - name: docker_info
         cmd: docker info
       - name: ip_info
@@ -24,23 +18,26 @@
         cmd: ip ro
       - name: proc_info
         cmd: ps auxf | grep -v ]$
-      - name: systemctl_info
-        cmd: systemctl status
       - name: systemctl_failed_info
         cmd: systemctl --state=failed --no-pager
       - name: k8s_info
         cmd: kubectl get all --all-namespaces -o wide
       - name: errors_info
         cmd: journalctl -p err --utc --no-pager
+      - name: etcd_info
+        cmd: etcdctl --debug cluster-health
 
     logs:
-      - /var/log/ansible.log
-      - /var/log/ansible/ansible.log
       - /var/log/syslog
       - /var/log/daemon.log
       - /var/log/kern.log
-      - inventory/inventory.ini
-      - cluster.yml
+      - /var/log/dpkg.log
+      - /var/log/apt/history.log
+      - /var/log/yum.log
+      - /var/log/calico/bird/current
+      - /var/log/calico/bird6/current
+      - /var/log/calico/felix/current
+      - /var/log/calico/confd/current
 
   tasks:
     - name: Storing commands output
@@ -50,7 +47,7 @@
       with_items: "{{commands}}"
 
     - debug: var=item
-      with_items: output.results
+      with_items: "{{output.results}}"
       when: debug
 
     - name: Fetch results
diff --git a/scripts/configure-logs.yaml b/scripts/configure-logs.yaml
deleted file mode 100644
index d093e9279983693ea850040ffcd4d39402cc9a74..0000000000000000000000000000000000000000
--- a/scripts/configure-logs.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- hosts: localhost
-  become: true
-  gather_facts: no
-
-  vars:
-    log_path: /var/log/ansible/
-    conf_file: /etc/ansible/ansible.cfg
-    human_readable_plugin: false
-    callback_plugin_path: /usr/share/ansible/plugins/callback
-
-  tasks:
-    - name: LOGS | ensure log path
-      file: path="{{log_path}}" state=directory owner={{ansible_ssh_user}}
-
-    - name: LOGS | ensure plugin path
-      file: path="{{callback_plugin_path}}" state=directory owner={{ansible_ssh_user}}
-      when: human_readable_plugin
-
-    - name: LOGS | get plugin
-      git: repo=https://gist.github.com/cd706de198c85a8255f6.git dest=/tmp/cd706de198c85a8255f6
-      when: human_readable_plugin
-
-    - name: LOGS | install plugin
-      copy: src=/tmp/cd706de198c85a8255f6/human_log.py dest="{{callback_plugin_path}}"
-      when: human_readable_plugin
-
-    - name: LOGS | config
-      lineinfile:
-        line: "log_path={{log_path}}/ansible.log"
-        regexp: "^#log_path|^log_path"
-        dest: "{{conf_file}}"
-
-    - name: LOGS | callback plugin
-      lineinfile:
-        line: "callback_plugins={{callback_plugin_path}}"
-        regexp: "^#callback_plugins|^callback_plugins"
-        dest: "{{conf_file}}"
-      when: human_readable_plugin
diff --git a/tests/ansible.cfg b/tests/ansible.cfg
index 2be6f4d02155c89fe873d9008b03fa07c074c5e0..f0e4ef6523518dbb1add11e4535fbbcb996e847e 100644
--- a/tests/ansible.cfg
+++ b/tests/ansible.cfg
@@ -1,4 +1,7 @@
 [ssh_connection]
 pipelining=True
-[defaults] 
+[defaults]
 host_key_checking=False
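+# cache facts on disk so repeated runs against the same hosts skip gathering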
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = /tmp
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index 840cf2e7c318a5c57bc9c617ef853fd937ca70e4..b2c3e3020671529ab866ca02fd3f5ddb519345bb 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -1,6 +1,6 @@
 ---
 - hosts: localhost
-  sudo: False
+  become: false
   gather_facts: no
   vars:
     cloud_machine_type: g1-small
diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml
index d42c6cc9116cac91442449f9ac2ec3627f8e50fe..54902fb6f5d7062963ea12945b069cbc9ca3e5c2 100644
--- a/tests/cloud_playbooks/delete-gce.yml
+++ b/tests/cloud_playbooks/delete-gce.yml
@@ -1,6 +1,6 @@
 ---
 - hosts: localhost
-  sudo: False
+  become: false
   gather_facts: no
   vars:
     cloud_machine_type: f1-micro
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ba31ab56ff1f29586ff3b0d43c77016852399929
--- /dev/null
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -0,0 +1,43 @@
+---
+- hosts: localhost
+  become: false
+  gather_facts: no
+
+  vars:
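+    # time limit (in seconds, here 20h) on the signed URLs gc_storage generates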
+    expire: 72000
+
+  tasks:
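+    # Travis job ids look like "1234.5"; dots are replaced to form a valid bucket name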
+    - name: replace_test_id
+      set_fact:
+        test_name: "{{ test_id | regex_replace('\\.', '-') }}"
+
+    - name: Create a bucket
+      gc_storage:
+        bucket: "{{ test_name }}"
+        mode: create
+        expiration: "{{ expire }}"
+        permission: private
+        gs_access_key: "{{ gs_key }}"
+        gs_secret_key: "{{ gs_skey }}"
+
+    - name: Upload collected diagnostic info
+      gc_storage:
+        bucket: "{{ test_name }}"
+        mode: put
+        permission: private
+        expiration: "{{ expire }}"
+        object: "build-{{ test_name }}-{{ kube_network_plugin }}-logs.tar.gz"
+        src: logs.tar.gz
+        gs_access_key: "{{ gs_key }}"
+        gs_secret_key: "{{ gs_skey }}"
+
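+    # get_url returns a time-limited download link for the uploaded archive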
+    - name: Get a link
+      gc_storage:
+        bucket: "{{ test_name }}"
+        object: "build-{{ test_name }}-{{ kube_network_plugin }}-logs.tar.gz"
+        mode: get_url
+        gs_access_key: "{{ gs_key }}"
+        gs_secret_key: "{{ gs_skey }}"
+      register: url
+
+    - debug: msg="Download URL {{url.url}}"
diff --git a/tests/templates/inventory-gce.j2 b/tests/templates/inventory-gce.j2
index 72ad469debf571887bf73c3ecf7fab636db26115..4189107710ef31f06f573ea7ce781ab1d7e9995e 100644
--- a/tests/templates/inventory-gce.j2
+++ b/tests/templates/inventory-gce.j2
@@ -2,6 +2,16 @@ node1 ansible_ssh_host={{gce.instance_data[0].public_ip}}
 node2 ansible_ssh_host={{gce.instance_data[1].public_ip}}
 node3 ansible_ssh_host={{gce.instance_data[2].public_ip}}
 
+{% if mode is defined and mode == "separate" %}
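+{# separate mode: one dedicated machine per role #}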
+[kube-master]
+node1
+
+[kube-node]
+node2
+
+[etcd]
+node3
+{% else %}
 [kube-master]
 node1
 node2
@@ -14,6 +24,7 @@ node3
 [etcd]
 node1
 node2
+{% endif %}
 
 [k8s-cluster:children]
 kube-node