diff --git a/README.md b/README.md
index 5a98ccda7603ecdd4911a2005c3cb8a7c605a162..89c4771c7b2d00b12dba1992f7f576ecee5cb3d3 100644
--- a/README.md
+++ b/README.md
@@ -6,17 +6,52 @@ Based on [CiscoCloud](https://github.com/CiscoCloud/kubernetes-ansible) work.
 
 ### Requirements
 Tested on **Debian Jessie** and **Ubuntu** (14.10, 15.04, 15.10).
-The target servers must have access to the Internet in order to pull docker imaqes.
-The firewalls are not managed, you'll need to implement your own rules the way you used to.
+* The target servers must have access to the Internet in order to pull docker images.
+* The firewalls are not managed, you'll need to implement your own rules the way you are used to.
+* The following packages are required: openssl, curl, dnsmasq, python-httplib2 on the remote servers, and python-ipaddr on the deployment machine.
 
 Ansible v1.9.x
 
 ### Components
-* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.2
+* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.3
 * [etcd](https://github.com/coreos/etcd/releases) v2.2.2
-* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.11.0
+* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.12.0
 * [flanneld](https://github.com/coreos/flannel/releases) v0.5.5
-* [docker](https://www.docker.com/) v1.8.3
+* [docker](https://www.docker.com/) v1.9.1
+
+Quickstart
+-------------------------
+The following steps will quickly set up a kubernetes cluster with default configuration.
+These defaults are good for test purposes.
+
+Edit the inventory according to the number of servers
+```
+[downloader]
+10.115.99.1
+
+[kube-master]
+10.115.99.31
+
+[etcd]
+10.115.99.31
+10.115.99.32
+10.115.99.33
+
+[kube-node]
+10.115.99.32
+10.115.99.33
+
+[k8s-cluster:children]
+kube-node
+kube-master
+```
+
+Run the playbook
+```
+ansible-playbook -i environments/test/inventory cluster.yml -u root
+```
+
+You can jump directly to "*Available apps, installation procedure*"
 
 
 Ansible
@@ -24,7 +59,7 @@ Ansible
 ### Download binaries
 A role allows to download required binaries. They will be stored in a directory defined by the variable
 **'local_release_dir'** (by default /tmp).
-Please ensure that you have enough disk space there (about **1G**).
+Please ensure that you have enough disk space there (about **300M**).
 
 **Note**: Whenever you'll need to change the version of a software, you'll have to erase the content of this directory.
 
@@ -44,11 +79,15 @@ In node-mesh mode the nodes peers with all the nodes in order to exchange routes
 
 [kube-master]
 10.99.0.26
+10.99.0.59
 
 [etcd]
 10.99.0.26
+10.99.0.4
+10.99.0.59
 
 [kube-node]
+10.99.0.59
 10.99.0.4
 10.99.0.5
 10.99.0.36
@@ -60,18 +99,13 @@ In node-mesh mode the nodes peers with all the nodes in order to exchange routes
 10.99.0.5 local_as=xxxxxxxx
 
 [usa]
+10.99.0.59 local_as=xxxxxxxx
 10.99.0.36 local_as=xxxxxxxx
 10.99.0.37 local_as=xxxxxxxx
 
 [k8s-cluster:children]
 kube-node
 kube-master
-
-[paris:vars]
-peers=[{"router_id": "10.99.0.2", "as": "65xxx"}, {"router_id": "10.99.0.3", "as": "65xxx"}]
-
-[usa:vars]
-peers=[{"router_id": "10.99.0.34", "as": "65xxx"}, {"router_id": "10.99.0.35", "as": "65xxx"}]
 ```
 
 ### Playbook
@@ -86,16 +120,17 @@ peers=[{"router_id": "10.99.0.34", "as": "65xxx"}, {"router_id": "10.99.0.35", "
   roles:
     - { role: etcd, tags: etcd }
     - { role: docker, tags: docker }
-    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
     - { role: dnsmasq, tags: dnsmasq }
+    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
+
+- hosts: kube-node
+  roles:
+    - { role: kubernetes/node, tags: node }
 
 - hosts: kube-master
   roles:
     - { role: kubernetes/master, tags: master }
 
-- hosts: kube-node
-  roles:
-    - { role: kubernetes/node, tags: node }
 ```
 
 ### Run
@@ -107,6 +142,17 @@ ansible-playbook -i environments/dev/inventory cluster.yml -u root
 
 Kubernetes
 -------------------------
+### Multi master notes
+* You can choose where to install the master components. If you want your master node to act both as master (api,scheduler,controller) and node (e.g. accept workloads, create pods ...), 
+the server address has to be present on both groups 'kube-master' and 'kube-node'.
+
+* Almost all kubernetes components are running in pods except *kubelet*. These pods are managed by kubelet, which ensures they're always running
+
+* One etcd cluster member per node will be configured. For safety reasons, you should have at least two master nodes.
+
+* Kube-proxy doesn't support multiple apiservers on startup ([#18174](https://github.com/kubernetes/kubernetes/issues/18174)). An external loadbalancer needs to be configured.
+In order to do so, some variables have to be set: '**loadbalancer_apiserver**' and '**apiserver_loadbalancer_domain_name**'
+ 
 
 ### Network Overlay
 You can choose between 2 network plugins. Only one must be chosen.
diff --git a/apps.yml b/apps.yml
index 354983b396472884830d05b61f90b26643152229..ee9d9f36b55d48d14ce2e94399fc4bd3af0fb110 100644
--- a/apps.yml
+++ b/apps.yml
@@ -2,7 +2,7 @@
 - hosts: kube-master
   roles:
     # System
-    - { role: apps/k8s-kubedns, tags: 'kubedns' }
+    - { role: apps/k8s-kubedns, tags: ['kubedns', 'kube-system'] }
 
     # Databases
     - { role: apps/k8s-postgres, tags: 'postgres' }
@@ -14,16 +14,16 @@
     - { role: apps/k8s-rabbitmq, tags: 'rabbitmq' }
 
     # Monitoring
-    - { role: apps/k8s-influxdb, tags: 'influxdb'}
-    - { role: apps/k8s-heapster, tags: 'heapster'}
-    - { role: apps/k8s-kubedash, tags: 'kubedash'}
+    - { role: apps/k8s-influxdb, tags: ['influxdb', 'kube-system']}
+    - { role: apps/k8s-heapster, tags: ['heapster', 'kube-system']}
+    - { role: apps/k8s-kubedash, tags: ['kubedash', 'kube-system']}
 
     # logging
     - { role: apps/k8s-kube-logstash, tags: 'kube-logstash'}
 
     # Console
     - { role: apps/k8s-fabric8, tags: 'fabric8' }
-    - { role: apps/k8s-kube-ui, tags: 'kube-ui' }
+    - { role: apps/k8s-kube-ui, tags: ['kube-ui', 'kube-system']}
 
     # ETCD
-    - { role: apps/k8s-etcd, tags: 'etcd'}
\ No newline at end of file
+    - { role: apps/k8s-etcd, tags: 'etcd'}
diff --git a/cluster.yml b/cluster.yml
index 63ad7de5d15408407590647c75a5caf099369066..ef91f27acc9666a7a5320215a37a0dabb6b1202e 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -8,13 +8,13 @@
   roles:
     - { role: etcd, tags: etcd }
     - { role: docker, tags: docker }
-    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
     - { role: dnsmasq, tags: dnsmasq }
-
-- hosts: kube-master
-  roles:
-    - { role: kubernetes/master, tags: master }
+    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
 
 - hosts: kube-node
   roles:
     - { role: kubernetes/node, tags: node }
+
+- hosts: kube-master
+  roles:
+    - { role: kubernetes/master, tags: master }
diff --git a/environments/production/group_vars/all.yml b/environments/production/group_vars/all.yml
deleted file mode 100644
index 04ca4c4670b78c47a63b9abedb2dd575ae0398ab..0000000000000000000000000000000000000000
--- a/environments/production/group_vars/all.yml
+++ /dev/null
@@ -1,6 +0,0 @@
- # Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
diff --git a/environments/production/group_vars/k8s-cluster.yml b/environments/test/group_vars/all.yml
similarity index 54%
rename from environments/production/group_vars/k8s-cluster.yml
rename to environments/test/group_vars/all.yml
index efaf3cca6bb15b05579a4ecb84f10840676cf596..41c87a57d87851a9b18a36b08e0e786b9bf01e74 100644
--- a/environments/production/group_vars/k8s-cluster.yml
+++ b/environments/test/group_vars/all.yml
@@ -1,25 +1,35 @@
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+# Where the binaries will be downloaded.
+# Note: ensure that you have enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
 # Users to create for basic auth in Kubernetes API via HTTP
-# kube_users:
-#   kube:
-#     pass: changeme
-#     role: admin
+kube_users:
+  kube:
+    pass: changeme
+    role: admin
 #   root:
 #     pass: changeme
 #     role: admin
 
 # Kubernetes cluster name, also will be used as DNS domain
-# cluster_name: cluster.local
+cluster_name: cluster.local
 
 # set this variable to calico if needed. keep it empty if flannel is used
-# kube_network_plugin: calico
+kube_network_plugin: calico
 
 # Kubernetes internal network for services, unused block of space.
-# kube_service_addresses: 10.233.0.0/18
+kube_service_addresses: 10.233.0.0/18
 
 # internal network. When used, it will assign IP
 # addresses from this range to individual pods.
 # This network must be unused in your network infrastructure!
-# kube_pods_subnet: 10.233.64.0/18
+kube_pods_subnet: 10.233.64.0/18
 
 # internal network total size (optional). This is the prefix of the
 # entire network. Must be unused in your environment.
@@ -28,16 +38,17 @@
 # internal network node size allocation (optional). This is the size allocated
 # to each node on your network.  With these defaults you should have
 # room for 4096 nodes with 254 pods per node.
-# kube_network_node_prefix: 24
+kube_network_node_prefix: 24
 
 # With calico it is possible to distributed routes with border routers of the datacenter.
-# peer_with_router: false
+peer_with_router: false
 # Warning : enabling router peering will disable calico's default behavior ('node mesh').
 # The subnets of each nodes will be distributed by the datacenter router
 
 # The port the API Server will be listening on.
-# kube_master_port: 443 # (https)
-# kube_master_insecure_port: 8080 # (http)
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
 
 # Internal DNS configuration.
 # Kubernetes can create and mainatain its own DNS server to resolve service names
@@ -48,13 +59,28 @@
 # Kubernetes won't do this for you (yet).
 
 # Upstream dns servers used by dnsmasq
-# upstream_dns_servers:
-#   - 8.8.8.8
-#   - 4.4.8.8
+upstream_dns_servers:
+  - 8.8.8.8
+  - 4.4.8.8
 #
 # # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
-# dns_setup: true
-# dns_domain: "{{ cluster_name }}"
+dns_setup: true
+dns_domain: "{{ cluster_name }}"
 #
 # # Ip address of the kubernetes dns service
-# dns_server: 10.233.0.10
+dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
+
+# For multi masters architecture:
+# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
+# This domain name will be inserted into the /etc/hosts file of all servers
+# configuration example with haproxy :
+# listen kubernetes-apiserver-https
+#   bind 10.99.0.21:8383
+#    option ssl-hello-chk
+#    mode tcp
+#    timeout client 3h
+#    timeout server 3h
+#    server master1 10.99.0.26:443
+#    server master2 10.99.0.27:443
+#    balance roundrobin
+# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
diff --git a/environments/test/group_vars/new-york.yml b/environments/test/group_vars/new-york.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ce9d953963b9f6ee28ab21164ae582b6c3979232
--- /dev/null
+++ b/environments/test/group_vars/new-york.yml
@@ -0,0 +1,10 @@
+#---
+#peers:
+#  - router_id: "10.99.0.34"
+#   as: "65xxx"
+#  - router_id: "10.99.0.35"
+#   as: "65xxx"
+#
+#loadbalancer_apiserver:
+#  address: "10.99.0.44"
+#  port: "8383"
diff --git a/environments/test/group_vars/paris.yml b/environments/test/group_vars/paris.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e8b34ae0d24bf78101a596a8990089b9d80dfcb0
--- /dev/null
+++ b/environments/test/group_vars/paris.yml
@@ -0,0 +1,10 @@
+#---
+#peers:
+#  - router_id: "10.99.0.2"
+#   as: "65xxx"
+#  - router_id: "10.99.0.3"
+#   as: "65xxx"
+#
+#loadbalancer_apiserver:
+#  address: "10.99.0.21"
+#  port: "8383"
diff --git a/environments/test/inventory.example b/environments/test/inventory.example
new file mode 100644
index 0000000000000000000000000000000000000000..a811b084a2fc5066e29bc06632df21fb2d7ad2fe
--- /dev/null
+++ b/environments/test/inventory.example
@@ -0,0 +1,26 @@
+[downloader]
+10.99.0.26
+
+[kube-master]
+10.99.0.26
+10.99.0.27
+
+[kube-node]
+10.99.0.27
+10.99.0.4
+10.99.0.5
+10.99.0.36
+10.99.0.37
+
+[paris]
+10.99.0.26
+10.99.0.4 local_as=xxxxxxxx
+10.99.0.5 local_as=xxxxxxxx
+
+[new-york]
+10.99.0.36 local_as=xxxxxxxx
+10.99.0.37 local_as=xxxxxxxx
+
+[k8s-cluster:children]
+kube-node
+kube-master
diff --git a/roles/apps/k8s-common b/roles/apps/k8s-common
index eaab0692ed375420e183d18392ce79a4c6ed2069..c69c5f881fe414f6856f811b9bb40cd19bcf83f4 160000
--- a/roles/apps/k8s-common
+++ b/roles/apps/k8s-common
@@ -1 +1 @@
-Subproject commit eaab0692ed375420e183d18392ce79a4c6ed2069
+Subproject commit c69c5f881fe414f6856f811b9bb40cd19bcf83f4
diff --git a/roles/apps/k8s-etcd b/roles/apps/k8s-etcd
index e3e574ea25ef4b1db79cc20b6dd31efa8a7d87cb..abd61ee91ae729e7b79ecd56d6bb4eed0ddbe604 160000
--- a/roles/apps/k8s-etcd
+++ b/roles/apps/k8s-etcd
@@ -1 +1 @@
-Subproject commit e3e574ea25ef4b1db79cc20b6dd31efa8a7d87cb
+Subproject commit abd61ee91ae729e7b79ecd56d6bb4eed0ddbe604
diff --git a/roles/apps/k8s-heapster b/roles/apps/k8s-heapster
index dc088e25efcd040e127543b861448aa0d219eac9..44a6519bf8957bff316d3e3bc857d554f69c4016 160000
--- a/roles/apps/k8s-heapster
+++ b/roles/apps/k8s-heapster
@@ -1 +1 @@
-Subproject commit dc088e25efcd040e127543b861448aa0d219eac9
+Subproject commit 44a6519bf8957bff316d3e3bc857d554f69c4016
diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml
index ab534dfb2a5d790fa8bcd886a407a8929e792809..b3585f47a4bf4223aeccf81ab90b0379f804713f 100644
--- a/roles/dnsmasq/tasks/main.yml
+++ b/roles/dnsmasq/tasks/main.yml
@@ -8,6 +8,14 @@
   when: hostvars[item].ansible_default_ipv4.address is defined
   with_items: groups['all']
 
+- name: populate kubernetes loadbalancer address into hosts file
+  lineinfile:
+    dest: /etc/hosts
+    regexp: ".*{{ apiserver_loadbalancer_domain_name }}$"
+    line: "{{ loadbalancer_apiserver.address }} lb-apiserver.kubernetes.local"
+    state: present
+  when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined
+
 - name: clean hosts file
   lineinfile:
     dest: /etc/hosts
@@ -21,16 +29,17 @@
   apt:
     name: "{{ item }}"
     state: present
+    update_cache: yes
   with_items:
     - dnsmasq
     - bind9utils
-  when: inventory_hostname in groups['kube-master'][0]
+  when: inventory_hostname in groups['kube-master']
 
 - name: ensure dnsmasq.d directory exists
   file:
     path: /etc/dnsmasq.d
     state: directory
-  when: inventory_hostname in groups['kube-master'][0]
+  when: inventory_hostname in groups['kube-master']
 
 - name: configure dnsmasq
   template:
@@ -39,14 +48,14 @@
     mode: 755
   notify:
     - restart dnsmasq
-  when: inventory_hostname in groups['kube-master'][0]
+  when: inventory_hostname in groups['kube-master']
 
 - name: enable dnsmasq
   service:
     name: dnsmasq
     state: started
     enabled: yes
-  when: inventory_hostname in groups['kube-master'][0]
+  when: inventory_hostname in groups['kube-master']
 
 - name: update resolv.conf with new DNS setup
   template:
@@ -56,3 +65,5 @@
 
 - name: disable resolv.conf modification by dhclient
   copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=u+x
+
+- meta: flush_handlers
diff --git a/roles/dnsmasq/templates/resolv.conf.j2 b/roles/dnsmasq/templates/resolv.conf.j2
index d10a6fc927b317e4217db0a5004551b6a33837dd..f0b475b02a9b950e57817b3a78292635b5c8a29d 100644
--- a/roles/dnsmasq/templates/resolv.conf.j2
+++ b/roles/dnsmasq/templates/resolv.conf.j2
@@ -1,5 +1,9 @@
 ; generated by ansible
 search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
+{% if inventory_hostname in groups['kube-master'] %}
+nameserver {{ ansible_default_ipv4.address }}
+{% else %}
 {% for host in groups['kube-master'] %}
 nameserver {{ hostvars[host]['ansible_default_ipv4']['address'] }}
 {% endfor %}
+{% endif %}
diff --git a/roles/docker/tasks/install.yml b/roles/docker/tasks/install.yml
index 4880629a863f160dfb414f24363933dc2387ff0f..473e132fb7032e50108c8966d1cccf553c0fdbbb 100644
--- a/roles/docker/tasks/install.yml
+++ b/roles/docker/tasks/install.yml
@@ -13,7 +13,7 @@
   with_items:
     - aufs-tools
     - cgroupfs-mount
-    - docker-engine=1.8.3-0~{{ ansible_distribution_release }}
+    - docker-engine=1.9.1-0~{{ ansible_distribution_release }}
 
 - name: Copy default docker configuration
   template: src=default-docker.j2 dest=/etc/default/docker backup=yes
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 9a42c2a115d036dd9e0102ac619663ebde3cb281..a4739f0fc29a283aad55bb4b37ba14a03b4de871 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -1,13 +1,15 @@
 ---
-etcd_download_url: https://github.com/coreos/etcd/releases/download
-flannel_download_url: https://github.com/coreos/flannel/releases/download
-kube_download_url: https://github.com/GoogleCloudPlatform/kubernetes/releases/download
-calico_download_url: https://github.com/Metaswitch/calico-docker/releases/download
-
 etcd_version: v2.2.2
 flannel_version: 0.5.5
 
-kube_version: v1.1.2
-kube_sha1: 69d110d371752c6492d2f8695aa7a47be5b6ed4e
+kube_version: v1.1.3
+kubectl_checksum: "01b9bea18061a27b1cf30e34fd8ab45cfc096c9a9d57d0ed21072abb40dd3d1d"
+kubelet_checksum: "62191c66f2d670dd52ddf1d88ef81048977abf1ffaa95ee6333299447eb6a482"
+
+calico_version: v0.12.0
+
+etcd_download_url: "https://github.com/coreos/etcd/releases/download"
+flannel_download_url: "https://github.com/coreos/flannel/releases/download"
+kube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64"
+calico_download_url: "https://github.com/Metaswitch/calico-docker/releases/download"
 
-calico_version: v0.11.0
diff --git a/roles/download/tasks/kubernetes.yml b/roles/download/tasks/kubernetes.yml
index de6359d147f3b00b07136f066a21e2c18196e001..0985a17d3104ecab6ecefaa63858079218c9933c 100644
--- a/roles/download/tasks/kubernetes.yml
+++ b/roles/download/tasks/kubernetes.yml
@@ -1,47 +1,17 @@
 ---
-- name: Create kubernetes release directory
+- name: Create kubernetes binary directory
   local_action: file
-     path={{ local_release_dir }}/kubernetes
+     path="{{ local_release_dir }}/kubernetes/bin"
      state=directory
+     recurse=yes
 
-- name: Check if kubernetes release archive has been downloaded
-  local_action: stat
-     path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
-  register: k_tar
-
-# issues with get_url module and redirects, to be tested again in the near future
-- name: Download kubernetes
-  local_action: shell
-    curl -o {{ local_release_dir }}/kubernetes/kubernetes.tar.gz -Ls {{ kube_download_url }}/{{ kube_version }}/kubernetes.tar.gz
-  when: not k_tar.stat.exists or k_tar.stat.checksum != "{{ kube_sha1 }}"
-  register: dl_kube
-
-- name: Compare kubernetes archive checksum
-  local_action: stat
-     path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
-  register: k_tar
-  failed_when: k_tar.stat.checksum != "{{ kube_sha1 }}"
-  when: dl_kube|changed
-
-- name: Extract kubernetes archive
-  local_action: unarchive
-     src={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
-     dest={{ local_release_dir }}/kubernetes copy=no
-  when: dl_kube|changed
-
-- name: Extract kubernetes binaries archive
-  local_action: unarchive
-     src={{ local_release_dir }}/kubernetes/kubernetes/server/kubernetes-server-linux-amd64.tar.gz
-     dest={{ local_release_dir }}/kubernetes copy=no
-  when: dl_kube|changed
-
-- name: Pick up only kubernetes binaries
-  local_action: synchronize
-     src={{ local_release_dir }}/kubernetes/kubernetes/server/bin
-     dest={{ local_release_dir }}/kubernetes
-  when: dl_kube|changed
-
-- name: Delete unused kubernetes files
-  local_action: file
-     path={{ local_release_dir }}/kubernetes/kubernetes state=absent
-  when: dl_kube|changed
+- name: Download kubelet and kubectl
+  local_action: get_url
+    url="{{ kube_download_url }}/{{ item.name }}"
+    dest="{{ local_release_dir }}/kubernetes/bin"
+    sha256sum="{{ item.checksum }}"
+  with_items:
+    - name: kubelet
+      checksum: "{{ kubelet_checksum }}"
+    - name: kubectl
+      checksum: "{{ kubectl_checksum }}"
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index af2442abf5f6b7c1fecb0f3dcad93d65b2fca191..67334a353a685dbab01e6b91a10cddf881186d6d 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -1,15 +1,14 @@
 ---
-- name: restart daemons
-  command: /bin/true
-  notify:
-    - reload systemd
-    - restart etcd2
-
 - name: reload systemd
   command: systemctl daemon-reload
 
-- name: restart etcd2
-  service: name=etcd2 state=restarted
+- name: restart reloaded-etcd2
+  service:
+    name: etcd2
+    state: restarted
 
-- name: Save iptables rules
-  command: service iptables save
+- name: restart etcd2
+  command: /bin/true
+  notify:
+    - reload systemd
+    - restart reloaded-etcd2
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index aca67018f67b461f99a8889d7a54e33fa169e9a6..5aea90f44ff553ccd054b0ebf918a79d23e2222a 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -1,14 +1,18 @@
 ---
-- name: Disable ferm
-  service: name=ferm state=stopped enabled=no
+- name: Copy etcd2.service systemd file
+  template:
+    src: systemd-etcd2.service.j2
+    dest: /lib/systemd/system/etcd2.service
+    backup: yes
+  notify:
+    - restart etcd2
 
 - name: Create etcd2 environment vars dir
   file: path=/etc/systemd/system/etcd2.service.d state=directory
 
 - name: Write etcd2 config file
-  template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2-cluster.conf backup=yes
+  template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2.conf backup=yes
   notify:
-    - reload systemd
     - restart etcd2
 
 - name: Ensure etcd2 is running
diff --git a/roles/etcd/tasks/install.yml b/roles/etcd/tasks/install.yml
index b500d88edc96f1c6f080f6d74d7fb3706cf0097e..f02dc93db3e8c0ec5cd602f3f7d459c559a1d258 100644
--- a/roles/etcd/tasks/install.yml
+++ b/roles/etcd/tasks/install.yml
@@ -11,15 +11,7 @@
   with_items:
     - etcdctl
     - etcd
-  notify:
-    - restart daemons
+  notify: restart etcd2
 
 - name: Create etcd2 binary symlink
   file: src=/usr/local/bin/etcd dest=/usr/local/bin/etcd2 state=link
-
-- name: Copy etcd2.service systemd file
-  template:
-    src: systemd-etcd2.service.j2
-    dest: /lib/systemd/system/etcd2.service
-    backup: yes
-  notify: restart daemons
diff --git a/roles/etcd/templates/etcd2.j2 b/roles/etcd/templates/etcd2.j2
index 27143e4582e4092cbdc4cd084a14ef1307d8fcc0..a00fb72e299102b42bd78157e3667e4c4a835f53 100644
--- a/roles/etcd/templates/etcd2.j2
+++ b/roles/etcd/templates/etcd2.j2
@@ -1,17 +1,21 @@
 # etcd2.0
 [Service]
-{% if inventory_hostname in groups['kube-master'] %}
-Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{ ansible_default_ipv4.address }}:2379,http://{{ ansible_default_ipv4.address }}:4001"
-Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ ansible_default_ipv4.address }}:2380"
-Environment="ETCD_INITIAL_CLUSTER=master=http://{{ ansible_default_ipv4.address }}:2380"
+{% if inventory_hostname in groups['etcd'] %}
+{% set etcd = {} %}
+{% for srv in groups['etcd'] %}
+{% if inventory_hostname == srv %}
+{% set _dummy = etcd.update({'name':"master"+loop.index|string}) %}
+{% endif %}
+{% endfor %}
+Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379"
+Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address)  }}:2380"
+Environment="ETCD_INITIAL_CLUSTER={% for srv in groups['etcd'] %}master{{ loop.index|string }}=http://{{ srv }}:2380{% if not loop.last %},{% endif %}{% endfor %}"
 Environment="ETCD_INITIAL_CLUSTER_STATE=new"
 Environment="ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd"
-Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
-Environment="ETCD_LISTEN_PEER_URLS=http://:2380,http://{{ ansible_default_ipv4.address }}:7001"
-Environment="ETCD_NAME=master"
-{% else %}
-Environment="ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
-Environment="ETCD_INITIAL_CLUSTER=master=http://{{ groups['kube-master'][0] }}:2380"
-Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
-Environment="ETCD_PROXY=on"
+Environment="ETCD_LISTEN_CLIENT_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address)  }}:2379,http://127.0.0.1:2379"
+Environment="ETCD_LISTEN_PEER_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address)  }}:2380"
+Environment="ETCD_NAME={{ etcd.name }}"
+{% else %}
+Environment="ETCD_INITIAL_CLUSTER={% for srv in groups['etcd'] %}master{{ loop.index|string }}=http://{{ srv }}:2380{% if not loop.last %},{% endif %}{% endfor %}"
+Environment="ETCD_LISTEN_CLIENT_URLS=http://127.0.0.1:23799"
 {% endif %}
diff --git a/roles/etcd/templates/systemd-etcd2.service.j2 b/roles/etcd/templates/systemd-etcd2.service.j2
index 26cda24ebcbedd4886dfa0b4a0274d818fb0b0a3..84a527d0465e3e0466b3223ae0f84c2cda4cb6fe 100644
--- a/roles/etcd/templates/systemd-etcd2.service.j2
+++ b/roles/etcd/templates/systemd-etcd2.service.j2
@@ -6,7 +6,11 @@ Conflicts=etcd.service
 User=etcd
 Environment=ETCD_DATA_DIR=/var/lib/etcd2
 Environment=ETCD_NAME=%m
+{% if inventory_hostname in groups['etcd'] %}
 ExecStart={{ bin_dir }}/etcd2
+{% else %}
+ExecStart={{ bin_dir }}/etcd2 -proxy on
+{% endif %}
 Restart=always
 RestartSec=10s
 LimitNOFILE=40000
diff --git a/roles/kubernetes/common/files/make-ca-cert.sh b/roles/kubernetes/common/files/make-ca-cert.sh
deleted file mode 100755
index 3950eec91efbbd5ddc7f16dcd2e865f2a0fa4f9a..0000000000000000000000000000000000000000
--- a/roles/kubernetes/common/files/make-ca-cert.sh
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/bin/bash
-
-# Copyright 2014 The Kubernetes Authors All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-# Caller should set in the ev:
-# MASTER_IP - this may be an ip or things like "_use_gce_external_ip_"
-# DNS_DOMAIN - which will be passed to minions in --cluster_domain
-# SERVICE_CLUSTER_IP_RANGE - where all service IPs are allocated
-# MASTER_NAME - I'm not sure what it is...
-
-# Also the following will be respected
-# CERT_DIR - where to place the finished certs
-# CERT_GROUP - who the group owner of the cert files should be
-
-cert_ip="${MASTER_IP:="${1}"}"
-master_name="${MASTER_NAME:="kubernetes"}"
-service_range="${SERVICE_CLUSTER_IP_RANGE:="10.0.0.0/16"}"
-dns_domain="${DNS_DOMAIN:="cluster.local"}"
-cert_dir="${CERT_DIR:-"/srv/kubernetes"}"
-cert_group="${CERT_GROUP:="kube-cert"}"
-
-# The following certificate pairs are created:
-#
-#  - ca (the cluster's certificate authority)
-#  - server
-#  - kubelet
-#  - kubecfg (for kubectl)
-#
-# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
-# the certs that we need.
-
-# TODO: Add support for discovery on other providers?
-if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
-  cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
-fi
-
-if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
-  cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
-fi
-
-if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
-  cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
-fi
-
-tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
-trap 'rm -rf "${tmpdir}"' EXIT
-cd "${tmpdir}"
-
-# TODO: For now, this is a patched tool that makes subject-alt-name work, when
-# the fix is upstream  move back to the upstream easyrsa.  This is cached in GCS
-# but is originally taken from:
-#   https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
-#
-# To update, do the following:
-# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
-# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
-# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
-#
-# Due to GCS caching of public objects, it may take time for this to be widely
-# distributed.
-
-# Calculate the first ip address in the service range
-octects=($(echo "${service_range}" | sed -e 's|/.*||' -e 's/\./ /g'))
-((octects[3]+=1))
-service_ip=$(echo "${octects[*]}" | sed 's/ /./g')
-
-# Determine appropriete subject alt names
-sans="IP:${cert_ip},IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${dns_domain},DNS:${master_name}"
-
-curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
-tar xzf easy-rsa.tar.gz > /dev/null
-cd easy-rsa-master/easyrsa3
-
-(./easyrsa init-pki > /dev/null 2>&1
- ./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1
- ./easyrsa --subject-alt-name="${sans}" build-server-full "${master_name}" nopass > /dev/null 2>&1
- ./easyrsa build-client-full kubelet nopass > /dev/null 2>&1
- ./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || {
- # If there was an error in the subshell, just die.
- # TODO(roberthbailey): add better error handling here
- echo "=== Failed to generate certificates: Aborting ==="
- exit 2
- }
-
-mkdir -p "$cert_dir"
-
-cp -p pki/ca.crt "${cert_dir}/ca.crt"
-cp -p "pki/issued/${master_name}.crt" "${cert_dir}/server.crt" > /dev/null 2>&1
-cp -p "pki/private/${master_name}.key" "${cert_dir}/server.key" > /dev/null 2>&1
-cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
-cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
-cp -p pki/issued/kubelet.crt "${cert_dir}/kubelet.crt"
-cp -p pki/private/kubelet.key "${cert_dir}/kubelet.key"
-
-CERTS=("ca.crt" "server.key" "server.crt" "kubelet.key" "kubelet.crt" "kubecfg.key" "kubecfg.crt")
-for cert in "${CERTS[@]}"; do
-  chgrp "${cert_group}" "${cert_dir}/${cert}"
-  chmod 660 "${cert_dir}/${cert}"
-done
diff --git a/roles/kubernetes/common/meta/main.yml b/roles/kubernetes/common/meta/main.yml
deleted file mode 100644
index 87756afe15709041c425622d9e864b3fa1db17c3..0000000000000000000000000000000000000000
--- a/roles/kubernetes/common/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
-  - { role: etcd }
diff --git a/roles/kubernetes/common/tasks/gen_certs.yml b/roles/kubernetes/common/tasks/gen_certs.yml
deleted file mode 100644
index 74fd4458c991dfd5c51ce09984daff6c5748f087..0000000000000000000000000000000000000000
--- a/roles/kubernetes/common/tasks/gen_certs.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-#- name: Get create ca cert script from Kubernetes
-#  get_url:
-#    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
-#    dest={{ kube_script_dir }}/make-ca-cert.sh mode=0500
-#    force=yes
-
-- name: certs | install cert generation script
-  copy:
-    src=make-ca-cert.sh
-    dest={{ kube_script_dir }}
-    mode=0500
-  changed_when: false
-
-# FIXME This only generates a cert for one master...
-- name: certs | run cert generation script
-  command:
-    "{{ kube_script_dir }}/make-ca-cert.sh {{ inventory_hostname }}"
-  args:
-    creates: "{{ kube_cert_dir }}/server.crt"
-  environment:
-    MASTER_IP: "{{ hostvars[inventory_hostname]['ip'] | default(hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}"
-    MASTER_NAME: "{{ inventory_hostname }}"
-    DNS_DOMAIN: "{{ dns_domain }}"
-    SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}"
-    CERT_DIR: "{{ kube_cert_dir }}"
-    CERT_GROUP: "{{ kube_cert_group }}"
-
-- name: certs | check certificate permissions
-  file:
-    path={{ item }}
-    group={{ kube_cert_group }}
-    owner=kube
-    mode=0440
-  with_items:
-    - "{{ kube_cert_dir }}/ca.crt"
-    - "{{ kube_cert_dir }}/server.crt"
-    - "{{ kube_cert_dir }}/server.key"
-    - "{{ kube_cert_dir }}/kubecfg.crt"
-    - "{{ kube_cert_dir }}/kubecfg.key"
-    - "{{ kube_cert_dir }}/kubelet.crt"
-    - "{{ kube_cert_dir }}/kubelet.key"
diff --git a/roles/kubernetes/common/tasks/main.yml b/roles/kubernetes/common/tasks/main.yml
deleted file mode 100644
index 76d3bbc805ce35bc9476a0342572c16a14c50e8f..0000000000000000000000000000000000000000
--- a/roles/kubernetes/common/tasks/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: define alias command for kubectl all
-  lineinfile:
-    dest=/etc/bash.bashrc
-    line="alias kball='{{ bin_dir }}/kubectl --all-namespaces -o wide'"
-    regexp='^alias kball=.*$'
-    state=present
-    insertafter=EOF
-    create=True
-
-- name: create kubernetes config directory
-  file: path={{ kube_config_dir }} state=directory
-
-- name: create kubernetes script directory
-  file: path={{ kube_script_dir }} state=directory
-
-- name: Make sure manifest directory exists
-  file: path={{ kube_manifest_dir }} state=directory
-
-- name: write the global config file
-  template:
-    src: config.j2
-    dest: "{{ kube_config_dir }}/config"
-  notify:
-    - restart daemons
-
-- include: secrets.yml
-  tags:
-    - secrets
diff --git a/roles/kubernetes/master/files/kubectl_bash_completion.sh b/roles/kubernetes/master/files/kubectl_bash_completion.sh
index 8997705307150b07056f55700260655d2c35f9e4..f6d0f25b51bc2e5ffc2d3ed312c21e1158704484 100644
--- a/roles/kubernetes/master/files/kubectl_bash_completion.sh
+++ b/roles/kubernetes/master/files/kubectl_bash_completion.sh
@@ -41,7 +41,9 @@ __handle_reply()
     __debug "${FUNCNAME}"
     case $cur in
         -*)
-            compopt -o nospace
+            if [[ $(type -t compopt) = "builtin" ]]; then
+                compopt -o nospace
+            fi
             local allflags
             if [ ${#must_have_one_flag[@]} -ne 0 ]; then
                 allflags=("${must_have_one_flag[@]}")
@@ -49,7 +51,9 @@ __handle_reply()
                 allflags=("${flags[*]} ${two_word_flags[*]}")
             fi
             COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
-            [[ $COMPREPLY == *= ]] || compopt +o nospace
+            if [[ $(type -t compopt) = "builtin" ]]; then
+                [[ $COMPREPLY == *= ]] || compopt +o nospace
+            fi
             return 0;
             ;;
     esac
@@ -156,11 +160,11 @@ __handle_word()
 {
     if [[ $c -ge $cword ]]; then
         __handle_reply
-	return
+        return
     fi
     __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}"
     if [[ "${words[c]}" == -* ]]; then
-	__handle_flag
+        __handle_flag
     elif __contains_word "${words[c]}" "${commands[@]}"; then
         __handle_command
     else
@@ -283,6 +287,30 @@ _kubectl_get()
     flags+=("--watch")
     flags+=("-w")
     flags+=("--watch-only")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -327,6 +355,30 @@ _kubectl_describe()
     flags_completion+=("__handle_filename_extension_flag json|yaml|yml")
     flags+=("--selector=")
     two_word_flags+=("-l")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -370,6 +422,30 @@ _kubectl_create()
     flags+=("--save-config")
     flags+=("--schema-cache-dir=")
     flags+=("--validate")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_flag+=("--filename=")
@@ -402,6 +478,30 @@ _kubectl_replace()
     flags+=("--schema-cache-dir=")
     flags+=("--timeout=")
     flags+=("--validate")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_flag+=("--filename=")
@@ -429,6 +529,30 @@ _kubectl_patch()
     two_word_flags+=("-o")
     flags+=("--patch=")
     two_word_flags+=("-p")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_flag+=("--patch=")
@@ -461,6 +585,30 @@ _kubectl_delete()
     flags+=("--selector=")
     two_word_flags+=("-l")
     flags+=("--timeout=")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -508,6 +656,30 @@ _kubectl_edit()
     flags+=("--output-version=")
     flags+=("--save-config")
     flags+=("--windows-line-endings")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -533,6 +705,30 @@ _kubectl_apply()
     two_word_flags+=("-o")
     flags+=("--schema-cache-dir=")
     flags+=("--validate")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_flag+=("--filename=")
@@ -550,6 +746,30 @@ _kubectl_namespace()
     flags_with_completion=()
     flags_completion=()
 
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -577,6 +797,30 @@ _kubectl_logs()
     flags+=("--since-time=")
     flags+=("--tail=")
     flags+=("--timestamps")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -592,6 +836,7 @@ _kubectl_rolling-update()
     flags_with_completion=()
     flags_completion=()
 
+    flags+=("--container=")
     flags+=("--deployment-label-key=")
     flags+=("--dry-run")
     flags+=("--filename=")
@@ -616,18 +861,42 @@ _kubectl_rolling-update()
     flags+=("--timeout=")
     flags+=("--update-period=")
     flags+=("--validate")
-
-    must_have_one_flag=()
-    must_have_one_flag+=("--filename=")
-    must_have_one_flag+=("-f")
-    must_have_one_flag+=("--image=")
-    must_have_one_noun=()
-}
-
-_kubectl_scale()
-{
-    last_command="kubectl_scale"
-    commands=()
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
+
+    must_have_one_flag=()
+    must_have_one_flag+=("--filename=")
+    must_have_one_flag+=("-f")
+    must_have_one_flag+=("--image=")
+    must_have_one_noun=()
+}
+
+_kubectl_scale()
+{
+    last_command="kubectl_scale"
+    commands=()
 
     flags=()
     two_word_flags=()
@@ -646,6 +915,30 @@ _kubectl_scale()
     flags+=("--replicas=")
     flags+=("--resource-version=")
     flags+=("--timeout=")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_flag+=("--replicas=")
@@ -668,6 +961,30 @@ _kubectl_attach()
     flags+=("-i")
     flags+=("--tty")
     flags+=("-t")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -691,6 +1008,30 @@ _kubectl_exec()
     flags+=("-i")
     flags+=("--tty")
     flags+=("-t")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -708,6 +1049,30 @@ _kubectl_port-forward()
 
     flags+=("--pod=")
     two_word_flags+=("-p")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -738,6 +1103,30 @@ _kubectl_proxy()
     two_word_flags+=("-w")
     flags+=("--www-prefix=")
     two_word_flags+=("-P")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -786,6 +1175,30 @@ _kubectl_run()
     flags+=("--template=")
     two_word_flags+=("-t")
     flags+=("--tty")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_flag+=("--image=")
@@ -834,6 +1247,30 @@ _kubectl_expose()
     flags+=("--template=")
     two_word_flags+=("-t")
     flags+=("--type=")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -871,6 +1308,30 @@ _kubectl_autoscale()
     flags+=("--sort-by=")
     flags+=("--template=")
     two_word_flags+=("-t")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_flag+=("--max=")
@@ -908,6 +1369,30 @@ _kubectl_label()
     flags+=("--sort-by=")
     flags+=("--template=")
     two_word_flags+=("-t")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -964,10 +1449,34 @@ _kubectl_annotate()
     flags+=("--sort-by=")
     flags+=("--template=")
     two_word_flags+=("-t")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-}
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
+
+    must_have_one_flag=()
+    must_have_one_noun=()
+}
 
 _kubectl_config_view()
 {
@@ -992,6 +1501,30 @@ _kubectl_config_view()
     flags+=("--sort-by=")
     flags+=("--template=")
     two_word_flags+=("-t")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1012,6 +1545,25 @@ _kubectl_config_set-cluster()
     flags+=("--embed-certs")
     flags+=("--insecure-skip-tls-verify")
     flags+=("--server=")
+    flags+=("--alsologtostderr")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1033,6 +1585,25 @@ _kubectl_config_set-credentials()
     flags+=("--password=")
     flags+=("--token=")
     flags+=("--username=")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--user=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1051,6 +1622,27 @@ _kubectl_config_set-context()
     flags+=("--cluster=")
     flags+=("--namespace=")
     flags+=("--user=")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1066,6 +1658,30 @@ _kubectl_config_set()
     flags_with_completion=()
     flags_completion=()
 
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1081,6 +1697,30 @@ _kubectl_config_unset()
     flags_with_completion=()
     flags_completion=()
 
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1096,6 +1736,30 @@ _kubectl_config_use-context()
     flags_with_completion=()
     flags_completion=()
 
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1119,6 +1783,29 @@ _kubectl_config()
     flags_completion=()
 
     flags+=("--kubeconfig=")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1134,6 +1821,30 @@ _kubectl_cluster-info()
     flags_with_completion=()
     flags_completion=()
 
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1149,6 +1860,30 @@ _kubectl_api-versions()
     flags_with_completion=()
     flags_completion=()
 
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1166,6 +1901,30 @@ _kubectl_version()
 
     flags+=("--client")
     flags+=("-c")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1182,6 +1941,30 @@ _kubectl_explain()
     flags_completion=()
 
     flags+=("--recursive")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_noun=()
@@ -1215,6 +1998,30 @@ _kubectl_convert()
     flags+=("--template=")
     two_word_flags+=("-t")
     flags+=("--validate")
+    flags+=("--alsologtostderr")
+    flags+=("--api-version=")
+    flags+=("--certificate-authority=")
+    flags+=("--client-certificate=")
+    flags+=("--client-key=")
+    flags+=("--cluster=")
+    flags+=("--context=")
+    flags+=("--insecure-skip-tls-verify")
+    flags+=("--kubeconfig=")
+    flags+=("--log-backtrace-at=")
+    flags+=("--log-dir=")
+    flags+=("--log-flush-frequency=")
+    flags+=("--logtostderr")
+    flags+=("--match-server-version")
+    flags+=("--namespace=")
+    flags+=("--password=")
+    flags+=("--server=")
+    two_word_flags+=("-s")
+    flags+=("--stderrthreshold=")
+    flags+=("--token=")
+    flags+=("--user=")
+    flags+=("--username=")
+    flags+=("--v=")
+    flags+=("--vmodule=")
 
     must_have_one_flag=()
     must_have_one_flag+=("--filename=")
@@ -1291,7 +2098,7 @@ _kubectl()
 __start_kubectl()
 {
     local cur prev words cword
-    if declare -F _init_completions >/dev/null 2>&1; then
+    if declare -F _init_completion >/dev/null 2>&1; then
         _init_completion -s || return
     else
         __my_init_completion || return
@@ -1311,5 +2118,10 @@ __start_kubectl()
     __handle_word
 }
 
-complete -F __start_kubectl kubectl
+if [[ $(type -t compopt) = "builtin" ]]; then
+    complete -F __start_kubectl kubectl
+else
+    complete -o nospace -F __start_kubectl kubectl
+fi
+
 # ex: ts=4 sw=4 et filetype=sh
diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml
index 4e7644b3297b9d52c38e88a73dc67691fffa6eeb..8b00d16892fb9bb300986d7f0842f3c8dfdf8c1a 100644
--- a/roles/kubernetes/master/handlers/main.yml
+++ b/roles/kubernetes/master/handlers/main.yml
@@ -1,47 +1,16 @@
 ---
-- name: restart daemons
-  command: /bin/true
-  notify:
-    - reload systemd
-    - restart reloaded-scheduler
-    - restart reloaded-controller-manager
-    - restart reloaded-apiserver
-    - restart reloaded-proxy
-
 - name: reload systemd
   command: systemctl daemon-reload
 
-- name: restart apiserver
-  command: /bin/true
-  notify:
-    - reload systemd
-    - restart reloaded-apiserver
-
-- name: restart reloaded-apiserver
-  service:
-    name: kube-apiserver
-    state: restarted
-
-- name: restart controller-manager
-  command: /bin/true
-  notify:
-    - reload systemd
-    - restart reloaded-controller-manager
-
-- name: restart reloaded-controller-manager
-  service:
-    name: kube-controller-manager
-    state: restarted
-
-- name: restart scheduler
+- name: restart kubelet
   command: /bin/true
   notify:
     - reload systemd
-    - restart reloaded-scheduler
+    - restart reloaded-kubelet
 
-- name: restart reloaded-scheduler
+- name: restart reloaded-kubelet
   service:
-    name: kube-scheduler
+    name: kubelet
     state: restarted
 
 - name: restart proxy
diff --git a/roles/kubernetes/master/meta/main.yml b/roles/kubernetes/master/meta/main.yml
index 31675692c658f3e11bc3b05d06503744c6a47cd1..53dd0401790ba04ab2aab531ac3023b9aa3749df 100644
--- a/roles/kubernetes/master/meta/main.yml
+++ b/roles/kubernetes/master/meta/main.yml
@@ -1,3 +1,4 @@
 ---
 dependencies:
-  - { role: kubernetes/common }
+  - { role: etcd }
+  - { role: kubernetes/node }
diff --git a/roles/kubernetes/master/tasks/config.yml b/roles/kubernetes/master/tasks/config.yml
deleted file mode 100644
index 2f488a921793d518f5d7f0d333026b9589aa8c5c..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/tasks/config.yml
+++ /dev/null
@@ -1,94 +0,0 @@
----
-- name: get the node token values from token files
-  slurp:
-    src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
-  with_items:
-    - "system:controller_manager"
-    - "system:scheduler"
-    - "system:kubectl"
-    - "system:proxy"
-  register: tokens
-  delegate_to: "{{ groups['kube-master'][0] }}"
-
-- name: Set token facts
-  set_fact:
-    controller_manager_token: "{{ tokens.results[0].content|b64decode }}"
-    scheduler_token: "{{ tokens.results[1].content|b64decode }}"
-    kubectl_token: "{{ tokens.results[2].content|b64decode }}"
-    proxy_token: "{{ tokens.results[3].content|b64decode }}"
-
-- name: write the config files for api server
-  template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver backup=yes
-  notify:
-    - restart apiserver
-
-- name: write config file for controller-manager
-  template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager backup=yes
-  notify:
-    - restart controller-manager
-
-- name: write the kubecfg (auth) file for controller-manager
-  template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig backup=yes
-  notify:
-    - restart controller-manager
-
-- name: write the config file for scheduler
-  template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler backup=yes
-  notify:
-    - restart scheduler
-
-- name: write the kubecfg (auth) file for scheduler
-  template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig backup=yes
-  notify:
-    - restart scheduler
-
-- name: write the kubecfg (auth) file for kubectl
-  template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig backup=yes
-
-- name: Copy kubectl bash completion
-  copy: src=kubectl_bash_completion.sh dest=/etc/bash_completion.d/kubectl.sh
-
-- name: Create proxy environment vars dir
-  file: path=/etc/systemd/system/kube-proxy.service.d state=directory
-
-- name: Write proxy config file
-  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf backup=yes
-  notify:
-    - restart proxy
-
-- name: write the kubecfg (auth) file for proxy
-  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig backup=yes
-
-- name: populate users for basic auth in API
-  lineinfile:
-    dest: "{{ kube_users_dir }}/known_users.csv"
-    create: yes
-    line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
-    backup: yes
-  with_dict: "{{ kube_users }}"
-  notify:
-    - restart apiserver
-
-- name: Enable controller-manager
-  service:
-    name: kube-controller-manager
-    enabled: yes
-    state: started
-
-- name: Enable scheduler
-  service:
-    name: kube-scheduler
-    enabled: yes
-    state: started
-
-- name: Enable kube-proxy
-  service:
-    name: kube-proxy
-    enabled: yes
-    state: started
-
-- name: Enable apiserver
-  service:
-    name: kube-apiserver
-    enabled: yes
-    state: started
diff --git a/roles/kubernetes/master/tasks/install.yml b/roles/kubernetes/master/tasks/install.yml
deleted file mode 100644
index 92d1945154c6ddc3447d834154abd7066fc60353..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/tasks/install.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: Write kube-apiserver systemd init file
-  template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service backup=yes
-  notify: restart apiserver
-
-- name: Write kube-controller-manager systemd init file
-  template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service backup=yes
-  notify: restart controller-manager
-
-- name: Write kube-scheduler systemd init file
-  template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service backup=yes
-  notify: restart scheduler
-
-- name: Write kube-proxy systemd init file
-  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service backup=yes
-  notify: restart proxy
-
-- name: Install kubernetes binaries
-  copy:
-     src={{ local_release_dir }}/kubernetes/bin/{{ item }}
-     dest={{ bin_dir }}
-     owner=kube
-     mode=u+x
-  with_items:
-    - kube-apiserver
-    - kube-controller-manager
-    - kube-scheduler
-    - kube-proxy
-    - kubectl
-  notify:
-    - restart daemons
-
-- name: Allow apiserver to bind on both secure and insecure ports
-  shell: setcap cap_net_bind_service+ep {{ bin_dir }}/kube-apiserver
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index 8570db68c389b54cc241c45737f459cb88b8ee7d..6426a0e0de69b7ee7d86f9ffe98afbf1c51d0b97 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -1,3 +1,82 @@
 ---
-- include: install.yml
-- include: config.yml
+- name: Install kubectl binary
+  copy:
+     src={{ local_release_dir }}/kubernetes/bin/kubectl
+     dest={{ bin_dir }}
+     owner=kube
+     mode=u+x
+  notify:
+    - restart kubelet
+
+- name: Copy kubectl bash completion
+  copy:
+    src: kubectl_bash_completion.sh
+    dest: /etc/bash_completion.d/kubectl.sh
+
+- name: populate users for basic auth in API
+  lineinfile:
+    dest: "{{ kube_users_dir }}/known_users.csv"
+    create: yes
+    line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
+    backup: yes
+  with_dict: "{{ kube_users }}"
+
+# Sync masters
+- name: synchronize auth directories for masters
+  synchronize:
+    src: "{{ item }}"
+    dest: "{{ kube_config_dir }}"
+    recursive: yes
+    delete: yes
+    rsync_opts: [ '--one-file-system']
+  with_items:
+    - "{{ kube_token_dir }}"
+    - "{{ kube_cert_dir }}"
+    - "{{ kube_users_dir }}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+# Write manifests
+- name: Write kube-apiserver manifest
+  template:
+    src: manifests/kube-apiserver.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
+  notify:
+    - restart kubelet
+
+- meta: flush_handlers
+
+- name: wait for the apiserver to be running (pulling image and running container)
+  wait_for:
+    port: "{{kube_apiserver_insecure_port}}"
+    delay: 10
+
+- name: install required python module 'httplib2'
+  apt:
+    name: "python-httplib2"
+    state: present
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Create 'kube-system' namespace
+  uri:
+    url: http://{{ groups['kube-master'][0]}}:{{ kube_apiserver_insecure_port }}/api/v1/namespaces
+    method: POST
+    body: '{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"kube-system"}}'
+    status_code: 201,409
+    body_format: json
+  run_once: yes
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Write kube-controller-manager manifest
+  template:
+    src: manifests/kube-controller-manager.manifest.j2
+    dest: "{{ kube_config_dir }}/kube-controller-manager.manifest"
+
+- name: Write kube-scheduler manifest
+  template:
+    src: manifests/kube-scheduler.manifest.j2
+    dest: "{{ kube_config_dir }}/kube-scheduler.manifest"
+
+- name: Write podmaster manifest
+  template:
+    src: manifests/kube-podmaster.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-podmaster.manifest"
diff --git a/roles/kubernetes/master/templates/apiserver.j2 b/roles/kubernetes/master/templates/apiserver.j2
deleted file mode 100644
index 0a38d5c87cafb40f376cffa95243e9e509ddfc4d..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/templates/apiserver.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-###
-# kubernetes system config
-#
-# The following values are used to configure the kube-apiserver
-#
-
-# The address on the local server to listen to.
-KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
-
-# The port on the local server to listen on.
-KUBE_API_PORT="--insecure-port={{kube_master_insecure_port}} --secure-port={{ kube_master_port }}"
-
-# KUBELET_PORT="--kubelet_port=10250"
-
-# Address range to use for services
-KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
-
-# Location of the etcd cluster
-KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
-
-# default admission control policies
-KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
-
-# RUNTIME API CONFIGURATION (e.g. enable extensions)
-KUBE_RUNTIME_CONFIG="{% if kube_api_runtime_config is defined %}{% for conf in kube_api_runtime_config %}--runtime-config={{ conf }} {% endfor %}{% endif %}"
-
-# Add you own!
-KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt"
diff --git a/roles/kubernetes/master/templates/controller-manager.j2 b/roles/kubernetes/master/templates/controller-manager.j2
deleted file mode 100644
index c7a932900f55ba44c46a823f8ce9a9b858e7c684..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/templates/controller-manager.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-###
-# The following values are used to configure the kubernetes controller-manager
-
-# defaults from config and apiserver should be adequate
-
-KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service_account_private_key_file={{ kube_cert_dir }}/server.key --root_ca_file={{ kube_cert_dir }}/ca.crt"
diff --git a/roles/kubernetes/master/templates/controller-manager.kubeconfig.j2 b/roles/kubernetes/master/templates/controller-manager.kubeconfig.j2
deleted file mode 100644
index c71ac50f367e88da7cc6ced851a39a6b1e875561..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/templates/controller-manager.kubeconfig.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: controller-manager-to-{{ cluster_name }}
-preferences: {}
-clusters:
-- cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
-  name: {{ cluster_name }}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: controller-manager
-  name: controller-manager-to-{{ cluster_name }}
-users:
-- name: controller-manager
-  user:
-    token: {{ controller_manager_token }}
diff --git a/roles/kubernetes/master/templates/kubectl.kubeconfig.j2 b/roles/kubernetes/master/templates/kubectl-kubeconfig.yaml.j2
similarity index 68%
rename from roles/kubernetes/master/templates/kubectl.kubeconfig.j2
rename to roles/kubernetes/master/templates/kubectl-kubeconfig.yaml.j2
index dd8f0eabe10d4cfc111009c433855e5814748f7b..5cc74cf9ebcccef24e4d43d672684e3ae6fe2ff9 100644
--- a/roles/kubernetes/master/templates/kubectl.kubeconfig.j2
+++ b/roles/kubernetes/master/templates/kubectl-kubeconfig.yaml.j2
@@ -4,8 +4,8 @@ current-context: kubectl-to-{{ cluster_name }}
 preferences: {}
 clusters:
 - cluster:
-    certificate-authority-data: {{ kube_ca_cert|b64encode }}
-    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
+    certificate-authority-data: {{ kube_node_cert|b64encode }}
+    server: https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }}
   name: {{ cluster_name }}
 contexts:
 - context:
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
new file mode 100644
index 0000000000000000000000000000000000000000..940ec1ace518bf6fa44ff0c6efaef4303660598e
--- /dev/null
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -0,0 +1,53 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kube-apiserver
+spec:
+  hostNetwork: true
+  containers:
+  - name: kube-apiserver
+    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
+    command:
+    - /hyperkube
+    - apiserver
+    - --insecure-bind-address=0.0.0.0
+    - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
+
+    - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
+    - --service-cluster-ip-range={{ kube_service_addresses }}
+    - --client-ca-file={{ kube_cert_dir }}/ca.pem
+    - --basic-auth-file={{ kube_users_dir }}/known_users.csv
+    - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
+    - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
+    - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
+    - --secure-port={{ kube_apiserver_port }}
+    - --insecure-port={{ kube_apiserver_insecure_port }}
+{% if kube_api_runtime_config is defined %}
+{%   for conf in kube_api_runtime_config %}
+    - --runtime-config={{ conf }}
+{%   endfor %}
+{% endif %}
+    - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
+    - --v={{ kube_log_level | default('2') }}
+    - --allow-privileged=true
+    ports:
+    - containerPort: {{ kube_apiserver_port }}
+      hostPort: {{ kube_apiserver_port }}
+      name: https
+    - containerPort: {{ kube_apiserver_insecure_port }}
+      hostPort: {{ kube_apiserver_insecure_port }}
+      name: local
+    volumeMounts:
+    - mountPath: {{ kube_config_dir }}
+      name: kubernetes-config
+      readOnly: true
+    - mountPath: /etc/ssl/certs
+      name: ssl-certs-host
+      readOnly: true
+  volumes:
+  - hostPath:
+      path: {{ kube_config_dir }}
+    name: kubernetes-config
+  - hostPath:
+      path: /usr/share/ca-certificates
+    name: ssl-certs-host
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
new file mode 100644
index 0000000000000000000000000000000000000000..44e52f7c083cddf1be280edc738cf5d60b4e7459
--- /dev/null
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kube-controller-manager
+  namespace: kube-system
+spec:
+  hostNetwork: true
+  containers:
+  - name: kube-controller-manager
+    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
+    command:
+    - /hyperkube
+    - controller-manager
+    - --master=http://127.0.0.1:{{kube_apiserver_insecure_port}}
+    - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
+    - --root-ca-file={{ kube_cert_dir }}/ca.pem
+    - --v={{ kube_log_level | default('2') }}
+    livenessProbe:
+      httpGet:
+        host: 127.0.0.1
+        path: /healthz
+        port: 10252
+      initialDelaySeconds: 15
+      timeoutSeconds: 1
+    volumeMounts:
+    - mountPath: {{ kube_cert_dir }}
+      name: ssl-certs-kubernetes
+      readOnly: true
+    - mountPath: /etc/ssl/certs
+      name: ssl-certs-host
+      readOnly: true
+  volumes:
+  - hostPath:
+      path: {{ kube_cert_dir }}
+    name: ssl-certs-kubernetes
+  - hostPath:
+      path: /usr/share/ca-certificates
+    name: ssl-certs-host
diff --git a/roles/kubernetes/master/templates/manifests/kube-podmaster.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-podmaster.manifest.j2
new file mode 100644
index 0000000000000000000000000000000000000000..86447badf736d91d74fe704e75407b2b87978137
--- /dev/null
+++ b/roles/kubernetes/master/templates/manifests/kube-podmaster.manifest.j2
@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kube-podmaster
+  namespace: kube-system
+spec:
+  hostNetwork: true
+  containers:
+  - name: scheduler-elector
+    image: gcr.io/google_containers/podmaster:1.1
+    command:
+    - /podmaster
+    - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
+
+    - --key=scheduler
+    - --source-file={{ kube_config_dir }}/kube-scheduler.manifest
+    - --dest-file={{ kube_manifest_dir }}/kube-scheduler.manifest
+    volumeMounts:
+    - mountPath: {{ kube_config_dir }}
+      name: manifest-src
+      readOnly: true
+    - mountPath: {{ kube_manifest_dir }}
+      name: manifest-dst
+  - name: controller-manager-elector
+    image: gcr.io/google_containers/podmaster:1.1
+    command:
+    - /podmaster
+    - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %}
+
+    - --key=controller
+    - --source-file={{ kube_config_dir }}/kube-controller-manager.manifest
+    - --dest-file={{ kube_manifest_dir }}/kube-controller-manager.manifest
+    terminationMessagePath: /dev/termination-log
+    volumeMounts:
+    - mountPath: {{ kube_config_dir }}
+      name: manifest-src
+      readOnly: true
+    - mountPath: {{ kube_manifest_dir }}
+      name: manifest-dst
+  volumes:
+  - hostPath:
+      path: {{ kube_config_dir }}
+    name: manifest-src
+  - hostPath:
+      path: {{ kube_manifest_dir }}
+    name: manifest-dst
diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
new file mode 100644
index 0000000000000000000000000000000000000000..6360dcc54b69c0d0c9d9f0e037db8390341c1f51
--- /dev/null
+++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kube-scheduler
+  namespace: kube-system
+spec:
+  hostNetwork: true
+  containers:
+  - name: kube-scheduler
+    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
+    command:
+    - /hyperkube
+    - scheduler
+    - --master=http://127.0.0.1:{{kube_apiserver_insecure_port}}
+    - --v={{ kube_log_level | default('2') }}
+    livenessProbe:
+      httpGet:
+        host: 127.0.0.1
+        path: /healthz
+        port: 10251
+      initialDelaySeconds: 15
+      timeoutSeconds: 1
diff --git a/roles/kubernetes/master/templates/proxy.j2 b/roles/kubernetes/master/templates/proxy.j2
deleted file mode 100644
index 33f811a534b53fdc75d0ae08e6241945dcb4daba..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/templates/proxy.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-###
-# kubernetes proxy config
-
-# default config should be adequate
-
-# Add your own!
-[Service]
-Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig --proxy-mode={{kube_proxy_mode}}"
diff --git a/roles/kubernetes/master/templates/proxy.kubeconfig.j2 b/roles/kubernetes/master/templates/proxy.kubeconfig.j2
deleted file mode 100644
index 5e35eb5d2566e2feefb9c6c6c7c6ba18dd4676ac..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/templates/proxy.kubeconfig.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: proxy-to-{{ cluster_name }}
-preferences: {}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: proxy
-  name: proxy-to-{{ cluster_name }}
-clusters:
-- cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: http://{{ groups['kube-master'][0] }}:{{kube_master_insecure_port}}
-  name: {{ cluster_name }}
-users:
-- name: proxy
-  user:
-    token: {{ proxy_token }}
diff --git a/roles/kubernetes/master/templates/scheduler.j2 b/roles/kubernetes/master/templates/scheduler.j2
deleted file mode 100644
index 8af898d0bde32acb77624ee58329cd240683218a..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/templates/scheduler.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-###
-# kubernetes scheduler config
-
-# default config should be adequate
-
-# Add your own!
-KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig"
diff --git a/roles/kubernetes/master/templates/scheduler.kubeconfig.j2 b/roles/kubernetes/master/templates/scheduler.kubeconfig.j2
deleted file mode 100644
index bc6203745eaef96833f9645e561ae7ab914fd4f2..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/templates/scheduler.kubeconfig.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: scheduler-to-{{ cluster_name }}
-preferences: {}
-clusters:
-- cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
-  name: {{ cluster_name }}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: scheduler
-  name: scheduler-to-{{ cluster_name }}
-users:
-- name: scheduler
-  user:
-    token: {{ scheduler_token }}
diff --git a/roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2
deleted file mode 100644
index c2dd67484a49d614c465f6bbb1dc393612d45a10..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-[Unit]
-Description=Kubernetes API Server
-Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-Requires=etcd2.service
-After=etcd2.service
-
-[Service]
-EnvironmentFile=/etc/network-environment
-EnvironmentFile=-/etc/kubernetes/config
-EnvironmentFile=-/etc/kubernetes/apiserver
-User=kube
-ExecStart={{ bin_dir }}/kube-apiserver \
-	    $KUBE_LOGTOSTDERR \
-	    $KUBE_LOG_LEVEL \
-	    $KUBE_ETCD_SERVERS \
-	    $KUBE_API_ADDRESS \
-	    $KUBE_API_PORT \
-	    $KUBELET_PORT \
-	    $KUBE_ALLOW_PRIV \
-	    $KUBE_SERVICE_ADDRESSES \
-	    $KUBE_ADMISSION_CONTROL \
-	    $KUBE_RUNTIME_CONFIG \
-	    $KUBE_API_ARGS
-Restart=on-failure
-Type=notify
-LimitNOFILE=65536
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/kubernetes/master/templates/systemd-init/kube-controller-manager.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-controller-manager.service.j2
deleted file mode 100644
index a308630eb7bd6eeb0b9777424a42b9657464e076..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/templates/systemd-init/kube-controller-manager.service.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-[Unit]
-Description=Kubernetes Controller Manager
-Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-Requires=etcd2.service
-After=etcd2.service
-
-[Service]
-EnvironmentFile=-/etc/kubernetes/config
-EnvironmentFile=-/etc/kubernetes/controller-manager
-User=kube
-ExecStart={{ bin_dir }}/kube-controller-manager \
-	    $KUBE_LOGTOSTDERR \
-	    $KUBE_LOG_LEVEL \
-	    $KUBE_MASTER \
-	    $KUBE_CONTROLLER_MANAGER_ARGS
-Restart=on-failure
-LimitNOFILE=65536
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/kubernetes/master/templates/systemd-init/kube-proxy.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-proxy.service.j2
deleted file mode 100644
index b1170c5d883183ef2cf5f7f11881920f0ec775e5..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/templates/systemd-init/kube-proxy.service.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-[Unit]
-Description=Kubernetes Kube-Proxy Server
-Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
-After=docker.service calico-node.service
-{% else %}
-After=docker.service
-{% endif %}
-
-[Service]
-EnvironmentFile=/etc/kubernetes/config
-EnvironmentFile=/etc/network-environment
-ExecStart={{ bin_dir }}/kube-proxy \
-	    $KUBE_LOGTOSTDERR \
-	    $KUBE_LOG_LEVEL \
-	    $KUBE_MASTER \
-	    $KUBE_PROXY_ARGS
-Restart=on-failure
-LimitNOFILE=65536
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/kubernetes/master/templates/systemd-init/kube-scheduler.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-scheduler.service.j2
deleted file mode 100644
index c5d93111f149e93ff4b61cca2bc9da4c52a313fd..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/templates/systemd-init/kube-scheduler.service.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-[Unit]
-Description=Kubernetes Scheduler Plugin
-Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-Requires=etcd2.service
-After=etcd2.service
-
-[Service]
-EnvironmentFile=-/etc/kubernetes/config
-EnvironmentFile=-/etc/kubernetes/scheduler
-User=kube
-ExecStart={{ bin_dir }}/kube-scheduler \
-	    $KUBE_LOGTOSTDERR \
-	    $KUBE_LOG_LEVEL \
-	    $KUBE_MASTER \
-	    $KUBE_SCHEDULER_ARGS
-Restart=on-failure
-LimitNOFILE=65536
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/kubernetes/common/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
similarity index 77%
rename from roles/kubernetes/common/defaults/main.yml
rename to roles/kubernetes/node/defaults/main.yml
index 69d619ae0fde2525ec150fce8f69512e3eca9095..a098e0c11427d92191b09593394ebcb1e8233c02 100644
--- a/roles/kubernetes/common/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -12,7 +12,7 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_config_dir: /etc/kubernetes
 
 # This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/certs"
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
 
 # This is where all of the bearer tokens will be stored
 kube_token_dir: "{{ kube_config_dir }}/tokens"
@@ -32,13 +32,18 @@ dns_domain: "{{ cluster_name }}"
 
 kube_proxy_mode: userspace
 
+# Temporary image, waiting for official google release
+#  hyperkube_image_repo: gcr.io/google_containers/hyperkube
+hyperkube_image_repo: quay.io/smana/hyperkube
+hyperkube_image_tag: v1.1.3
+
 # IP address of the DNS server.
 # Kubernetes will create a pod with several containers, serving as the DNS
 # server and expose it under this IP address. The IP address must be from
 # the range specified as kube_service_addresses. This magic will actually
 # pick the 10th ip address in the kube_service_addresses range and use that.
-# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
+dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
 
-# kube_api_runtime_config:
-#   - extensions/v1beta1/daemonsets=true
-#   - extensions/v1beta1/deployments=true
+kube_api_runtime_config:
+  - extensions/v1beta1/daemonsets=true
+  - extensions/v1beta1/deployments=true
diff --git a/roles/kubernetes/common/files/kube-gen-token.sh b/roles/kubernetes/node/files/kube-gen-token.sh
similarity index 100%
rename from roles/kubernetes/common/files/kube-gen-token.sh
rename to roles/kubernetes/node/files/kube-gen-token.sh
diff --git a/roles/kubernetes/node/files/make-ssl.sh b/roles/kubernetes/node/files/make-ssl.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9ab0a49df5598d32ccc1dcc31f8d46fe382552b3
--- /dev/null
+++ b/roles/kubernetes/node/files/make-ssl.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+# Author: skahlouc@skahlouc-laptop
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o pipefail
+
+usage()
+{
+    cat << EOF
+Create self signed certificates
+
+Usage : $(basename $0) -f <config> [-c <cloud_provider>] [-d <ssldir>] [-g <ssl_group>]
+      -h | --help         : Show this message
+      -f | --config       : Openssl configuration file
+      -c | --cloud        : Cloud provider (GCE, AWS or AZURE)
+      -d | --ssldir       : Directory where the certificates will be installed
+      -g | --group        : Group of the certificates
+               
+               ex : 
+               $(basename $0) -f openssl.conf -c GCE -d /srv/ssl -g kube
+EOF
+}
+
+# Options parsing
+while (($#)); do
+    case "$1" in
+        -h | --help)   usage;   exit 0;;
+        -f | --config) CONFIG=${2}; shift 2;;
+        -c | --cloud) CLOUD=${2}; shift 2;;
+        -d | --ssldir) SSLDIR="${2}"; shift 2;; 
+        -g | --group) SSLGRP="${2}"; shift 2;;
+        *)
+            usage
+            echo "ERROR : Unknown option"
+            exit 3
+        ;;
+    esac
+done
+
+if [ -z ${CONFIG} ]; then
+    echo "ERROR: the openssl configuration file is missing. option -f"
+    exit 1
+fi
+if [ -z ${SSLDIR} ]; then
+    SSLDIR="/etc/kubernetes/certs"
+fi
+if [ -z ${SSLGRP} ]; then
+    SSLGRP="kube-cert"
+fi
+
+#echo "config=$CONFIG, cloud=$CLOUD, certdir=$SSLDIR, certgroup=$SSLGRP"
+
+SUPPORTED_CLOUDS="GCE AWS AZURE"
+
+# TODO: Add support for discovery on other providers?
+if [ "${CLOUD}" == "GCE" ]; then
+  CLOUD_IP=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
+fi
+
+if [ "${CLOUD}" == "AWS" ]; then
+  CLOUD_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
+fi
+
+if [ "${CLOUD}" == "AZURE" ]; then
+  CLOUD_IP=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
+fi
+
+tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
+trap 'rm -rf "${tmpdir}"' EXIT
+cd "${tmpdir}"
+
+mkdir -p "${SSLDIR}"
+
+# Root CA
+openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
+openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
+
+# Apiserver
+openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
+openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
+openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
+
+# Nodes and Admin
+for i in node admin; do
+    openssl genrsa -out ${i}-key.pem 2048 > /dev/null 2>&1
+    openssl req -new -key ${i}-key.pem -out ${i}.csr -subj "/CN=kube-${i}" > /dev/null 2>&1
+    openssl x509 -req -in ${i}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${i}.pem -days 365 > /dev/null 2>&1
+done
+
+# Install certs
+mv *.pem ${SSLDIR}/
+chgrp ${SSLGRP} ${SSLDIR}/*
+chmod 600 ${SSLDIR}/*-key.pem
+chown root:root ${SSLDIR}/*-key.pem
diff --git a/roles/kubernetes/node/handlers/main.yml b/roles/kubernetes/node/handlers/main.yml
index 9abb8ff25a8f7ce470767c3974728064646cb896..162c4cde13654bd5d1b7a09fe7aaec9b324e6997 100644
--- a/roles/kubernetes/node/handlers/main.yml
+++ b/roles/kubernetes/node/handlers/main.yml
@@ -4,7 +4,6 @@
   notify:
     - reload systemd
     - restart reloaded-kubelet
-    - restart reloaded-proxy
 
 - name: reload systemd
   command: systemctl daemon-reload
@@ -19,14 +18,3 @@
   service:
     name: kubelet
     state: restarted
-
-- name: restart proxy
-  command: /bin/true
-  notify:
-    - reload systemd
-    - restart reloaded-proxy
-
-- name: restart reloaded-proxy
-  service:
-    name: kube-proxy
-    state: restarted
diff --git a/roles/kubernetes/node/meta/main.yml b/roles/kubernetes/node/meta/main.yml
deleted file mode 100644
index 31675692c658f3e11bc3b05d06503744c6a47cd1..0000000000000000000000000000000000000000
--- a/roles/kubernetes/node/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
-  - { role: kubernetes/common }
diff --git a/roles/kubernetes/node/tasks/config.yml b/roles/kubernetes/node/tasks/config.yml
deleted file mode 100644
index c1d5f29b220133032932e5b35054586a8a271125..0000000000000000000000000000000000000000
--- a/roles/kubernetes/node/tasks/config.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Get the node token values
-  slurp:
-    src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
-  with_items:
-    - "system:kubelet"
-    - "system:proxy"
-  register: tokens
-  run_once: true
-  delegate_to: "{{ groups['kube-master'][0] }}"
-
-- name: Set token facts
-  set_fact:
-    kubelet_token: "{{ tokens.results[0].content|b64decode }}"
-    proxy_token: "{{ tokens.results[1].content|b64decode }}"
-
-- name: Create kubelet environment vars dir
-  file: path=/etc/systemd/system/kubelet.service.d state=directory
-
-- name: Write kubelet config file
-  template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf backup=yes
-  notify:
-    - restart kubelet
-
-- name: write the kubecfg (auth) file for kubelet
-  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig backup=yes
-  notify:
-    - restart kubelet
-
-- name: Create proxy environment vars dir
-  file: path=/etc/systemd/system/kube-proxy.service.d state=directory
-
-- name: Write proxy config file
-  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf backup=yes
-  notify:
-    - restart proxy
-
-- name: write the kubecfg (auth) file for kube-proxy
-  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig backup=yes
-  notify:
-    - restart proxy
-
-- name: Enable kubelet
-  service:
-    name: kubelet
-    enabled: yes
-    state: started
-
-- name: Enable proxy
-  service:
-    name: kube-proxy
-    enabled: yes
-    state: started
diff --git a/roles/kubernetes/node/tasks/gen_certs.yml b/roles/kubernetes/node/tasks/gen_certs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a4f70ce54273cc8e2cfc7e771d2e45b1a53305d0
--- /dev/null
+++ b/roles/kubernetes/node/tasks/gen_certs.yml
@@ -0,0 +1,28 @@
+---
+- name: certs | install cert generation script
+  copy:
+    src=make-ssl.sh
+    dest={{ kube_script_dir }}
+    mode=0500
+  changed_when: false
+
+- name: certs | write openssl config
+  template:
+    src: "openssl.conf.j2"
+    dest: "{{ kube_config_dir }}/.openssl.conf"
+
+- name: certs | run cert generation script
+  shell: >
+    {{ kube_script_dir }}/make-ssl.sh
+    -f {{ kube_config_dir }}/.openssl.conf
+    -g {{ kube_cert_group }}
+    -d {{ kube_cert_dir }}
+  args:
+    creates: "{{ kube_cert_dir }}/apiserver.pem"
+
+- name: certs | check certificate permissions
+  file:
+    path={{ kube_cert_dir }}
+    group={{ kube_cert_group }}
+    owner=kube
+    recurse=yes
diff --git a/roles/kubernetes/common/tasks/gen_tokens.yml b/roles/kubernetes/node/tasks/gen_tokens.yml
similarity index 74%
rename from roles/kubernetes/common/tasks/gen_tokens.yml
rename to roles/kubernetes/node/tasks/gen_tokens.yml
index cf77d43996e19d43c664cc01f70c91cd73bb6674..f2e5625f95b4fcae9bc10691ae95ac70e0054be8 100644
--- a/roles/kubernetes/common/tasks/gen_tokens.yml
+++ b/roles/kubernetes/node/tasks/gen_tokens.yml
@@ -10,21 +10,17 @@
   environment:
     TOKEN_DIR: "{{ kube_token_dir }}"
   with_nested:
-    - [ "system:controller_manager", "system:scheduler", "system:kubectl", 'system:proxy' ]
-    - "{{ groups['kube-master'][0] }}"
+    - [ "system:kubectl" ]
+    - "{{ groups['kube-master'] }}"
   register: gentoken
   changed_when: "'Added' in gentoken.stdout"
-  notify:
-    - restart daemons
 
 - name: tokens | generate tokens for node components
   command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
   environment:
     TOKEN_DIR: "{{ kube_token_dir }}"
   with_nested:
-    - [ 'system:kubelet', 'system:proxy' ]
+    - [ 'system:kubelet' ]
     - "{{ groups['kube-node'] }}"
   register: gentoken
   changed_when: "'Added' in gentoken.stdout"
-  notify:
-    - restart daemons
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml
index 0772393ff20f51d916bb77c20e185659aab07d60..e1f45460ab630fd36c4a5baab166863acb43a361 100644
--- a/roles/kubernetes/node/tasks/install.yml
+++ b/roles/kubernetes/node/tasks/install.yml
@@ -1,20 +1,13 @@
 ---
-- name: Write kube-proxy systemd init file
-  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service backup=yes
-  notify: restart daemons
-
 - name: Write kubelet systemd init file
-  template: src=systemd-init/kubelet.service.j2 dest=/etc/systemd/system/kubelet.service backup=yes
-  notify: restart daemons
+  template: src=kubelet.service.j2 dest=/etc/systemd/system/kubelet.service backup=yes
+  notify: restart kubelet
 
-- name: Install kubernetes binaries
+- name: Install kubelet binary
   copy:
-     src={{ local_release_dir }}/kubernetes/bin/{{ item }}
+     src={{ local_release_dir }}/kubernetes/bin/kubelet
      dest={{ bin_dir }}
      owner=kube
      mode=u+x
-  with_items:
-    - kube-proxy
-    - kubelet
   notify:
-    - restart daemons
+    - restart kubelet
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index e0efbaf73cf7676ab3a1b4e1ba5b67d37c050ab7..7b5e29da9889c5f6d0729c24918dddf7a862522b 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -1,4 +1,48 @@
 ---
+- name: create kubernetes config directory
+  file: path={{ kube_config_dir }} state=directory
+
+- name: create kubernetes script directory
+  file: path={{ kube_script_dir }} state=directory
+
+- name: Make sure manifest directory exists
+  file: path={{ kube_manifest_dir }} state=directory
+
+- include: secrets.yml
+  tags:
+    - secrets
+
 - include: install.yml
-- include: config.yml
+
+- name: write the global config file
+  template:
+    src: config.j2
+    dest: "{{ kube_config_dir }}/config"
+  notify:
+    - restart kubelet
+
+- name: Create kubelet environment vars dir
+  file: path=/etc/systemd/system/kubelet.service.d state=directory
+
+- name: Write kubelet config file
+  template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf backup=yes
+  notify:
+    - restart kubelet
+
+- name: write the kubecfg (auth) file for kubelet
+  template: src=node-kubeconfig.yaml.j2 dest={{ kube_config_dir }}/node-kubeconfig.yaml backup=yes
+  notify:
+    - restart kubelet
+
+- name: Write proxy manifest
+  template: 
+    src: manifests/kube-proxy.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"
+
+- name: Enable kubelet
+  service:
+    name: kubelet
+    enabled: yes
+    state: started
+
 - include: temp_workaround.yml
diff --git a/roles/kubernetes/common/tasks/secrets.yml b/roles/kubernetes/node/tasks/secrets.yml
similarity index 54%
rename from roles/kubernetes/common/tasks/secrets.yml
rename to roles/kubernetes/node/tasks/secrets.yml
index c61e17d9b22b9e385ebd8fed94f7e976128d6055..1fdb99f989be6f8235b4cea6eb74dd4c42e38eee 100644
--- a/roles/kubernetes/common/tasks/secrets.yml
+++ b/roles/kubernetes/node/tasks/secrets.yml
@@ -29,26 +29,36 @@
   run_once: true
   when: inventory_hostname == groups['kube-master'][0]
 
-- name: Read back the CA certificate
-  slurp:
-    src: "{{ kube_cert_dir }}/ca.crt"
-  register: ca_cert
+- include: gen_tokens.yml
   run_once: true
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  when: inventory_hostname == groups['kube-master'][0]
 
-- name: certs | register the CA certificate as a fact for later use
-  set_fact:
-    kube_ca_cert: "{{ ca_cert.content|b64decode }}"
+# Sync certs between nodes
+- user:
+    name: '{{ansible_user_id}}'
+    generate_ssh_key: yes
+  delegate_to: "{{ groups['kube-master'][0] }}"
+  run_once: yes
 
-- name: certs | write CA certificate everywhere
-  copy: content="{{ kube_ca_cert }}" dest="{{ kube_cert_dir }}/ca.crt"
-  notify:
-    - restart daemons
+- name: 'get ssh keypair'
+  slurp: path=~/.ssh/id_rsa.pub
+  register: public_key
+  delegate_to: "{{ groups['kube-master'][0] }}"
 
-- debug: msg="{{groups['kube-master'][0]}} == {{inventory_hostname}}"
-  tags:
-    - debug
+- name: 'setup keypair on nodes'
+  authorized_key:
+    user: '{{ansible_user_id}}'
+    key: "{{public_key.content|b64decode }}"
 
-- include: gen_tokens.yml
-  run_once: true
-  when: inventory_hostname == groups['kube-master'][0]
+- name: synchronize certificates for nodes
+  synchronize:
+    src: "{{ item }}"
+    dest: "{{ kube_cert_dir }}"
+    recursive: yes
+    delete: yes
+    rsync_opts: [ '--one-file-system']
+  with_items:
+    - "{{ kube_cert_dir}}/ca.pem"
+    - "{{ kube_cert_dir}}/node.pem"
+    - "{{ kube_cert_dir}}/node-key.pem"
+  delegate_to: "{{ groups['kube-master'][0] }}"
diff --git a/roles/kubernetes/node/tasks/temp_workaround.yml b/roles/kubernetes/node/tasks/temp_workaround.yml
index 8dcefe5e8733462f25fa990d930c14ed7c48f449..a6ef09f4d79af6e8e87aee4e84c55c74606f2457 100644
--- a/roles/kubernetes/node/tasks/temp_workaround.yml
+++ b/roles/kubernetes/node/tasks/temp_workaround.yml
@@ -1,5 +1,2 @@
-- name: Warning Temporary workaround !!! Disable kubelet and kube-proxy on node startup
-  service: name={{ item }} enabled=no
-  with_items:
-    - kubelet
-    - kube-proxy
+- name: Warning Temporary workaround !!! Disable kubelet on node startup
+  service: name=kubelet enabled=no
diff --git a/roles/kubernetes/common/templates/config.j2 b/roles/kubernetes/node/templates/config.j2
similarity index 89%
rename from roles/kubernetes/common/templates/config.j2
rename to roles/kubernetes/node/templates/config.j2
index 526160a7bd3d096c5a77b1251e13ab33b4c9ed97..03752e1c90a5a84615d781d3354434c3ecc62bc5 100644
--- a/roles/kubernetes/common/templates/config.j2
+++ b/roles/kubernetes/node/templates/config.j2
@@ -17,10 +17,10 @@
 KUBE_LOGTOSTDERR="--logtostderr=true"
 
 # journal message level, 0 is debug
-KUBE_LOG_LEVEL="--v=5"
+KUBE_LOG_LEVEL="--v={{ kube_log_level | default('2') }}"
 
 # Should this cluster be allowed to run privileged docker containers
 KUBE_ALLOW_PRIV="--allow_privileged=true"
 
 # How the replication controller, scheduler, and proxy
-KUBE_MASTER="--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}"
+KUBE_MASTER="--master=https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }}"
diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2
index 0a516b5cc35b588c5fac04143783f905cefc0a29..02fce526f8224914648399a9cf42c4365254986f 100644
--- a/roles/kubernetes/node/templates/kubelet.j2
+++ b/roles/kubernetes/node/templates/kubelet.j2
@@ -1,18 +1,20 @@
 [Service]
 Environment="KUBE_LOGTOSTDERR=--logtostderr=true"
-Environment="KUBE_LOG_LEVEL=--v=0"
+Environment="KUBE_LOG_LEVEL=--v={{ kube_log_level | default('2') }}"
 Environment="KUBE_ALLOW_PRIV=--allow_privileged=true"
-Environment="KUBE_MASTER=--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}"
+Environment="KUBELET_API_SERVER=--api_servers={% for srv in groups['kube-master'] %}https://{{ srv }}:{{ kube_apiserver_port }}{% if not loop.last %},{% endif %}{% endfor %}"
 # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
 Environment="KUBELET_ADDRESS=--address=0.0.0.0"
 # The port for the info server to serve on
 # Environment="KUBELET_PORT=--port=10250"
 # You may leave this blank to use the actual hostname
 Environment="KUBELET_HOSTNAME=--hostname_override={{ inventory_hostname }}"
+{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
+Environment="KUBELET_REGISTER_NODE=--register-node=false"
+{% endif %}
 # location of the api-server
-Environment="KUBELET_API_SERVER=--api_servers=https://{{ groups['kube-master'][0]}}:{{ kube_master_port }}"
 {% if dns_setup %}
-Environment="KUBELET_ARGS=--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
+Environment="KUBELET_ARGS=--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }}"
 {% else %}
 Environment="KUBELET_ARGS=--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
 {% endif %}
diff --git a/roles/kubernetes/node/templates/kubelet.kubeconfig.j2 b/roles/kubernetes/node/templates/kubelet.kubeconfig.j2
deleted file mode 100644
index 28eda1e0305208084bd79d993d8ac8629355becb..0000000000000000000000000000000000000000
--- a/roles/kubernetes/node/templates/kubelet.kubeconfig.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: kubelet-to-{{ cluster_name }}
-preferences: {}
-clusters:
-- cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: https://{{ groups['kube-master'][0] }}:{{kube_master_port}}
-  name: {{ cluster_name }}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: kubelet
-  name: kubelet-to-{{ cluster_name }}
-users:
-- name: kubelet
-  user:
-    token: {{ kubelet_token }}
diff --git a/roles/kubernetes/node/templates/systemd-init/kubelet.service.j2 b/roles/kubernetes/node/templates/kubelet.service.j2
similarity index 95%
rename from roles/kubernetes/node/templates/systemd-init/kubelet.service.j2
rename to roles/kubernetes/node/templates/kubelet.service.j2
index 338b4b23c2e10c996166ad928d583c808c5cf40f..c09ff795d1f558280430c4d1455b23c6c0c33c02 100644
--- a/roles/kubernetes/node/templates/systemd-init/kubelet.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.service.j2
@@ -19,6 +19,7 @@ ExecStart={{ bin_dir }}/kubelet \
 	    $KUBELET_HOSTNAME \
 	    $KUBE_ALLOW_PRIV \
 	    $KUBELET_ARGS \
+	    $KUBELET_REGISTER_NODE \
 	    $KUBELET_NETWORK_PLUGIN
 Restart=on-failure
 
diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
new file mode 100644
index 0000000000000000000000000000000000000000..8b7ee708e2c61ec7c875e810da2bb07245b69bb1
--- /dev/null
+++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kube-proxy
+  namespace: kube-system
+spec:
+  hostNetwork: true
+  containers:
+  - name: kube-proxy
+    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
+    command:
+    - /hyperkube
+    - proxy
+    - --v={{ kube_log_level | default('2') }}
+{% if inventory_hostname in groups['kube-master'] %}
+    - --master=http://127.0.0.1:{{kube_apiserver_insecure_port}}
+{% else %}
+{%   if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %}
+    - --master=https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port }}
+{%   else %}
+    - --master=https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }}
+{%   endif%}
+    - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml
+{% endif %}
+    securityContext:
+      privileged: true
+    volumeMounts:
+    - mountPath: /etc/ssl/certs
+      name: ssl-certs-host
+      readOnly: true
+    - mountPath: /etc/kubernetes/node-kubeconfig.yaml
+      name: "kubeconfig"
+      readOnly: true
+    - mountPath: /etc/kubernetes/ssl
+      name: "etc-kube-ssl"
+      readOnly: true
+  volumes:
+  - name: ssl-certs-host
+    hostPath:
+      path: /usr/share/ca-certificates
+  - name: "kubeconfig"
+    hostPath:
+      path: "/etc/kubernetes/node-kubeconfig.yaml"
+  - name: "etc-kube-ssl"
+    hostPath:
+      path: "/etc/kubernetes/ssl"
diff --git a/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 b/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..d21b8eef3bd4a1762c2e964e55d5eebf17a490d5
--- /dev/null
+++ b/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Config
+clusters:
+- name: local
+  cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.pem
+users:
+- name: kubelet
+  user:
+    client-certificate: {{ kube_cert_dir }}/node.pem
+    client-key: {{ kube_cert_dir }}/node-key.pem
+contexts:
+- context:
+    cluster: local
+    user: kubelet
+  name: kubelet-{{ cluster_name }}
+current-context: kubelet-{{ cluster_name }}
diff --git a/roles/kubernetes/node/templates/openssl.conf.j2 b/roles/kubernetes/node/templates/openssl.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c594e333782d7d0cbb22cd50fb16b36a626805e1
--- /dev/null
+++ b/roles/kubernetes/node/templates/openssl.conf.j2
@@ -0,0 +1,20 @@
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = kubernetes
+DNS.2 = kubernetes.default
+DNS.3 = kubernetes.default.svc.{{ dns_domain }}
+{% if loadbalancer_apiserver is defined  and apiserver_loadbalancer_domain_name is defined %}
+DNS.4 = {{ apiserver_loadbalancer_domain_name }}
+{% endif %}
+{% for host in groups['kube-master'] %}
+IP.{{ loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+{% endfor %}
+{% set idx =  groups['kube-master'] | length | int + 1 %}
+IP.{{ idx | string }} = {{ kube_apiserver_ip }}
diff --git a/roles/kubernetes/node/templates/proxy.j2 b/roles/kubernetes/node/templates/proxy.j2
deleted file mode 100644
index f529d7d5e514fda97c425d2d8e1876776a4d631a..0000000000000000000000000000000000000000
--- a/roles/kubernetes/node/templates/proxy.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-###
-# kubernetes proxy config
-
-# default config should be adequate
-[Service]
-Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig --proxy-mode={{kube_proxy_mode}}"
diff --git a/roles/kubernetes/node/templates/proxy.kubeconfig.j2 b/roles/kubernetes/node/templates/proxy.kubeconfig.j2
deleted file mode 100644
index 78d181631e5bfa99aa89f96d75a9804c36dea6d2..0000000000000000000000000000000000000000
--- a/roles/kubernetes/node/templates/proxy.kubeconfig.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: proxy-to-{{ cluster_name }}
-preferences: {}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: proxy
-  name: proxy-to-{{ cluster_name }}
-clusters:
-- cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.crt
-    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
-  name: {{ cluster_name }}
-users:
-- name: proxy
-  user:
-    token: {{ proxy_token }}
diff --git a/roles/kubernetes/node/templates/systemd-init/kube-proxy.service.j2 b/roles/kubernetes/node/templates/systemd-init/kube-proxy.service.j2
deleted file mode 100644
index b1170c5d883183ef2cf5f7f11881920f0ec775e5..0000000000000000000000000000000000000000
--- a/roles/kubernetes/node/templates/systemd-init/kube-proxy.service.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-[Unit]
-Description=Kubernetes Kube-Proxy Server
-Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
-After=docker.service calico-node.service
-{% else %}
-After=docker.service
-{% endif %}
-
-[Service]
-EnvironmentFile=/etc/kubernetes/config
-EnvironmentFile=/etc/network-environment
-ExecStart={{ bin_dir }}/kube-proxy \
-	    $KUBE_LOGTOSTDERR \
-	    $KUBE_LOG_LEVEL \
-	    $KUBE_MASTER \
-	    $KUBE_PROXY_ARGS
-Restart=on-failure
-LimitNOFILE=65536
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/network_plugin/tasks/calico.yml b/roles/network_plugin/tasks/calico.yml
index c461f5607a7e0e1da75f6acef6eabfb130abadb3..eba8967d1a60dba5e9e7e6ac9de1a9dabf8d3d15 100644
--- a/roles/network_plugin/tasks/calico.yml
+++ b/roles/network_plugin/tasks/calico.yml
@@ -9,6 +9,12 @@
 - name: Calico | Create calicoctl symlink (needed by kubelet)
   file: src=/usr/local/bin/calicoctl dest=/usr/bin/calicoctl state=link
 
+- name: Calico | Configure calico-node desired pool
+  shell: calicoctl pool add {{ kube_pods_subnet }}
+  environment:
+     ETCD_AUTHORITY: "{{ groups['etcd'][0] }}:2379"
+  run_once: true
+
 - name: Calico | Write calico-node systemd init file
   template: src=calico/calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
   register: newservice
@@ -24,18 +30,6 @@
 - name: Calico | Enable calico-node
   service: name=calico-node enabled=yes state=started
 
-- name: Calico | Configure calico-node desired pool
-  shell: calicoctl pool add {{ kube_pods_subnet }}
-  environment:
-     ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
-  run_once: true
-
-- name: Calico | Configure calico-node remove default pool
-  shell: calicoctl pool remove 192.168.0.0/16
-  environment:
-     ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
-  run_once: true
-
 - name: Calico | Disable node mesh
   shell: calicoctl bgp node-mesh off
   when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
diff --git a/roles/network_plugin/tasks/flannel.yml b/roles/network_plugin/tasks/flannel.yml
index fc06c55ce117e216c2415313b85f4c960c8ff214..a43be9e403212cc42e452dc87a7f3791eb2fdb21 100644
--- a/roles/network_plugin/tasks/flannel.yml
+++ b/roles/network_plugin/tasks/flannel.yml
@@ -44,10 +44,6 @@
   run_once: true
   delegate_to: "{{ groups['kube-master'][0] }}"
 
-- name: Write network-environment
-  template: src=flannel/network-environment.j2 dest=/etc/network-environment mode=u+x
-  notify: restart flannel
-
 - name: Launch Flannel
   service: name=flannel state=started enabled=yes
   notify:
diff --git a/roles/network_plugin/tasks/main.yml b/roles/network_plugin/tasks/main.yml
index 4b6c8c66a6ca018b89f405405d1200a43d664a0d..e3ebf305ff372ad083c620ad8362504138ee785d 100644
--- a/roles/network_plugin/tasks/main.yml
+++ b/roles/network_plugin/tasks/main.yml
@@ -4,13 +4,12 @@
   when: ( kube_network_plugin is defined and kube_network_plugin == "calico" and kube_network_plugin == "flannel" ) or
         kube_network_plugin is not defined 
 
+- name: Write network-environment
+  template: src=network-environment.j2 dest=/etc/network-environment mode=u+x
+
 - include: flannel.yml
   when: kube_network_plugin == "flannel"
 
-- name: Calico | Write network-environment
-  template: src=calico/network-environment.j2 dest=/etc/network-environment mode=u+x
-  when: kube_network_plugin == "calico"
-
 - include: calico.yml
   when: kube_network_plugin == "calico"
 
diff --git a/roles/network_plugin/templates/flannel/network-environment.j2 b/roles/network_plugin/templates/flannel/network-environment.j2
deleted file mode 100644
index ac0b171d49d64b87277e501b57682d43cfcbe3b8..0000000000000000000000000000000000000000
--- a/roles/network_plugin/templates/flannel/network-environment.j2
+++ /dev/null
@@ -1 +0,0 @@
-FLANNEL_ETCD_PREFIX="--etcd-prefix=/{{ cluster_name }}/network"
diff --git a/roles/network_plugin/templates/calico/network-environment.j2 b/roles/network_plugin/templates/network-environment.j2
similarity index 59%
rename from roles/network_plugin/templates/calico/network-environment.j2
rename to roles/network_plugin/templates/network-environment.j2
index 2407f1ecbf5bc89a1e928eaecc20782ee84e7898..5793e881853f6836e372e1f9168847d490670e20 100755
--- a/roles/network_plugin/templates/calico/network-environment.j2
+++ b/roles/network_plugin/templates/network-environment.j2
@@ -1,19 +1,25 @@
-#! /usr/bin/bash
-# This node's IPv4 address
-CALICO_IPAM=true
-DEFAULT_IPV4={{ip | default(ansible_default_ipv4.address) }}
-
-{% if inventory_hostname in groups['kube-node'] %}
-# The kubernetes master IP
-KUBERNETES_MASTER={{ groups['kube-master'][0] }}
-
-# Location of etcd cluster used by Calico.  By default, this uses the etcd
-# instance running on the Kubernetes Master
-ETCD_AUTHORITY={{ groups['kube-master'][0] }}:4001
-
-# The kubernetes-apiserver location - used by the calico plugin
-KUBE_API_ROOT=http://{{ groups['kube-master'][0] }}:{{kube_master_insecure_port}}/api/v1/
-
-# Location of the calicoctl binary - used by the calico plugin
-CALICOCTL_PATH="{{ bin_dir }}/calicoctl"
-{% endif %}
+#! /usr/bin/bash
+{% if kube_network_plugin == "calico" %}
+# This node's IPv4 address
+CALICO_IPAM=true
+DEFAULT_IPV4={{ip | default(ansible_default_ipv4.address) }}
+
+# The kubernetes master IP
+KUBERNETES_MASTER={{ groups['kube-master'][0] }}
+
+# Location of etcd cluster used by Calico.  By default, this uses the etcd
+# instance running on the Kubernetes Master
+{% if inventory_hostname in groups['etcd'] %}
+ETCD_AUTHORITY="127.0.0.1:2379"
+{% else %}
+ETCD_AUTHORITY="127.0.0.1:23799"
+{% endif %}
+
+# The kubernetes-apiserver location - used by the calico plugin
+KUBE_API_ROOT=http://{{ groups['kube-master'][0] }}:{{kube_apiserver_insecure_port}}/api/v1/
+
+# Location of the calicoctl binary - used by the calico plugin
+CALICOCTL_PATH="{{ bin_dir }}/calicoctl"
+{% else %}
+FLANNEL_ETCD_PREFIX="--etcd-prefix=/{{ cluster_name }}/network"
+{% endif %}