diff --git a/.gitignore b/.gitignore
index 66c9b48678b97dbce998fa7b2c1aa326f5844030..8da099d427ecd391cca1b3e9b3aead84a0b9f8fc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 .vagrant
 *.retry
 inventory/vagrant_ansible_inventory
+inventory/credentials/
 inventory/group_vars/fake_hosts.yml
 inventory/host_vars/
 temp
@@ -23,7 +24,7 @@ __pycache__/
 
 # Distribution / packaging
 .Python
-artifacts/
+inventory/*/artifacts/
 env/
 build/
 credentials/
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 3d49fd26d080e8971ddcdc51c489bd30b80e38c9..5af63147692f65a35a6eb4e6892849885cbb3965 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -109,7 +109,6 @@ before_script:
       ${SSH_ARGS}
       ${LOG_LEVEL}
       -e @${CI_TEST_VARS}
-      -e ansible_python_interpreter=${PYPATH}
       -e ansible_ssh_user=${SSH_USER}
       -e local_release_dir=${PWD}/downloads
       --limit "all:!fake_hosts"
@@ -129,7 +128,6 @@ before_script:
       ${SSH_ARGS}
       ${LOG_LEVEL}
       -e @${CI_TEST_VARS}
-      -e ansible_python_interpreter=${PYPATH}
       -e ansible_ssh_user=${SSH_USER}
       -e local_release_dir=${PWD}/downloads
       --limit "all:!fake_hosts"
@@ -257,10 +255,14 @@ before_script:
 # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
-.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
+.coreos_cilium_variables: &coreos_cilium_variables
 # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
+.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
+# stage: deploy-special
+  MOVED_TO_GROUP_VARS: "true"
+
 .rhel7_weave_variables: &rhel7_weave_variables
 # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"
@@ -320,16 +322,6 @@ gce_coreos-calico-aio:
   only: [/^pr-.*$/]
 
 ### PR JOBS PART2
-do_ubuntu-canal-ha:
-  stage: deploy-part2
-  <<: *job
-  <<: *do
-  variables:
-    <<: *do_variables
-  when: on_success
-  except: ['triggers']
-  only: [/^pr-.*$/]
-
 gce_centos7-flannel-addons:
   stage: deploy-part2
   <<: *job
@@ -363,7 +355,6 @@ gce_coreos-calico-sep-triggers:
   when: on_success
   only: ['triggers']
 
-
 gce_ubuntu-canal-ha-triggers:
   stage: deploy-part2
   <<: *job
@@ -396,6 +387,16 @@ gce_ubuntu-weave-sep-triggers:
   only: ['triggers']
 
 # More builds for PRs/merges (manual) and triggers (auto)
+do_ubuntu-canal-ha:
+  stage: deploy-part2
+  <<: *job
+  <<: *do
+  variables:
+    <<: *do_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
 gce_ubuntu-canal-ha:
   stage: deploy-part2
   <<: *job
@@ -460,6 +461,17 @@ gce_ubuntu-contiv-sep:
   except: ['triggers']
   only: ['master', /^pr-.*$/]
 
+gce_coreos-cilium:
+  stage: deploy-special
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *coreos_cilium_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
 gce_ubuntu-cilium-sep:
   stage: deploy-special
   <<: *job
diff --git a/README.md b/README.md
index df80c27fff633fffc902f5c6acd118ade58305c3..3bd0ebfb9c46f8587ef34883e4b3bcba2b82c0dc 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@ Deploy a Production Ready Kubernetes Cluster
 
 If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
 
--   Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
+-   Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
 -   **High available** cluster
 -   **Composable** (Choice of the network plugin for instance)
 -   Support most popular **Linux distributions**
@@ -66,24 +66,25 @@ Supported Linux Distributions
 -----------------------------
 
 -   **Container Linux by CoreOS**
--   **Debian** Jessie
+-   **Debian** Jessie, Stretch, Wheezy
 -   **Ubuntu** 16.04
 -   **CentOS/RHEL** 7
+-   **Fedora/CentOS** Atomic
 
 Note: Upstart/SysV init based OS types are not supported.
 
 Versions of supported components
 --------------------------------
 
--   [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.2
+-   [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.5
 -   [etcd](https://github.com/coreos/etcd/releases) v3.2.4
--   [flanneld](https://github.com/coreos/flannel/releases) v0.8.0
--   [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0
+-   [flanneld](https://github.com/coreos/flannel/releases) v0.10.0
+-   [calico](https://docs.projectcalico.org/v2.6/releases/) v2.6.8
 -   [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
--   [cilium](https://github.com/cilium/cilium) v1.0.0-rc4
--   [contiv](https://github.com/contiv/install/releases) v1.0.3
--   [weave](http://weave.works/) v2.0.1
--   [docker](https://www.docker.com/) v1.13 (see note)
+-   [cilium](https://github.com/cilium/cilium) v1.0.0-rc8
+-   [contiv](https://github.com/contiv/install/releases) v1.1.7
+-   [weave](http://weave.works/) v2.2.1
+-   [docker](https://www.docker.com/) v17.03 (see note)
 -   [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
 
 Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
@@ -150,5 +151,5 @@ CI Tests
 
 [![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
 
-CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
+CI/end-to-end tests sponsored by Google (GCE).
 See the [test matrix](docs/test_cases.md) for details.
diff --git a/Vagrantfile b/Vagrantfile
index 9db4be3a1b963449c434cc8f5bc0bb75e7d0e7f5..536bbff2bf123c606d4a632584b2e6203db172a7 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -3,7 +3,7 @@
 
 require 'fileutils'
 
-Vagrant.require_version ">= 1.9.0"
+Vagrant.require_version ">= 2.0.0"
 
 CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
 
@@ -135,12 +135,6 @@ Vagrant.configure("2") do |config|
 
       config.vm.network :private_network, ip: ip
 
-      # workaround for Vagrant 1.9.1 and centos vm
-      # https://github.com/hashicorp/vagrant/issues/8096
-      if Vagrant::VERSION == "1.9.1" && $os == "centos"
-        config.vm.provision "shell", inline: "service network restart", run: "always"
-      end
-
       # Disable swap for each vm
       config.vm.provision "shell", inline: "swapoff -a"
 
@@ -164,7 +158,7 @@ Vagrant.configure("2") do |config|
           if File.exist?(File.join(File.dirname($inventory), "hosts"))
             ansible.inventory_path = $inventory
           end
-          ansible.sudo = true
+          ansible.become = true
           ansible.limit = "all"
           ansible.host_key_checking = false
           ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
diff --git a/ansible.cfg b/ansible.cfg
index 732e3bf6e311279229c2f7dea061ab8bd4a3e9b5..6f381690e42d6dc890dc4d0ae1eeb075abba66c2 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -12,3 +12,5 @@ library = ./library
 callback_whitelist = profile_tasks
 roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
 deprecation_warnings=False
+inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
+jinja2_extensions = jinja2.ext.do
diff --git a/cluster.yml b/cluster.yml
index 995e363090cef1cd53eb069f7de42fcaac453cfb..fb7dec4cbab2764134b166da2a52cc133eba2daf 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -21,6 +21,12 @@
   vars:
     ansible_ssh_pipelining: true
   gather_facts: true
+  pre_tasks:
+    - name: gather facts from all instances
+      setup:
+      delegate_to: "{{item}}"
+      delegate_facts: True
+      with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
 
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -94,6 +100,8 @@
     - { role: kubespray-defaults}
     - { role: kubernetes-apps/network_plugin, tags: network }
     - { role: kubernetes-apps/policy_controller, tags: policy-controller }
+    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
+    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
 
 - hosts: calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
diff --git a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
index 696be6d57e8b26b6e099957944e06ba841739a8e..3e9728e715a3654d35e5b9064fd75e65ac8ec09b 100644
--- a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
+++ b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
@@ -1,6 +1,6 @@
 
 {% for vm in  vm_ip_list %}
-{% if not use_bastion or vm.virtualMachinename == 'bastion' %}
+{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
 {{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
 {% else %}
 {{ vm.virtualMachine.name }} ansible_ssh_host={{  vm.virtualMachine.network.privateIpAddresses[0] }}
diff --git a/contrib/packaging/rpm/kubespray.spec b/contrib/packaging/rpm/kubespray.spec
index 7080cf99857547e127b5a98af43f53ec6eda412b..6ec3ffca70552a8ee2bcf7867346aea166cdf15a 100644
--- a/contrib/packaging/rpm/kubespray.spec
+++ b/contrib/packaging/rpm/kubespray.spec
@@ -20,9 +20,10 @@ BuildRequires:  python2-setuptools
 BuildRequires:  python-d2to1
 BuildRequires:  python2-pbr
 
-Requires: ansible
+Requires: ansible >= 2.4.0
 Requires: python-jinja2 >= 2.10
 Requires: python-netaddr
+Requires: python-pbr
 
 %description
 
diff --git a/contrib/terraform/aws/templates/inventory.tpl b/contrib/terraform/aws/templates/inventory.tpl
index 2bb772549a63269c47e224d0dddf61ac998aa758..20a8a69a6af86d68322e5844fd55c2c21037ba5c 100644
--- a/contrib/terraform/aws/templates/inventory.tpl
+++ b/contrib/terraform/aws/templates/inventory.tpl
@@ -24,4 +24,4 @@ kube-master
 
 
 [k8s-cluster:vars]
-${elb_api_fqdn}
+${elb_api_fqdn}
\ No newline at end of file
diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md
index 6ff0860ca9f478968f8ff86704e887315e68f9d4..ed11bef1ef70fadd2b0b7c47bf18446bcad7b201 100644
--- a/contrib/terraform/openstack/README.md
+++ b/contrib/terraform/openstack/README.md
@@ -17,32 +17,33 @@ to actually install kubernetes and stand up the cluster.
 
 ### Networking
 The configuration includes creating a private subnet with a router to the
-external net. It will allocate floating-ips from a pool and assign them to the
+external net. It will allocate floating IPs from a pool and assign them to the
 hosts where that makes sense. You have the option of creating bastion hosts
-inside the private subnet to access the nodes there.
+inside the private subnet to access the nodes there.  Alternatively, a node with
+a floating IP can be used as a jump host to nodes without.
 
 ### Kubernetes Nodes
 You can create many different kubernetes topologies by setting the number of
 different classes of hosts. For each class there are options for allocating
-floating ip addresses or not.
-- Master Nodes with etcd
+floating IP addresses or not.
+- Master nodes with etcd
 - Master nodes without etcd
 - Standalone etcd hosts
 - Kubernetes worker nodes
 
-Note that the ansible script will report an invalid configuration if you wind up
+Note that the Ansible script will report an invalid configuration if you wind up
 with an even number of etcd instances since that is not a valid configuration.
 
-### Gluster FS
-The terraform configuration supports provisioning of an optional GlusterFS
+### GlusterFS
+The Terraform configuration supports provisioning of an optional GlusterFS
 shared file system based on a separate set of VMs. To enable this, you need to
-specify
-- the number of gluster hosts
+specify:
+- the number of Gluster hosts (minimum 2)
 - Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks
 - Other properties related to provisioning the hosts
 
 Even if you are using Container Linux by CoreOS for your cluster, you will still
-need the GlusterFS VMs to be based on either Debian or RedHat based images,
+need the GlusterFS VMs to be based on either Debian or RedHat based images.
 Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through
 binaries available on hyperkube v1.4.3_coreos.0 or higher.
 
@@ -50,9 +51,9 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.
 
 - [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
 - [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
-- you already have a suitable OS image in glance
-- you already have a floating-ip pool created
-- you have security-groups enabled
+- you already have a suitable OS image in Glance
+- you already have a floating IP pool created
+- you have security groups enabled
 - you have a pair of keys generated that can be used to secure the new hosts
 
 ## Module Architecture
@@ -67,7 +68,7 @@ any external references to the floating IP (e.g. DNS) that would otherwise have
 to be updated.
 
 You can force your existing IPs by modifying the compute variables in
-`kubespray.tf` as
+`kubespray.tf` as follows:
 
 ```
 k8s_master_fips = ["151.101.129.67"]
@@ -75,30 +76,42 @@ k8s_node_fips = ["151.101.129.68"]
 ```
 
 ## Terraform
-Terraform will be used to provision all of the OpenStack resources. It is also
-used to deploy and provision the software requirements.
+Terraform will be used to provision all of the OpenStack resources, along with base software as appropriate.
 
-### Prep
+### Configuration
 
-#### OpenStack
+#### Inventory files
 
-No provider variables are hard coded inside `variables.tf` because Terraform
-supports various authentication method for OpenStack, between identity v2 and
-v3 API, `openrc` or `clouds.yaml`.
+Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
+
+```ShellSession
+$ cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER
+$ cd inventory/$CLUSTER
+$ ln -s ../../contrib/terraform/openstack/hosts
+```
+
+This will be the base for subsequent Terraform commands.
+
+#### OpenStack access and credentials
+
+No provider variables are hardcoded inside `variables.tf` because Terraform
+supports various authentication methods for OpenStack: the older script-and-environment
+method (using `openrc`) as well as a newer declarative method. In addition,
+different OpenStack environments may support Identity API version 2 or 3.
 
 These are examples and may vary depending on your OpenStack cloud provider,
 for an exhaustive list on how to authenticate on OpenStack with Terraform
 please read the [OpenStack provider documentation](https://www.terraform.io/docs/providers/openstack/).
 
-##### Recommended method : clouds.yaml
+##### Declarative method (recommended)
 
-Newer recommended authentication method is to use a `clouds.yaml` file that can be store in :
+The recommended authentication method is to describe credentials in a YAML file `clouds.yaml` that can be stored in:
 
-* `Current Directory`
+* the current directory
 * `~/.config/openstack`
 * `/etc/openstack`
 
-`clouds.yaml` :
+`clouds.yaml`:
 
 ```
 clouds:
@@ -116,18 +129,19 @@ clouds:
 ```
 
 If you have multiple clouds defined in your `clouds.yaml` file you can choose
-the one you want to use with the environment variable `OS_CLOUD` :
+the one you want to use with the environment variable `OS_CLOUD`:
 
 ```
 export OS_CLOUD=mycloud
 ```
 
-##### Deprecated method : openrc
+##### Openrc method (deprecated)
 
 When using classic environment variables, Terraform uses default `OS_*`
-environment variables :
+environment variables.  A script suitable for your environment may be available
+from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*.
 
-With identity v2 :
+With identity v2:
 
 ```
 source openrc
@@ -144,7 +158,7 @@ OS_INTERFACE=public
 OS_IDENTITY_API_VERSION=2
 ```
 
-With identity v3 :
+With identity v3:
 
 ```
 source openrc
@@ -164,7 +178,7 @@ OS_USER_DOMAIN_NAME=Default
 ```
 
 Terraform does not support a mix of DomainName and DomainID, choose one or the
-other :
+other:
 
 ```
 * provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username
@@ -180,14 +194,11 @@ unset OS_PROJECT_DOMAIN_ID
 set OS_PROJECT_DOMAIN_NAME=Default
 ```
 
-### Terraform Variables
+#### Cluster variables
 The construction of the cluster is driven by values found in
 [variables.tf](variables.tf).
 
-The best way to set these values is to create a file in the project's root
-directory called something like`my-terraform-vars.tfvars`. Many of the
-variables are obvious. Here is a summary of some of the more interesting
-ones:
+For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
 
 |Variable | Description |
 |---------|-------------|
@@ -208,9 +219,9 @@ ones:
 |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
 | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
 
-### Terraform files
+#### Terraform state files
 
-In the root folder, the following files might be created (either by Terraform
+In the cluster's inventory folder, the following files might be created (either by Terraform
 or manually), to prevent you from pushing them accidentally they are in a
 `.gitignore` file in the `terraform/openstack` directory :
 
@@ -221,49 +232,61 @@ or manually), to prevent you from pushing them accidentally they are in a
 
 You can still add them manually if you want to.
 
-## Initializing Terraform
+### Initialization
 
-Before Terraform can operate on your cluster you need to install required
-plugins. This is accomplished with the command
+Before Terraform can operate on your cluster you need to install the required
+plugins. This is accomplished as follows:
 
-```bash
-$ terraform init contrib/terraform/openstack
+```ShellSession
+$ cd inventory/$CLUSTER
+$ terraform init ../../contrib/terraform/openstack
 ```
 
-## Provisioning Cluster with Terraform
-You can apply the terraform config to your cluster with the following command
-issued from the project's root directory
-```bash
-$ terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
+This should finish fairly quickly, telling you that Terraform has successfully initialized and loaded the necessary modules.
+
+### Provisioning cluster
+You can apply the Terraform configuration to your cluster with the following command
+issued from your cluster's inventory directory (`inventory/$CLUSTER`):
+```ShellSession
+$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
 ```
 
 if you chose to create a bastion host, this script will create
-`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for ansible to
-be able to access your machines tunneling  through the bastion's ip adress. If
+`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
+be able to access your machines, tunneling through the bastion's IP address. If
 you want to manually handle the ssh tunneling to these machines, please delete
 or move that file. If you want to use this, just leave it there, as ansible will
 pick it up automatically.
 
+### Destroying cluster
+You can destroy your new cluster with the following command issued from the cluster's inventory directory:
 
-## Destroying Cluster with Terraform
-You can destroy a config deployed to your cluster with the following command
-issued from the project's root directory
-```bash
-$ terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
+```ShellSession
+$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack
 ```
 
-## Debugging Cluster Provisioning
+If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
+
+* remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
+* clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
+
+### Debugging
 You can enable debugging output from Terraform by setting
-`OS_DEBUG` to 1 and`TF_LOG` to`DEBUG` before runing the terraform command
+`OS_DEBUG` to 1 and `TF_LOG` to `DEBUG` before running the Terraform command.
+
+### Terraform output
 
-## Terraform output
+Terraform can output values that are useful for configuring Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment:
 
-Terraform can output useful values that need to be reused if you want to use Kubernetes OpenStack cloud provider with Neutron/Octavia LBaaS or Cinder persistent Volume provisioning:
+ - `private_subnet_id`: the subnet where your instances are running; used for `openstack_lbaas_subnet_id`
+ - `floating_network_id`: the network_id where the floating IPs are provisioned; used for `openstack_lbaas_floating_network_id`
 
- - `private_subnet_id`: the subnet where your instances are running, maps to `openstack_lbaas_subnet_id`
- - `floating_network_id`: the network_id where the floating IP are provisioned, maps to `openstack_lbaas_floating_network_id`
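+A hedged sketch of reading these outputs (run from `inventory/$CLUSTER`, assuming the Terraform state was written there; set the group variables named above to the returned values):
+
+```ShellSession
+$ terraform output private_subnet_id
+$ terraform output floating_network_id
+```
+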
+## Ansible
+
+### Node access
+
+#### SSH
 
-# Running the Ansible Script
 Ensure your local ssh-agent is running and your ssh key has been added. This
 step is required by the terraform provisioner:
 
@@ -272,11 +295,22 @@ $ eval $(ssh-agent -s)
 $ ssh-add ~/.ssh/id_rsa
 ```
 
+If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file (`~/.ssh/known_hosts`).
+
+#### Bastion host
+
+If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content.  Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that.
+
+```
+ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"'
+```
+
+#### Test access
 
-Make sure you can connect to the hosts:
+Make sure you can connect to the hosts.  Note that hosts running Container Linux by CoreOS will have a state `FAILED` due to Python not being present.  This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
 
 ```
-$ ansible -i contrib/terraform/openstack/hosts -m ping all
+$ ansible -i inventory/$CLUSTER/hosts -m ping all
 example-k8s_node-1 | SUCCESS => {
     "changed": false,
     "ping": "pong"
@@ -291,21 +325,17 @@ example-k8s-master-1 | SUCCESS => {
 }
 ```
 
-if you are deploying a system that needs bootstrapping, like Container Linux by
-CoreOS, these might have a state`FAILED` due to Container Linux by CoreOS not
-having python. As long as the state is not`UNREACHABLE`, this is fine.
-
-if it fails try to connect manually via SSH ... it could be something as simple as a stale host key.
+If it fails try to connect manually via SSH.  It could be something as simple as a stale host key.
 
-## Configure Cluster variables
+### Configure cluster variables
 
-Edit `inventory/sample/group_vars/all.yml`:
-- Set variable **bootstrap_os** according selected image
+Edit `inventory/$CLUSTER/group_vars/all.yml`:
+- Set variable **bootstrap_os** appropriately for your desired image:
 ```
 # Valid bootstrap options (required): ubuntu, coreos, centos, none
 bootstrap_os: coreos
 ```
-- **bin_dir**
+- **bin_dir**:
 ```
 # Directory where the binaries will be installed
 # Default:
@@ -313,20 +343,19 @@ bootstrap_os: coreos
 # For Container Linux by CoreOS:
 bin_dir: /opt/bin
 ```
-- and **cloud_provider**
+- and **cloud_provider**:
 ```
 cloud_provider: openstack
 ```
-Edit `inventory/sample/group_vars/k8s-cluster.yml`:
-- Set variable **kube_network_plugin** according selected networking
+Edit `inventory/$CLUSTER/group_vars/k8s-cluster.yml`:
+- Set variable **kube_network_plugin** to your desired networking plugin.
+  - **flannel** works out-of-the-box
+  - **calico** requires [configuring OpenStack Neutron ports](/docs/openstack.md) to allow service and pod subnets
 ```
 # Choose network plugin (calico, weave or flannel)
 # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
 kube_network_plugin: flannel
 ```
-> flannel works out-of-the-box
-
-> calico requires allowing service's and pod's subnets on according OpenStack Neutron ports
 - Set variable **resolvconf_mode**
 ```
 # Can be docker_dns, host_resolvconf or none
@@ -336,18 +365,19 @@ kube_network_plugin: flannel
 resolvconf_mode: host_resolvconf
 ```
 
-For calico configure OpenStack Neutron ports: [OpenStack](/docs/openstack.md)
-
-## Deploy kubernetes:
+### Deploy Kubernetes
 
 ```
-$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
+$ ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
 ```
 
-## Set up local kubectl
-1. Install kubectl on your workstation:
-[Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
-2. Add route to internal IP of master node (if needed):
+This will take some time as there are many tasks to run.
+
+## Kubernetes
+
+### Set up kubectl
+1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation
+2. Add a route to the internal IP of a master node (if needed):
 ```
 sudo route add [master-internal-ip] gw [router-ip]
 ```
@@ -355,28 +385,28 @@ or
 ```
 sudo route add -net [internal-subnet]/24 gw [router-ip]
 ```
-3. List Kubernetes certs&keys:
+3. List Kubernetes certificates & keys:
 ```
 ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
 ```
-4. Get admin's certs&key:
+4. Get `admin`'s certificates and keys:
 ```
 ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
 ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
 ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
 ```
 5. Configure kubectl:
-```
-kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
+```ShellSession
+$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
     --certificate-authority=ca.pem
 
-kubectl config set-credentials default-admin \
+$ kubectl config set-credentials default-admin \
     --certificate-authority=ca.pem \
     --client-key=admin-key.pem \
     --client-certificate=admin.pem
 
-kubectl config set-context default-system --cluster=default-cluster --user=default-admin
-kubectl config use-context default-system
+$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin
+$ kubectl config use-context default-system
 ```
 7. Check it:
 ```
@@ -393,14 +423,15 @@ You can tell kubectl to ignore this condition by adding the
 
 ## GlusterFS
 GlusterFS is not deployed by the standard`cluster.yml` playbook, see the
-[glusterfs playbook documentation](../../network-storage/glusterfs/README.md)
+[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md)
 for instructions.
 
-Basically you will install gluster as
-```bash
-$ ansible-playbook --become -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
+Basically, you will install Gluster as follows:
+```ShellSession
+$ ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
 ```
 
 
-# What's next
-[Start Hello Kubernetes Service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/)
+## What's next
+
+Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).
diff --git a/contrib/terraform/openstack/ansible_bastion_template.txt b/contrib/terraform/openstack/ansible_bastion_template.txt
index cdf0120668a54795d850369882e8d85705bd842d..a304b2c9d5dc1703df6e09990ab9b9ebe98762d7 100644
--- a/contrib/terraform/openstack/ansible_bastion_template.txt
+++ b/contrib/terraform/openstack/ansible_bastion_template.txt
@@ -1 +1 @@
-ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"' 
+ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'"
diff --git a/contrib/terraform/openstack/group_vars b/contrib/terraform/openstack/group_vars
deleted file mode 120000
index d64da8dc6112c0f3eefa42ebc4a8dbccd200ec32..0000000000000000000000000000000000000000
--- a/contrib/terraform/openstack/group_vars
+++ /dev/null
@@ -1 +0,0 @@
-../../../inventory/group_vars
\ No newline at end of file
diff --git a/contrib/terraform/openstack/modules/network/main.tf b/contrib/terraform/openstack/modules/network/main.tf
index a5ef099ed576f027f68c2487dea71be286b22342..2c461c78483f7a366880d8baee52c3e5d8b59e4a 100644
--- a/contrib/terraform/openstack/modules/network/main.tf
+++ b/contrib/terraform/openstack/modules/network/main.tf
@@ -1,7 +1,7 @@
 resource "openstack_networking_router_v2" "k8s" {
-  name             = "${var.cluster_name}-router"
-  admin_state_up   = "true"
-  external_gateway = "${var.external_net}"
+  name                = "${var.cluster_name}-router"
+  admin_state_up      = "true"
+  external_network_id = "${var.external_net}"
 }
 
 resource "openstack_networking_network_v2" "k8s" {
diff --git a/contrib/terraform/openstack/modules/network/outputs.tf b/contrib/terraform/openstack/modules/network/outputs.tf
index a426202b9827f274bd7e5c330fca8a636855aa27..e56a792c21c2e4426b82bf4b7d15be786ba42d14 100644
--- a/contrib/terraform/openstack/modules/network/outputs.tf
+++ b/contrib/terraform/openstack/modules/network/outputs.tf
@@ -2,6 +2,6 @@ output "router_id" {
   value = "${openstack_networking_router_interface_v2.k8s.id}"
 }
 
-output "network_id" {
+output "subnet_id" {
   value = "${openstack_networking_subnet_v2.k8s.id}"
 }
diff --git a/contrib/terraform/openstack/sample-inventory/cluster.tf b/contrib/terraform/openstack/sample-inventory/cluster.tf
new file mode 100644
index 0000000000000000000000000000000000000000..7830d2159fbbf18bfde99da1cb664ff23433d183
--- /dev/null
+++ b/contrib/terraform/openstack/sample-inventory/cluster.tf
@@ -0,0 +1,45 @@
+# your Kubernetes cluster name here
+cluster_name = "i-didnt-read-the-docs"
+
+# SSH key to use for access to nodes
+public_key_path = "~/.ssh/id_rsa.pub"
+
+# image to use for bastion, masters, standalone etcd instances, and nodes
+image = "<image name>"
+# user on the node (ex. core on Container Linux, ubuntu on Ubuntu, etc.)
+ssh_user = "<cloud-provisioned user>"
+
+# 0|1 bastion nodes
+number_of_bastions = 0
+#flavor_bastion = "<UUID>"
+
+# standalone etcds
+number_of_etcd = 0
+
+# masters
+number_of_k8s_masters = 1
+number_of_k8s_masters_no_etcd = 0
+number_of_k8s_masters_no_floating_ip = 0
+number_of_k8s_masters_no_floating_ip_no_etcd = 0
+flavor_k8s_master = "<UUID>"
+
+# nodes
+number_of_k8s_nodes = 2
+number_of_k8s_nodes_no_floating_ip = 4
+#flavor_k8s_node = "<UUID>"
+
+# GlusterFS
+# either 0 or more than one
+#number_of_gfs_nodes_no_floating_ip = 0
+#gfs_volume_size_in_gb = 150
+# Container Linux does not support GlusterFS
+#image_gfs = "<image name>"
+# May be different from other nodes
+#ssh_user_gfs = "ubuntu"
+#flavor_gfs_node = "<UUID>"
+
+# networking
+network_name = "<network>"
+external_net = "<UUID>"
+floatingip_pool = "<pool>"
+
diff --git a/contrib/terraform/openstack/sample-inventory/group_vars b/contrib/terraform/openstack/sample-inventory/group_vars
new file mode 120000
index 0000000000000000000000000000000000000000..37359582379ba157188603b03af649187dfa072c
--- /dev/null
+++ b/contrib/terraform/openstack/sample-inventory/group_vars
@@ -0,0 +1 @@
+../../../../inventory/sample/group_vars
\ No newline at end of file
diff --git a/docs/atomic.md b/docs/atomic.md
index cb506a9f3d24b9244b698b980277cdb2d9f7c106..1c432b8e8a19bc96d10e77447a90d6d2dcc50ad8 100644
--- a/docs/atomic.md
+++ b/docs/atomic.md
@@ -7,7 +7,7 @@ Note: Flannel is the only plugin that has currently been tested with atomic
 
 ### Vagrant
 
-* For bootstrapping with Vagrant, use box centos/atomic-host 
+* For bootstrapping with Vagrant, use box centos/atomic-host or fedora/atomic-host
 * Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
 * Update `vm_memory = 2048` and `vm_cpus = 2`
 * Networking on vagrant hosts has to be brought up manually once they are booted.
@@ -17,6 +17,7 @@ Note: Flannel is the only plugin that has currently been tested with atomic
     sudo /sbin/ifup enp0s8
     ```
 
-* For users of vagrant-libvirt download qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
+* For users of vagrant-libvirt download centos/atomic-host qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
+* For users of vagrant-libvirt download fedora/atomic-host qcow2 format from https://getfedora.org/en/atomic/download/
 
-Then you can proceed to [cluster deployment](#run-deployment)
\ No newline at end of file
+Then you can proceed to [cluster deployment](#run-deployment)
diff --git a/docs/dns-stack.md b/docs/dns-stack.md
index 6215114af40e5e45a0a873917d869c27e65310d0..1deb88776f5de6663825fe258f19fbe22f579ebc 100644
--- a/docs/dns-stack.md
+++ b/docs/dns-stack.md
@@ -62,6 +62,14 @@ other queries are forwardet to the nameservers found in ``upstream_dns_servers``
 This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
 all queries.
 
+#### coredns
+This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
+all queries.
+
+#### coredns_dual
+This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
+all queries. It will also deploy a secondary CoreDNS stack.
+
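+A minimal sketch of selecting one of these modes in your inventory group vars (the path shown assumes the standard sample layout):
+
+    # inventory/mycluster/group_vars/k8s-cluster.yml
+    dns_mode: coredns
+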
 #### manual
 This does not install dnsmasq or kubedns, but allows you to specify
 `manual_dns_server`, which will be configured on nodes for handling Pod DNS.
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 961d1a9cfd821aed2fde568c5f4ff69134023ce5..2402ac54fe4fa455a831afbc630d42f40d8f3101 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -18,11 +18,9 @@ certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` hel
 
 Example inventory generator usage:
 
-```
-cp -r inventory/sample inventory/mycluster
-declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
-CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
-```
+    cp -r inventory/sample inventory/mycluster
+    declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
+    CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
 
 Starting custom deployment
 --------------------------
@@ -30,12 +28,10 @@ Starting custom deployment
 Once you have an inventory, you may want to customize deployment data vars
 and start the deployment:
 
-**IMPORTANT: Edit my_inventory/groups_vars/*.yaml to override data vars**
+**IMPORTANT**: Edit my\_inventory/groups\_vars/\*.yaml to override data vars:
 
-```
-ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
-  --private-key=~/.ssh/private_key
-```
+    ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
+      --private-key=~/.ssh/private_key
 
 See more details in the [ansible guide](ansible.md).
 
@@ -44,31 +40,43 @@ Adding nodes
 
 You may want to add **worker** nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
 
-- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
-- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
+-   Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
+-   Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
+
+        ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
+          --private-key=~/.ssh/private_key
+
+Remove nodes
+------------
+
+You may want to remove **worker** nodes from your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained, then some Kubernetes services will be stopped and some certificates deleted, and finally the kubectl command will be run to delete these nodes. This can be combined with the add-node function; it is generally helpful when doing something like autoscaling your clusters. Of course, if a node is not working, you can remove the node and install it again.
+
+- Add the worker nodes you want to remove to the list under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
+- Run the ansible-playbook command, substituting `remove-node.yml`:
 ```
-ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
+ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
   --private-key=~/.ssh/private_key
 ```
 
 Connecting to Kubernetes
 ------------------------
+
 By default, Kubespray configures kube-master hosts with insecure access to
 kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
-because kubectl will use http://localhost:8080 to connect. The kubeconfig files
+because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
 generated will point to localhost (on kube-masters) and kube-node hosts will
 connect either to a localhost nginx proxy or to a loadbalancer if configured.
 More details on this process are in the [HA guide](ha-mode.md).
 
-Kubespray permits connecting to the cluster remotely on any IP of any 
-kube-master host on port 6443 by default. However, this requires 
-authentication. One could generate a kubeconfig based on one installed 
+Kubespray permits connecting to the cluster remotely on any IP of any
+kube-master host on port 6443 by default. However, this requires
+authentication. One could generate a kubeconfig based on one installed
 kube-master hosts (needs improvement) or connect with a username and password.
 By default, a user with admin rights is created, named `kube`.
-The password can be viewed after deployment by looking at the file 
-`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
+The password can be viewed after deployment by looking at the file
+`PATH_TO_KUBESPRAY/credentials/kube_user.creds`. This contains a randomly generated
 password. If you wish to set your own password, just precreate/modify this
-file yourself. 
+file yourself.
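+
+A minimal sketch of checking that access (run from the kubespray directory so the credentials path resolves; `MASTER_IP` is a placeholder for a kube-master address, and this assumes the default basic auth setup described above):
+
+    cat credentials/kube_user.creds
+    curl -k --user "kube:$(cat credentials/kube_user.creds)" https://MASTER_IP:6443/api/v1/nodes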
 
 For more information on kubeconfig and accessing a Kubernetes cluster, refer to
 the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
@@ -77,29 +85,33 @@ Accessing Kubernetes Dashboard
 ------------------------------
 
 As of kubernetes-dashboard v1.7.x:
-* New login options that use apiserver auth proxying of token/basic/kubeconfig by default
-* Requires RBAC in authorization_modes
-* Only serves over https
-* No longer available at https://first_master:6443/ui until apiserver is updated with the https proxy URL
+
+-   New login options that use apiserver auth proxying of token/basic/kubeconfig by default
+-   Requires RBAC in authorization\_modes
+-   Only serves over https
+-   No longer available at <https://first_master:6443/ui> until apiserver is updated with the https proxy URL
 
 If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL, You will be prompted for credentials:
-https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login
+<https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
 
 Or you can run 'kubectl proxy' from your local machine to access dashboard in your browser from:
-http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login
+<http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
 
-It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above
+It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: <https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above>
 
 Accessing Kubernetes API
 ------------------------
 
 The main client of Kubernetes is `kubectl`. It is installed on each kube-master
 host and can optionally be configured on your ansible host by setting
-`kubeconfig_localhost: true` in the configuration. If enabled, kubectl and
-admin.conf will appear in the artifacts/ directory after deployment. You can
-see a list of nodes by running the following commands:
+`kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration:
+
+-   If `kubectl_localhost` is enabled, `kubectl` will be downloaded to `/usr/local/bin/` and set up with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` is also created for use with the `admin.conf` below.
+-   If `kubeconfig_localhost` is enabled, `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment.
+
+You can see a list of nodes by running the following commands:
 
-    cd artifacts/
-    ./kubectl --kubeconfig admin.conf get nodes
+    cd inventory/mycluster/artifacts
+    ./kubectl.sh get nodes
 
-If desired, copy kubectl to your bin dir and admin.conf to ~/.kube/config.
+If desired, copy admin.conf to ~/.kube/config.
diff --git a/docs/large-deployments.md b/docs/large-deployments.md
index b19f699135b7f68c9a10d4add7110e61dbcf06db..723ca5f4847344f15ad41e93e1b033f27f743737 100644
--- a/docs/large-deployments.md
+++ b/docs/large-deployments.md
@@ -3,8 +3,7 @@ Large deployments of K8s
 
 For a large scaled deployments, consider the following configuration changes:
 
-* Tune [ansible settings]
-  (http://docs.ansible.com/ansible/intro_configuration.html)
+* Tune [ansible settings](http://docs.ansible.com/ansible/intro_configuration.html)
   for `forks` and `timeout` vars to fit large numbers of nodes being deployed.
 
 * Override containers' `foo_image_repo` vars to point to intranet registry.
@@ -47,5 +46,8 @@ For a large scaled deployments, consider the following configuration changes:
   section of the Getting started guide for tips on creating a large scale
   Ansible inventory.
 
+* Set ``etcd_events_cluster_setup: true`` to store events in a separate
+  dedicated etcd instance.
+
 For example, when deploying 200 nodes, you may want to run ansible with
 ``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
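+
+A hedged sketch of such an invocation (the inventory path is illustrative; ``retry_stagger`` can also be set in group vars instead of being passed with ``-e``):
+
+    ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b \
+      --forks=50 --timeout=600 -e retry_stagger=60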
diff --git a/docs/vagrant.md b/docs/vagrant.md
index 042e8137bd2973941ccbcec497945ecd958e87ae..de47159fa7358bf835e0bffc9a9bdb97131af391 100644
--- a/docs/vagrant.md
+++ b/docs/vagrant.md
@@ -1,7 +1,7 @@
 Vagrant Install
 =================
 
-Assuming you have Vagrant (1.9+) installed with virtualbox (it may work
+Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
 with vmware, but is untested) you should be able to launch a 3 node
 Kubernetes cluster by simply running `$ vagrant up`.<br />
 
diff --git a/docs/vars.md b/docs/vars.md
index 3303f6bcbfe941a33306c5728c7fbee2059c1b17..a4ae65678503d7e0e932d511fa89b82369c8713c 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -63,7 +63,8 @@ following default cluster paramters:
   bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
 * *dns_setup* - Enables dnsmasq
 * *dnsmasq_dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
-* *skydns_server* - Cluster IP for KubeDNS (default is 10.233.0.3)
+* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
+* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
 * *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
   OpenStack (default is unset)
 * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
@@ -105,9 +106,9 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
 * *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
   proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
   that correspond to each node.
-* *kubelet_deployment_type* - Controls which platform to deploy kubelet on. 
+* *kubelet_deployment_type* - Controls which platform to deploy kubelet on.
   Available options are ``host``, ``rkt``, and ``docker``. ``docker`` mode
-  is unlikely to work on newer releases. Starting with Kubernetes v1.7 
+  is unlikely to work on newer releases. Starting with Kubernetes v1.7
   series, this now defaults to ``host``. Before v1.7, the default was Docker.
   This is because of cgroup [issues](https://github.com/kubernetes/kubernetes/issues/43704).
 * *kubelet_load_modules* - For some things, kubelet needs to load kernel modules.  For example,
@@ -117,6 +118,14 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
 * *kubelet_cgroup_driver* - Allows manual override of the
   cgroup-driver option for Kubelet. By default autodetection is used
   to match Docker configuration.
+* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
+  For example, labels can be set in the inventory as variables or more widely in group_vars.
+  *node_labels* must be defined as a dict:
+```
+node_labels:
+  label1_name: label1_value
+  label2_name: label2_value
+```
 
 ##### Custom flags for Kube Components
 For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
@@ -136,6 +145,6 @@ The possible vars are:
 
 By default, a user with admin rights is created, named `kube`.
 The password can be viewed after deployment by looking at the file
-`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
+`PATH_TO_KUBESPRAY/credentials/kube_user.creds`. This contains a randomly generated
 password. If you wish to set your own password, just precreate/modify this
 file yourself or change `kube_api_pwd` var.
diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml
index 2c460e28fd8180f5b4c055c2b16333bc1e6e55a2..282943a8d5117d47db48155072f8084c1f8f436f 100644
--- a/inventory/sample/group_vars/all.yml
+++ b/inventory/sample/group_vars/all.yml
@@ -17,7 +17,7 @@ bin_dir: /usr/local/bin
 ### LOADBALANCING AND ACCESS MODES
 ## Enable multiaccess to configure etcd clients to access all of the etcd members directly
 ## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers  natively.
+## This may be the case if clients support and loadbalance multiple etcd servers natively.
 #etcd_multiaccess: true
 
 ### ETCD: disable peer client cert authentication.
@@ -42,7 +42,7 @@ bin_dir: /usr/local/bin
 ## for mounting persistent volumes into containers.  These may not be loaded by preinstall kubernetes
 ## processes.  For example, ceph and rbd backed volumes.  Set to true to allow kubelet to load kernel
 ## modules.
-# kubelet_load_modules: false
+#kubelet_load_modules: false
 
 ## Internal network total size. This is the prefix of the
 ## entire network. Must be unused in your environment.
@@ -76,6 +76,7 @@ bin_dir: /usr/local/bin
 #azure_subnet_name:
 #azure_security_group_name:
 #azure_vnet_name:
+#azure_vnet_resource_group:
 #azure_route_table_name:
 
 ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
@@ -111,7 +112,7 @@ bin_dir: /usr/local/bin
 
 ## Default packages to install within the cluster, f.e:
 #kpm_packages:
-#  - name: kube-system/grafana
+# - name: kube-system/grafana
 
 ## Certificate Management
 ## This setting determines whether certs are generated via scripts or whether a
@@ -128,5 +129,9 @@ bin_dir: /usr/local/bin
 ## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
 #etcd_metrics: basic
 
+## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
+## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
+#etcd_memory_limit: "512M"
+
 # The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
-# kube_read_only_port: 10255
+#kube_read_only_port: 10255
diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml
index cf4f08a896e5d79c751b5f18c30fd5aa4854c421..345d22a3664c85b8ec860f5ac12f3a2b3f4b8e46 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster.yml
@@ -1,12 +1,11 @@
 # Kubernetes configuration dirs and system namespace.
 # Those are where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
+# the kubernetes normally puts in /srv/kubernetes.
 # This puts them in a sane location and namespace.
-# Editting those values will almost surely break something.
+# Editing those values will almost surely break something.
 kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
 
 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"
@@ -20,7 +19,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
 kube_api_anonymous_auth: true
 
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.9.2
+kube_version: v1.9.5
 
 # Where the binaries will be downloaded.
 # Note: ensure that you've enough disk space (about 1G)
@@ -29,7 +28,7 @@ local_release_dir: "/tmp/releases"
 retry_stagger: 5
 
 # This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
+# cert files to. Not really changeable...
 kube_cert_group: kube-cert
 
 # Cluster Loglevel configuration
@@ -37,7 +36,7 @@ kube_log_level: 2
 
 # Users to create for basic auth in Kubernetes API via HTTP
 # Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user.creds length=15 chars=ascii_letters,digits') }}"
 kube_users:
   kube:
     pass: "{{kube_api_pwd}}"
@@ -111,14 +110,17 @@ kube_apiserver_insecure_port: 8080 # (http)
 
 # Kube-proxy proxyMode configuration.
 # Can be ipvs, iptables
-kube_proxy_mode: iptables 
+kube_proxy_mode: iptables
+
+## Encrypting Secret Data at Rest (experimental)
+kube_encrypt_secret_data: false
 
 # DNS configuration.
 # Kubernetes cluster name, also will be used as DNS domain
 cluster_name: cluster.local
 # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
 ndots: 2
-# Can be dnsmasq_kubedns, kubedns, manual or none
+# Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none
 dns_mode: kubedns
 # Set manual server if using a custom cluster DNS server
 #manual_dns_server: 10.x.x.x
@@ -129,6 +131,7 @@ resolvconf_mode: docker_dns
 deploy_netchecker: false
 # Ip address of the kubernetes skydns service
 skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
 dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
 dns_domain: "{{ cluster_name }}"
 
@@ -159,19 +162,30 @@ dashboard_enabled: true
 # Monitoring apps for k8s
 efk_enabled: false
 
-# Helm deployment
+# Helm deployment. Needed for Prometheus Operator and k8s metrics.
 helm_enabled: false
 
+# Prometheus Operator. Needed for k8s metrics. Requires Helm to be installed.
+prometheus_operator_enabled: false
+
+# K8s cluster metrics. Requires Helm and the Prometheus Operator to be installed.
+k8s_metrics_enabled: false
+
 # Istio deployment
 istio_enabled: false
 
 # Registry deployment
 registry_enabled: false
+# registry_namespace: "{{ system_namespace }}"
+# registry_storage_class: ""
+# registry_disk_size: "10Gi"
 
 # Local volume provisioner deployment
-# deprecated will be removed
-local_volumes_enabled: false
-local_volume_provisioner_enabled: "{{ local_volumes_enabled }}"
+local_volume_provisioner_enabled: false
+# local_volume_provisioner_namespace: "{{ system_namespace }}"
+# local_volume_provisioner_base_dir: /mnt/disks
+# local_volume_provisioner_mount_dir: /mnt/disks
+# local_volume_provisioner_storage_class: local-storage
 
 # CephFS provisioner deployment
 cephfs_provisioner_enabled: false
@@ -185,12 +199,30 @@ cephfs_provisioner_enabled: false
 # cephfs_provisioner_secret: secret
 # cephfs_provisioner_storage_class: cephfs
 
+# Nginx ingress controller deployment
+ingress_nginx_enabled: false
+# ingress_nginx_host_network: false
+# ingress_nginx_namespace: "ingress-nginx"
+# ingress_nginx_insecure_port: 80
+# ingress_nginx_secure_port: 443
+# ingress_nginx_configmap:
+#   map-hash-bucket-size: "128"
+#   ssl-protocols: "SSLv2"
+# ingress_nginx_configmap_tcp_services:
+#   9000: "default/example-go:8080"
+# ingress_nginx_configmap_udp_services:
+#   53: "kube-system/kube-dns:53"
+
+# Cert manager deployment
+cert_manager_enabled: false
+# cert_manager_namespace: "cert-manager"
+
 # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
 persistent_volumes_enabled: false
 
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
 # kubeconfig_localhost: false
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
 # kubectl_localhost: false
 
 # dnsmasq
diff --git a/inventory/sample/hosts.ini b/inventory/sample/hosts.ini
index f8c567b3468f451cca00c80437579c3d7bfdea0b..24578333471f187456123c2228625e97cd87f9f9 100644
--- a/inventory/sample/hosts.ini
+++ b/inventory/sample/hosts.ini
@@ -26,6 +26,11 @@
 # node5
 # node6
 
+# [kube-ingress]
+# node2
+# node3
+
 # [k8s-cluster:children]
+# kube-master
 # kube-node
-# kube-master
\ No newline at end of file
+# kube-ingress
diff --git a/library/kube.py b/library/kube.py
index a84578ff05aeca44d73dc3c548c4106347cac705..0a50c430326487d961aeb3a6c3bbc8609b967829 100644
--- a/library/kube.py
+++ b/library/kube.py
@@ -18,7 +18,9 @@ options:
     required: false
     default: null
     description:
-      - The path and filename of the resource(s) definition file.
+      - The path and filename of the resource(s) definition file(s).
+      - To operate on several files this can accept a comma separated list of files or a list of files.
+    aliases: [ 'files', 'file', 'filenames' ]
   kubectl:
     required: false
     default: null
@@ -86,6 +88,15 @@ EXAMPLES = """
 
 - name: test nginx is present
   kube: filename=/tmp/nginx.yml
+
+- name: test nginx and postgresql are present
+  kube: files=/tmp/nginx.yml,/tmp/postgresql.yml
+
+- name: test nginx and postgresql are present
+  kube:
+    files:
+      - /tmp/nginx.yml
+      - /tmp/postgresql.yml
 """
 
 
@@ -112,7 +123,7 @@ class KubeManager(object):
         self.all = module.params.get('all')
         self.force = module.params.get('force')
         self.name = module.params.get('name')
-        self.filename = module.params.get('filename')
+        self.filename = [f.strip() for f in module.params.get('filename') or []]
         self.resource = module.params.get('resource')
         self.label = module.params.get('label')
 
@@ -122,7 +133,7 @@ class KubeManager(object):
             rc, out, err = self.module.run_command(args)
             if rc != 0:
                 self.module.fail_json(
-                    msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
+                    msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err))
         except Exception as exc:
             self.module.fail_json(
                 msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
@@ -147,7 +158,7 @@ class KubeManager(object):
         if not self.filename:
             self.module.fail_json(msg='filename required to create')
 
-        cmd.append('--filename=' + self.filename)
+        cmd.append('--filename=' + ','.join(self.filename))
 
         return self._execute(cmd)
 
@@ -161,7 +172,7 @@ class KubeManager(object):
         if not self.filename:
             self.module.fail_json(msg='filename required to reload')
 
-        cmd.append('--filename=' + self.filename)
+        cmd.append('--filename=' + ','.join(self.filename))
 
         return self._execute(cmd)
 
@@ -173,7 +184,7 @@ class KubeManager(object):
         cmd = ['delete']
 
         if self.filename:
-            cmd.append('--filename=' + self.filename)
+            cmd.append('--filename=' + ','.join(self.filename))
         else:
             if not self.resource:
                 self.module.fail_json(msg='resource required to delete without filename')
@@ -197,27 +208,31 @@ class KubeManager(object):
     def exists(self):
         cmd = ['get']
 
-        if not self.resource:
-            return False
+        if self.filename:
+            cmd.append('--filename=' + ','.join(self.filename))
+        else:
+            if not self.resource:
+                self.module.fail_json(msg='resource required without filename')
 
-        cmd.append(self.resource)
+            cmd.append(self.resource)
 
-        if self.name:
-            cmd.append(self.name)
+            if self.name:
+                cmd.append(self.name)
 
-        cmd.append('--no-headers')
+            if self.label:
+                cmd.append('--selector=' + self.label)
 
-        if self.label:
-            cmd.append('--selector=' + self.label)
+            if self.all:
+                cmd.append('--all-namespaces')
 
-        if self.all:
-            cmd.append('--all-namespaces')
+        cmd.append('--no-headers')
 
         result = self._execute_nofail(cmd)
         if not result:
             return False
         return True
 
+    # TODO: This is currently unused, perhaps convert to 'scale' with a replicas param?
     def stop(self):
 
         if not self.force and not self.exists():
@@ -226,7 +241,7 @@ class KubeManager(object):
         cmd = ['stop']
 
         if self.filename:
-            cmd.append('--filename=' + self.filename)
+            cmd.append('--filename=' + ','.join(self.filename))
         else:
             if not self.resource:
                 self.module.fail_json(msg='resource required to stop without filename')
@@ -253,7 +268,7 @@ def main():
     module = AnsibleModule(
         argument_spec=dict(
             name=dict(),
-            filename=dict(),
+            filename=dict(type='list', aliases=['files', 'file', 'filenames']),
             namespace=dict(),
             resource=dict(),
             label=dict(),
@@ -263,7 +278,8 @@ def main():
             all=dict(default=False, type='bool'),
             log_level=dict(default=0, type='int'),
             state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
-            )
+            ),
+            mutually_exclusive=[['filename', 'list']]
         )
 
     changed = False
diff --git a/remove-node.yml b/remove-node.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fbc5bc8ba6fb060fcf130912163570906b2492d6
--- /dev/null
+++ b/remove-node.yml
@@ -0,0 +1,29 @@
+---
+
+- hosts: all
+  gather_facts: true
+
+- hosts: etcd:k8s-cluster:vault:calico-rr
+  vars_prompt:
+    name: "delete_nodes_confirmation"
+    prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
+    default: "no"
+    private: no
+
+  pre_tasks:
+    - name: check confirmation
+      fail:
+        msg: "Delete nodes confirmation failed"
+      when: delete_nodes_confirmation != "yes"
+
+- hosts: kube-master
+  roles:
+    - { role: remove-node/pre-remove, tags: pre-remove }
+
+- hosts: kube-node
+  roles:
+    - { role: reset, tags: reset }
+
+- hosts: kube-master
+  roles:
+    - { role: remove-node/post-remove, tags: post-remove }
diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml
index b6574fd27f5c44d57053d95412447a1416d04a0d..8313301752fa3efcd3d38abd602d0cfdb6d3cb6b 100644
--- a/roles/dnsmasq/tasks/main.yml
+++ b/roles/dnsmasq/tasks/main.yml
@@ -91,7 +91,7 @@
 - name: Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml
index 817de877b4d59b14c6ac561dad17c2e4c96ec2a2..0fa300989bb9977a756136d35f29ed365ceca595 100644
--- a/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml
+++ b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: dnsmasq
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
 subjects:
   - kind: ServiceAccount
     name: dnsmasq
-    namespace: "{{ system_namespace}}"
+    namespace: "kube-system"
 roleRef:
   kind: ClusterRole
   name: cluster-admin
diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml b/roles/dnsmasq/templates/dnsmasq-deploy.yml
index 838471050349fde9ca0b916a2ce3087496a3f4ce..0fb6045e8269ad4061a8d435412553e25a78c53d 100644
--- a/roles/dnsmasq/templates/dnsmasq-deploy.yml
+++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: dnsmasq
-  namespace: "{{system_namespace}}"
+  namespace: "kube-system"
   labels:
     k8s-app: dnsmasq
     kubernetes.io/cluster-service: "true"
diff --git a/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml
index bce8a232f355fab0e34a6ede8356d851bc60693e..91e98feee88a75aa7263ceedc5497242dd70d035 100644
--- a/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml
+++ b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: dnsmasq
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/dnsmasq/templates/dnsmasq-svc.yml b/roles/dnsmasq/templates/dnsmasq-svc.yml
index 54dc0aa9798f89673938a320bf804d18efb732dd..f00d3d3dd854019ae6eb062bbc8e4fe7123c05f7 100644
--- a/roles/dnsmasq/templates/dnsmasq-svc.yml
+++ b/roles/dnsmasq/templates/dnsmasq-svc.yml
@@ -6,7 +6,7 @@ metadata:
     kubernetes.io/cluster-service: 'true'
     k8s-app: dnsmasq
   name: dnsmasq
-  namespace: {{system_namespace}}
+  namespace: kube-system
 spec:
   ports:
     - port: 53
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index e49471ccc0929b1f8831d6118b2857d5d5662b02..3ed3e9ce7668a00cf5f8fba9922eb019a5fd6796 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1,5 +1,6 @@
 ---
 docker_version: '17.03'
+docker_selinux_version: '17.03'
 
 docker_package_info:
   pkgs:
@@ -10,11 +11,31 @@ docker_repo_key_info:
 docker_repo_info:
   repos:
 
+dockerproject_repo_key_info:
+  repo_keys:
+
+dockerproject_repo_info:
+  repos:
+
 docker_dns_servers_strict: yes
 
 docker_container_storage_setup: false
 
-docker_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
-docker_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
-docker_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
-docker_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
+# Copy of yum.conf used to install Docker packages with obsoletes=0 set
+yum_conf: /etc/yum.conf
+docker_yum_conf: /etc/yum_docker.conf
+
+# CentOS/RedHat docker-ce repo
+docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/7/$basearch/stable'
+docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg'
+# Ubuntu docker-ce repo
+docker_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"
+docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
+# Debian docker-ce repo
+docker_debian_repo_base_url: "https://download.docker.com/linux/debian"
+docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg'
+# dockerproject repo
+dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
+dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
+dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
+dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 1c95f819f31f3afe7a5f80b218776226871fb304..729397b449635a607c6faf318722c5f987da4fd1 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -30,7 +30,9 @@
   tags:
     - facts
 
-- name: ensure docker repository public key is installed
+- import_tasks: pre-upgrade.yml
+
+- name: ensure docker-ce repository public key is installed
   action: "{{ docker_repo_key_info.pkg_key }}"
   args:
     id: "{{item}}"
@@ -41,15 +43,36 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ docker_repo_key_info.repo_keys }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic)
 
-- name: ensure docker repository is enabled
+- name: ensure docker-ce repository is enabled
   action: "{{ docker_repo_info.pkg_repo }}"
   args:
     repo: "{{item}}"
     state: present
   with_items: "{{ docker_repo_info.repos }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_repo_info.repos|length > 0)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (docker_repo_info.repos|length > 0)
+
+- name: ensure docker-engine repository public key is installed
+  action: "{{ dockerproject_repo_key_info.pkg_key }}"
+  args:
+    id: "{{item}}"
+    url: "{{dockerproject_repo_key_info.url}}"
+    state: present
+  register: keyserver_task_result
+  until: keyserver_task_result|succeeded
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  with_items: "{{ dockerproject_repo_key_info.repo_keys }}"
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic)
+
+- name: ensure docker-engine repository is enabled
+  action: "{{ dockerproject_repo_info.pkg_repo }}"
+  args:
+    repo: "{{item}}"
+    state: present
+  with_items: "{{ dockerproject_repo_info.repos }}"
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (dockerproject_repo_info.repos|length > 0)
 
 - name: Configure docker repository on RedHat/CentOS
   template:
@@ -57,11 +80,27 @@
     dest: "/etc/yum.repos.d/docker.repo"
   when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
 
+- name: Copy yum.conf for editing
+  copy:
+    src: "{{ yum_conf }}"
+    dest: "{{ docker_yum_conf }}"
+    remote_src: yes
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
+- name: Edit copy of yum.conf to set obsoletes=0
+  lineinfile:
+    path: "{{ docker_yum_conf }}"
+    state: present
+    regexp: '^obsoletes='
+    line: 'obsoletes=0'
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
 - name: ensure docker packages are installed
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
     pkg: "{{item.name}}"
     force: "{{item.force|default(omit)}}"
+    conf_file: "{{item.yum_conf|default(omit)}}"
     state: present
   register: docker_task_result
   until: docker_task_result|succeeded
diff --git a/roles/docker/tasks/pre-upgrade.yml b/roles/docker/tasks/pre-upgrade.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8b75cba0ddef97518e20b85ee3551a61c6f7548c
--- /dev/null
+++ b/roles/docker/tasks/pre-upgrade.yml
@@ -0,0 +1,25 @@
+---
+- name: Ensure old versions of Docker are not installed. | Debian
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-engine
+  when:
+    - ansible_os_family == 'Debian'
+    - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+
+- name: Ensure old versions of Docker are not installed. | RedHat
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-common
+    - docker-engine
+    - docker-selinux
+  when:
+    - ansible_os_family == 'RedHat'
+    - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+    - not is_atomic
diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml
index 7152b442b1816fb779541a71c97763ea240bf613..6fe516c2d4d433b2da49860af54a544099ef1708 100644
--- a/roles/docker/tasks/set_facts_dns.yml
+++ b/roles/docker/tasks/set_facts_dns.yml
@@ -3,8 +3,10 @@
 - name: set dns server for docker
   set_fact:
     docker_dns_servers: |-
-      {%- if dns_mode == 'kubedns' -%}
+      {%- if dns_mode in ['kubedns', 'coredns'] -%}
         {{ [ skydns_server ] }}
+      {%- elif dns_mode == 'coredns_dual' -%}
+        {{ [ skydns_server ] + [ skydns_server_secondary ] }}
       {%- elif dns_mode == 'dnsmasq_kubedns' -%}
         {{ [ dnsmasq_dns_server ] }}
       {%- elif dns_mode == 'manual' -%}
@@ -24,7 +26,7 @@
 - name: add upstream dns servers (only when dnsmasq is not used)
   set_fact:
     docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}"
-  when: dns_mode == 'kubedns'
+  when: dns_mode in ['kubedns', 'coredns', 'coredns_dual']
 
 - name: add global searchdomains
   set_fact:
diff --git a/roles/docker/tasks/systemd.yml b/roles/docker/tasks/systemd.yml
index 877de12997ae9f64abc3c284ba098db8dfadffe0..78cec33ccc7413e579125cc71d2eba41c50f2665 100644
--- a/roles/docker/tasks/systemd.yml
+++ b/roles/docker/tasks/systemd.yml
@@ -12,7 +12,7 @@
   when: http_proxy is defined or https_proxy is defined
 
 - name: get systemd version
-  command: systemctl --version | head -n 1 | cut -d " " -f 2
+  shell: systemctl --version | head -n 1 | cut -d " " -f 2
   register: systemd_version
   when: not is_atomic
   changed_when: false
diff --git a/roles/docker/templates/docker.service.j2 b/roles/docker/templates/docker.service.j2
index 29abb6d53bb53650f90acbbccd7c005c0f6ee3fb..d8efe202546dac187a44d4d7ba575fef27d66a98 100644
--- a/roles/docker/templates/docker.service.j2
+++ b/roles/docker/templates/docker.service.j2
@@ -31,7 +31,10 @@ LimitNOFILE=1048576
 LimitNPROC=1048576
 LimitCORE=infinity
 TimeoutStartSec=1min
-Restart=on-abnormal
+# restart the docker process if it exits prematurely
+Restart=on-failure
+StartLimitBurst=3
+StartLimitInterval=60s
 
 [Install]
 WantedBy=multi-user.target
diff --git a/roles/docker/templates/rh_docker.repo.j2 b/roles/docker/templates/rh_docker.repo.j2
index 7cb728625ddf95d44c48c7bf7981e6c26fb561bb..fe2aeac1c2ee316444819fe79268ab09fa8a004b 100644
--- a/roles/docker/templates/rh_docker.repo.j2
+++ b/roles/docker/templates/rh_docker.repo.j2
@@ -1,7 +1,15 @@
-[dockerrepo]
-name=Docker Repository
+[docker-ce]
+name=Docker-CE Repository
 baseurl={{ docker_rh_repo_base_url }}
 enabled=1
 gpgcheck=1
 gpgkey={{ docker_rh_repo_gpgkey }}
 {% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
+
+[docker-engine]
+name=Docker-Engine Repository
+baseurl={{ dockerproject_rh_repo_base_url }}
+enabled=1
+gpgcheck=1
+gpgkey={{ dockerproject_rh_repo_gpgkey }}
+{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
diff --git a/roles/docker/vars/debian.yml b/roles/docker/vars/debian.yml
index 587e910d6c36c3f7499af6c20159d200bb982841..a17cd757583bb40f059865626e5e3b772a952f69 100644
--- a/roles/docker/vars/debian.yml
+++ b/roles/docker/vars/debian.yml
@@ -1,15 +1,16 @@
 ---
 docker_kernel_min_version: '3.10'
 
+# https://download.docker.com/linux/debian/
 # https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist
 docker_versioned_pkg:
-  'latest': docker-engine
+  'latest': docker-ce
   '1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
   '1.12': docker-engine=1.12.6-0~debian-{{ ansible_distribution_release|lower }}
   '1.13': docker-engine=1.13.1-0~debian-{{ ansible_distribution_release|lower }}
-  '17.03': docker-engine=17.03.1~ce-0~debian-{{ ansible_distribution_release|lower }}
-  'stable': docker-engine=17.03.1~ce-0~debian-{{ ansible_distribution_release|lower }}
-  'edge': docker-engine=17.05.0~ce-0~debian-{{ ansible_distribution_release|lower }}
+  '17.03': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
+  'stable': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
+  'edge': docker-ce=17.12.1~ce-0~debian-{{ ansible_distribution_release|lower }}
 
 docker_package_info:
   pkg_mgr: apt
@@ -19,14 +20,28 @@ docker_package_info:
 
 docker_repo_key_info:
   pkg_key: apt_key
-  url: '{{ docker_apt_repo_gpgkey }}'
+  url: '{{ docker_debian_repo_gpgkey }}'
   repo_keys:
-    - 58118E89F3A912897C070ADBF76221572C52609D
+    - 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
 
 docker_repo_info:
   pkg_repo: apt_repository
   repos:
     - >
-       deb {{ docker_apt_repo_base_url }}
+       deb {{ docker_debian_repo_base_url }}
+       {{ ansible_distribution_release|lower }}
+       stable
+
+dockerproject_repo_key_info:
+  pkg_key: apt_key
+  url: '{{ dockerproject_apt_repo_gpgkey }}'
+  repo_keys:
+    - 58118E89F3A912897C070ADBF76221572C52609D
+
+dockerproject_repo_info:
+  pkg_repo: apt_repository
+  repos:
+    - >
+       deb {{ dockerproject_apt_repo_base_url }}
        {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
        main
diff --git a/roles/docker/vars/fedora-20.yml b/roles/docker/vars/fedora-20.yml
deleted file mode 100644
index 31d431ee875a3e16457f7f2696af5d2261a339b3..0000000000000000000000000000000000000000
--- a/roles/docker/vars/fedora-20.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-docker_kernel_min_version: '0'
-
-# versioning: docker-io itself is pinned at docker 1.5
-
-docker_package_info:
-  pkg_mgr: yum
-  pkgs:
-    - name: docker-io
-
-docker_repo_key_info:
-  pkg_key: ''
-  repo_keys: []
-
-docker_repo_info:
-  pkg_repo: ''
-  repos: []
diff --git a/roles/docker/vars/fedora.yml b/roles/docker/vars/fedora.yml
deleted file mode 100644
index 8ce0588d54ed6dd8bfac000dfab7d6f4a0cce076..0000000000000000000000000000000000000000
--- a/roles/docker/vars/fedora.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-docker_kernel_min_version: '0'
-
-# https://docs.docker.com/engine/installation/linux/fedora/#install-from-a-package
-# https://download.docker.com/linux/fedora/7/x86_64/stable/
-# the package names below are guesses;
-# docs mention `sudo dnf config-manager --enable docker-ce-edge` for edge
-docker_versioned_pkg:
-  'latest': docker
-  '1.11': docker-1:1.11.2
-  '1.12': docker-1:1.12.6
-  '1.13': docker-1.13.1
-  '17.03': docker-17.03.1
-  'stable': docker-ce
-  'edge': docker-ce-edge
-
-docker_package_info:
-  pkg_mgr: dnf
-  pkgs:
-    - name: "{{ docker_versioned_pkg[docker_version | string] }}"
-
-docker_repo_key_info:
-  pkg_key: ''
-  repo_keys: []
-
-docker_repo_info:
-  pkg_repo: ''
-  repos: []
diff --git a/roles/docker/vars/redhat.yml b/roles/docker/vars/redhat.yml
index 23c5419a6f10a2428b9a33f226bb6adb8fabe8e6..cd53e284c8ec12cf6ccb5d7d2a20536be8ddfe2a 100644
--- a/roles/docker/vars/redhat.yml
+++ b/roles/docker/vars/redhat.yml
@@ -1,24 +1,36 @@
 ---
 docker_kernel_min_version: '0'
 
-# https://yum.dockerproject.org/repo/main/centos/7/Packages/
+# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
+# https://download.docker.com/linux/centos/7/x86_64/stable/Packages/
+# https://yum.dockerproject.org/repo/main/centos/7
 # or do 'yum --showduplicates list docker-engine'
 docker_versioned_pkg:
-  'latest': docker-engine
+  'latest': docker-ce
   '1.11': docker-engine-1.11.2-1.el7.centos
   '1.12': docker-engine-1.12.6-1.el7.centos
   '1.13': docker-engine-1.13.1-1.el7.centos
-  '17.03': docker-engine-17.03.1.ce-1.el7.centos
-  'stable': docker-engine-17.03.1.ce-1.el7.centos
-  'edge': docker-engine-17.05.0.ce-1.el7.centos
+  '17.03': docker-ce-17.03.2.ce-1.el7.centos
+  'stable': docker-ce-17.03.2.ce-1.el7.centos
+  'edge': docker-ce-17.12.1.ce-1.el7.centos
+
+docker_selinux_versioned_pkg:
+  'latest': docker-ce-selinux
+  '1.11': docker-engine-selinux-1.11.2-1.el7.centos
+  '1.12': docker-engine-selinux-1.12.6-1.el7.centos
+  '1.13': docker-engine-selinux-1.13.1-1.el7.centos
+  '17.03': docker-ce-selinux-17.03.2.ce-1.el7.centos
+  'stable': docker-ce-selinux-17.03.2.ce-1.el7.centos
+  'edge': docker-ce-selinux-17.03.2.ce-1.el7.centos
 
-# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
-# https://download.docker.com/linux/centos/7/x86_64/stable/Packages/
 
 docker_package_info:
   pkg_mgr: yum
   pkgs:
+    - name: "{{ docker_selinux_versioned_pkg[docker_selinux_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"
     - name: "{{ docker_versioned_pkg[docker_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"
 
 docker_repo_key_info:
   pkg_key: ''
diff --git a/roles/docker/vars/ubuntu.yml b/roles/docker/vars/ubuntu.yml
index f11f5bb81b7ae7439a132c55b78a75ddb048d591..f4d6b1e0f718789226baafc74ec1be3e466e343b 100644
--- a/roles/docker/vars/ubuntu.yml
+++ b/roles/docker/vars/ubuntu.yml
@@ -1,15 +1,15 @@
 ---
 docker_kernel_min_version: '3.10'
 
-# https://apt.dockerproject.org/repo/dists/ubuntu-xenial/main/filelist
+# https://download.docker.com/linux/ubuntu/
 docker_versioned_pkg:
-  'latest': docker-engine
-  '1.11': docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
+  'latest': docker-ce
+  '1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
   '1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }}
   '1.13': docker-engine=1.13.1-0~ubuntu-{{ ansible_distribution_release|lower }}
-  '17.03': docker-engine=17.03.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
-  'stable': docker-engine=17.03.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
-  'edge': docker-engine=17.05.0~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+  '17.03': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+  'stable': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
+  'edge': docker-ce=17.12.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
 
 docker_package_info:
   pkg_mgr: apt
@@ -19,14 +19,28 @@ docker_package_info:
 
 docker_repo_key_info:
   pkg_key: apt_key
-  url: '{{ docker_apt_repo_gpgkey }}'
+  url: '{{ docker_ubuntu_repo_gpgkey }}'
   repo_keys:
-    - 58118E89F3A912897C070ADBF76221572C52609D
+    - 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
 
 docker_repo_info:
   pkg_repo: apt_repository
   repos:
     - >
-       deb {{ docker_apt_repo_base_url }}
+       deb {{ docker_ubuntu_repo_base_url }}
+       {{ ansible_distribution_release|lower }}
+       stable
+
+dockerproject_repo_key_info:
+  pkg_key: apt_key
+  url: '{{ dockerproject_apt_repo_gpgkey }}'
+  repo_keys:
+    - 58118E89F3A912897C070ADBF76221572C52609D
+
+dockerproject_repo_info:
+  pkg_repo: apt_repository
+  repos:
+    - >
+       deb {{ dockerproject_apt_repo_base_url }}
        {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
        main
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 406ec8b9567741f6550c69a3939381267d9add84..84f78e404f5dee985990eff27af66d66c46ba334 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -1,5 +1,5 @@
 ---
-local_release_dir: /tmp
+local_release_dir: /tmp/releases
 
 # Used to only evaluate vars from download role
 skip_downloads: false
@@ -24,24 +24,24 @@ download_always_pull: False
 download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
 
 # Versions
-kube_version: v1.9.2
+kube_version: v1.9.5
 kubeadm_version: "{{ kube_version }}"
 etcd_version: v3.2.4
 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
-calico_version: "v2.6.2"
-calico_ctl_version: "v1.6.1"
-calico_cni_version: "v1.11.0"
-calico_policy_version: "v1.0.0"
-calico_rr_version: "v0.4.0"
-flannel_version: "v0.9.1"
+calico_version: "v2.6.8"
+calico_ctl_version: "v1.6.3"
+calico_cni_version: "v1.11.4"
+calico_policy_version: "v1.0.3"
+calico_rr_version: "v0.4.2"
+flannel_version: "v0.10.0"
 flannel_cni_version: "v0.3.0"
 istio_version: "0.2.6"
 vault_version: 0.8.1
-weave_version: 2.2.0
+weave_version: 2.2.1
 pod_infra_version: 3.0
 contiv_version: 1.1.7
-cilium_version: "v1.0.0-rc4"
+cilium_version: "v1.0.0-rc8"
 
 # Download URLs
 istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
@@ -50,7 +50,7 @@ vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/va
 
 # Checksums
 istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
-kubeadm_checksum: 560b44a2b91747f4fb64ac8754fcf65db9a39a84c6b54d4e6483400ac6c674fc
+kubeadm_checksum: 12b6e9ac1624852b7c978bde70b9bde9ca0e4fc6581d09bddfb117bb41f93c74
 vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
 
 # Containers
@@ -70,8 +70,24 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "{{ calico_rr_version }}"
-hyperkube_image_repo: "quay.io/coreos/hyperkube"
-hyperkube_image_tag: "{{ kube_version }}_coreos.0"
+istio_proxy_image_repo: docker.io/istio/proxy
+istio_proxy_image_tag: "{{ istio_version }}"
+istio_proxy_init_image_repo: docker.io/istio/proxy_init
+istio_proxy_init_image_tag: "{{ istio_version }}"
+istio_ca_image_repo: docker.io/istio/istio-ca
+istio_ca_image_tag: "{{ istio_version }}"
+istio_mixer_image_repo: docker.io/istio/mixer
+istio_mixer_image_tag: "{{ istio_version }}"
+istio_pilot_image_repo: docker.io/istio/pilot
+istio_pilot_image_tag: "{{ istio_version }}"
+istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
+istio_proxy_debug_image_tag: "{{ istio_version }}"
+istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
+istio_sidecar_initializer_image_tag: "{{ istio_version }}"
+istio_statsd_image_repo: prom/statsd-exporter
+istio_statsd_image_tag: latest
+hyperkube_image_repo: "gcr.io/google-containers/hyperkube"
+hyperkube_image_tag: "{{ kube_version }}"
 pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
 pod_infra_image_tag: "{{ pod_infra_version }}"
 install_socat_image_repo: "xueshanf/install-socat"
@@ -91,7 +107,6 @@ contiv_auth_proxy_image_repo: "contiv/auth_proxy"
 contiv_auth_proxy_image_tag: "{{ contiv_version }}"
 cilium_image_repo: "docker.io/cilium/cilium"
 cilium_image_tag: "{{ cilium_version }}"
-
 nginx_image_repo: nginx
 nginx_image_tag: 1.13
 dnsmasq_version: 2.78
@@ -100,6 +115,9 @@ dnsmasq_image_tag: "{{ dnsmasq_version }}"
 kubedns_version: 1.14.8
 kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
 kubedns_image_tag: "{{ kubedns_version }}"
+coredns_version: 1.1.0
+coredns_image_repo: "docker.io/coredns/coredns"
+coredns_image_tag: "{{ coredns_version }}"
 dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
 dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
 dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
@@ -121,14 +139,30 @@ fluentd_image_tag: "{{ fluentd_version }}"
 kibana_version: "v4.6.1"
 kibana_image_repo: "gcr.io/google_containers/kibana"
 kibana_image_tag: "{{ kibana_version }}"
-
-helm_version: "v2.7.2"
+helm_version: "v2.8.1"
 helm_image_repo: "lachlanevenson/k8s-helm"
 helm_image_tag: "{{ helm_version }}"
 tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
 tiller_image_tag: "{{ helm_version }}"
 vault_image_repo: "vault"
 vault_image_tag: "{{ vault_version }}"
+registry_image_repo: "registry"
+registry_image_tag: "2.6"
+registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
+registry_proxy_image_tag: "0.4"
+local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
+local_volume_provisioner_image_tag: "v2.0.0"
+cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner"
+cephfs_provisioner_image_tag: "92295a30"
+ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
+ingress_nginx_controller_image_tag: "0.12.0"
+ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
+ingress_nginx_default_backend_image_tag: "1.4"
+cert_manager_version: "v0.2.3"
+cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
+cert_manager_controller_image_tag: "{{ cert_manager_version }}"
+cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
+cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"
 
 downloads:
   netcheck_server:
@@ -137,18 +171,24 @@ downloads:
     repo: "{{ netcheck_server_img_repo }}"
     tag: "{{ netcheck_server_tag }}"
     sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   netcheck_agent:
     enabled: "{{ deploy_netchecker }}"
     container: true
     repo: "{{ netcheck_agent_img_repo }}"
     tag: "{{ netcheck_agent_tag }}"
     sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   etcd:
     enabled: true
     container: true
     repo: "{{ etcd_image_repo }}"
     tag: "{{ etcd_image_tag }}"
     sha256: "{{ etcd_digest_checksum|default(None) }}"
+    groups:
+      - etcd
   kubeadm:
     enabled: "{{ kubeadm_enabled }}"
     file: true
@@ -160,6 +200,8 @@ downloads:
     unarchive: false
     owner: "root"
     mode: "0755"
+    groups:
+      - k8s-cluster
   istioctl:
     enabled: "{{ istio_enabled }}"
     file: true
@@ -171,134 +213,250 @@ downloads:
     unarchive: false
     owner: "root"
     mode: "0755"
+    groups:
+      - kube-master
+  istio_proxy:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_image_repo }}"
+    tag: "{{ istio_proxy_image_tag }}"
+    sha256: "{{ istio_proxy_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_proxy_init:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_init_image_repo }}"
+    tag: "{{ istio_proxy_init_image_tag }}"
+    sha256: "{{ istio_proxy_init_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_ca:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_ca_image_repo }}"
+    tag: "{{ istio_ca_image_tag }}"
+    sha256: "{{ istio_ca_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_mixer:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_mixer_image_repo }}"
+    tag: "{{ istio_mixer_image_tag }}"
+    sha256: "{{ istio_mixer_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_pilot:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_pilot_image_repo }}"
+    tag: "{{ istio_pilot_image_tag }}"
+    sha256: "{{ istio_pilot_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_proxy_debug:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_debug_image_repo }}"
+    tag: "{{ istio_proxy_debug_image_tag }}"
+    sha256: "{{ istio_proxy_debug_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_sidecar_initializer:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_sidecar_initializer_image_repo }}"
+    tag: "{{ istio_sidecar_initializer_image_tag }}"
+    sha256: "{{ istio_sidecar_initializer_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_statsd:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_statsd_image_repo }}"
+    tag: "{{ istio_statsd_image_tag }}"
+    sha256: "{{ istio_statsd_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   hyperkube:
     enabled: true
     container: true
     repo: "{{ hyperkube_image_repo }}"
     tag: "{{ hyperkube_image_tag }}"
     sha256: "{{ hyperkube_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   cilium:
     enabled: "{{ kube_network_plugin == 'cilium' }}"
     container: true
     repo: "{{ cilium_image_repo }}"
     tag: "{{ cilium_image_tag }}"
     sha256: "{{ cilium_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   flannel:
     enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ flannel_image_repo }}"
     tag: "{{ flannel_image_tag }}"
     sha256: "{{ flannel_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   flannel_cni:
     enabled: "{{ kube_network_plugin == 'flannel' }}"
     container: true
     repo: "{{ flannel_cni_image_repo }}"
     tag: "{{ flannel_cni_image_tag }}"
     sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calicoctl:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calicoctl_image_repo }}"
     tag: "{{ calicoctl_image_tag }}"
     sha256: "{{ calicoctl_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_node:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calico_node_image_repo }}"
     tag: "{{ calico_node_image_tag }}"
     sha256: "{{ calico_node_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_cni:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calico_cni_image_repo }}"
     tag: "{{ calico_cni_image_tag }}"
     sha256: "{{ calico_cni_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_policy:
     enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calico_policy_image_repo }}"
     tag: "{{ calico_policy_image_tag }}"
     sha256: "{{ calico_policy_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_rr:
-    enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr}} and kube_network_plugin == 'calico'"
+    enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}"
     container: true
     repo: "{{ calico_rr_image_repo }}"
     tag: "{{ calico_rr_image_tag }}"
     sha256: "{{ calico_rr_digest_checksum|default(None) }}"
+    groups:
+      - calico-rr
   weave_kube:
     enabled: "{{ kube_network_plugin == 'weave' }}"
     container: true
     repo: "{{ weave_kube_image_repo }}"
     tag: "{{ weave_kube_image_tag }}"
     sha256: "{{ weave_kube_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   weave_npc:
     enabled: "{{ kube_network_plugin == 'weave' }}"
     container: true
     repo: "{{ weave_npc_image_repo }}"
     tag: "{{ weave_npc_image_tag }}"
     sha256: "{{ weave_npc_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   contiv:
     enabled: "{{ kube_network_plugin == 'contiv' }}"
     container: true
     repo: "{{ contiv_image_repo }}"
     tag: "{{ contiv_image_tag }}"
     sha256: "{{ contiv_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   contiv_auth_proxy:
     enabled: "{{ kube_network_plugin == 'contiv' }}"
     container: true
     repo: "{{ contiv_auth_proxy_image_repo }}"
     tag: "{{ contiv_auth_proxy_image_tag }}"
     sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   pod_infra:
     enabled: true
     container: true
     repo: "{{ pod_infra_image_repo }}"
     tag: "{{ pod_infra_image_tag }}"
     sha256: "{{ pod_infra_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   install_socat:
     enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}"
     container: true
     repo: "{{ install_socat_image_repo }}"
     tag: "{{ install_socat_image_tag }}"
     sha256: "{{ install_socat_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   nginx:
-    enabled: true
+    enabled: "{{ loadbalancer_apiserver_localhost }}"
     container: true
     repo: "{{ nginx_image_repo }}"
     tag: "{{ nginx_image_tag }}"
     sha256: "{{ nginx_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   dnsmasq:
     enabled: "{{ dns_mode == 'dnsmasq_kubedns' }}"
     container: true
     repo: "{{ dnsmasq_image_repo }}"
     tag: "{{ dnsmasq_image_tag }}"
     sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kubedns:
-    enabled: true
+    enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ kubedns_image_repo }}"
     tag: "{{ kubedns_image_tag }}"
     sha256: "{{ kubedns_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  coredns:
+    enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
+    container: true
+    repo: "{{ coredns_image_repo }}"
+    tag: "{{ coredns_image_tag }}"
+    sha256: "{{ coredns_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   dnsmasq_nanny:
-    enabled: true
+    enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ dnsmasq_nanny_image_repo }}"
     tag: "{{ dnsmasq_nanny_image_tag }}"
     sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   dnsmasq_sidecar:
-    enabled: true
+    enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ dnsmasq_sidecar_image_repo }}"
     tag: "{{ dnsmasq_sidecar_image_tag }}"
     sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kubednsautoscaler:
-    enabled: true
+    enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ kubednsautoscaler_image_repo }}"
     tag: "{{ kubednsautoscaler_image_tag }}"
     sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   testbox:
-    enabled: true
+    enabled: false
     container: true
     repo: "{{ test_image_repo }}"
     tag: "{{ test_image_tag }}"
@@ -309,30 +467,40 @@ downloads:
     repo: "{{ elasticsearch_image_repo }}"
     tag: "{{ elasticsearch_image_tag }}"
     sha256: "{{ elasticsearch_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   fluentd:
     enabled: "{{ efk_enabled }}"
     container: true
     repo: "{{ fluentd_image_repo }}"
     tag: "{{ fluentd_image_tag }}"
     sha256: "{{ fluentd_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kibana:
     enabled: "{{ efk_enabled }}"
     container: true
     repo: "{{ kibana_image_repo }}"
     tag: "{{ kibana_image_tag }}"
     sha256: "{{ kibana_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   helm:
     enabled: "{{ helm_enabled }}"
     container: true
     repo: "{{ helm_image_repo }}"
     tag: "{{ helm_image_tag }}"
     sha256: "{{ helm_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   tiller:
     enabled: "{{ helm_enabled }}"
     container: true
     repo: "{{ tiller_image_repo }}"
     tag: "{{ tiller_image_tag }}"
     sha256: "{{ tiller_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   vault:
     enabled: "{{ cert_management == 'vault' }}"
     container: "{{ vault_deployment_type != 'host' }}"
@@ -347,6 +515,72 @@ downloads:
     unarchive: true
     url: "{{ vault_download_url }}"
     version: "{{ vault_version }}"
+    groups:
+      - vault
+  registry:
+    enabled: "{{ registry_enabled }}"
+    container: true
+    repo: "{{ registry_image_repo }}"
+    tag: "{{ registry_image_tag }}"
+    sha256: "{{ registry_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  registry_proxy:
+    enabled: "{{ registry_enabled }}"
+    container: true
+    repo: "{{ registry_proxy_image_repo }}"
+    tag: "{{ registry_proxy_image_tag }}"
+    sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  local_volume_provisioner:
+    enabled: "{{ local_volume_provisioner_enabled }}"
+    container: true
+    repo: "{{ local_volume_provisioner_image_repo }}"
+    tag: "{{ local_volume_provisioner_image_tag }}"
+    sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  cephfs_provisioner:
+    enabled: "{{ cephfs_provisioner_enabled }}"
+    container: true
+    repo: "{{ cephfs_provisioner_image_repo }}"
+    tag: "{{ cephfs_provisioner_image_tag }}"
+    sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  ingress_nginx_controller:
+    enabled: "{{ ingress_nginx_enabled }}"
+    container: true
+    repo: "{{ ingress_nginx_controller_image_repo }}"
+    tag: "{{ ingress_nginx_controller_image_tag }}"
+    sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
+    groups:
+      - kube-ingress
+  ingress_nginx_default_backend:
+    enabled: "{{ ingress_nginx_enabled }}"
+    container: true
+    repo: "{{ ingress_nginx_default_backend_image_repo }}"
+    tag: "{{ ingress_nginx_default_backend_image_tag }}"
+    sha256: "{{ ingress_nginx_default_backend_digest_checksum|default(None) }}"
+    groups:
+      - kube-ingress
+  cert_manager_controller:
+    enabled: "{{ cert_manager_enabled }}"
+    container: true
+    repo: "{{ cert_manager_controller_image_repo }}"
+    tag: "{{ cert_manager_controller_image_tag }}"
+    sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  cert_manager_ingress_shim:
+    enabled: "{{ cert_manager_enabled }}"
+    container: true
+    repo: "{{ cert_manager_ingress_shim_image_repo }}"
+    tag: "{{ cert_manager_ingress_shim_image_tag }}"
+    sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
 
 download_defaults:
   container: false
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index bbf7cec85f307ea3c9634ea643ab07d8fc9068d3..a5659619c11e384c41f04e7cc8306b770c042c3d 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -7,6 +7,7 @@
   when:
     - download.enabled
     - download.container
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
 
@@ -23,6 +24,7 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
   delegate_to: "{{ download_delegate }}"
   delegate_facts: yes
   run_once: yes
@@ -38,3 +40,4 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml
index 664fa4728580c822b801419a93cc2ea064261124..832fec41ea0a800abb89833dbad28630b26ae14d 100644
--- a/roles/download/tasks/download_file.yml
+++ b/roles/download/tasks/download_file.yml
@@ -13,6 +13,7 @@
   when:
     - download.enabled
     - download.file
+    - group_names | intersect(download.groups) | length
 
 - name: file_download | Download item
   get_url:
@@ -28,6 +29,7 @@
   when:
     - download.enabled
     - download.file
+    - group_names | intersect(download.groups) | length
 
 - name: file_download | Extract archives
   unarchive:
@@ -40,3 +42,4 @@
     - download.enabled
     - download.file
     - download.unarchive|default(False)
+    - group_names | intersect(download.groups) | length
diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml
index a15f78cde44e41bf39fc680268e2dfa3d2fa7b50..1ca84ad671d7ef623f34476b616f074dc395d83f 100644
--- a/roles/download/tasks/sync_container.yml
+++ b/roles/download/tasks/sync_container.yml
@@ -7,6 +7,7 @@
   when:
     - download.enabled
     - download.container
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
 
@@ -17,6 +18,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
 
@@ -27,6 +29,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
 
 - name: "container_download | Update the 'container_changed' fact"
   set_fact:
@@ -36,6 +39,7 @@
     - download.container
     - download_run_once
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
   run_once: "{{ download_run_once }}"
   tags:
     - facts
@@ -53,6 +57,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
 
@@ -68,6 +73,7 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost")
     - (container_changed or not img.stat.exists)
+    - group_names | intersect(download.groups) | length
 
 - name: container_download | copy container images to ansible host
   synchronize:
@@ -87,6 +93,7 @@
     - inventory_hostname == download_delegate
     - download_delegate != "localhost"
     - saved.changed
+    - group_names | intersect(download.groups) | length
 
 - name: container_download | upload container images to nodes
   synchronize:
@@ -108,6 +115,7 @@
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
       inventory_hostname != download_delegate or
       download_delegate == "localhost")
+    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade
@@ -120,6 +128,7 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
       inventory_hostname != download_delegate or download_delegate == "localhost")
+    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 4e122e719b5349f4387cd5a6c81f5e879cd7623b..6c13810c525daecd5055c9f0a76431437b4edc98 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -4,6 +4,7 @@ etcd_cluster_setup: true
 
 etcd_backup_prefix: "/var/backups"
 etcd_data_dir: "/var/lib/etcd"
+etcd_events_data_dir: "/var/lib/etcd-events"
 
 etcd_config_dir: /etc/ssl/etcd
 etcd_cert_dir: "{{ etcd_config_dir }}/ssl"
@@ -11,9 +12,9 @@ etcd_cert_group: root
 # Note: This does not set up DNS entries. It simply adds the following DNS
 # entries to the certificate
 etcd_cert_alt_names:
-  - "etcd.{{ system_namespace }}.svc.{{ dns_domain }}"
-  - "etcd.{{ system_namespace }}.svc"
-  - "etcd.{{ system_namespace }}"
+  - "etcd.kube-system.svc.{{ dns_domain }}"
+  - "etcd.kube-system.svc"
+  - "etcd.kube-system"
   - "etcd"
 
 etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
@@ -21,6 +22,13 @@ etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
 etcd_heartbeat_interval: "250"
 etcd_election_timeout: "5000"
 
+# etcd_snapshot_count: "10000"
+
+# Parameters for ionice
+# -c takes an integer between 0 and 3 or one of the strings none, realtime, best-effort or idle.
+# -n takes an integer between 0 (highest priority) and 7 (lowest priority)
+# etcd_ionice: "-c2 -n0"
+
 etcd_metrics: "basic"
 
 # Limits
diff --git a/roles/etcd/files/make-ssl-etcd.sh b/roles/etcd/files/make-ssl-etcd.sh
index 5544d6639fb41df6ab79f3f9f1db78f082a1c746..ebf0e2afa3578a5dcbf2da4090f9b1511a9837a1 100755
--- a/roles/etcd/files/make-ssl-etcd.sh
+++ b/roles/etcd/files/make-ssl-etcd.sh
@@ -65,7 +65,7 @@ if [ -e "$SSLDIR/ca-key.pem" ]; then
     cp $SSLDIR/{ca.pem,ca-key.pem} .
 else
     openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
-    openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=etcd-ca" > /dev/null 2>&1
+    openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=etcd-ca" > /dev/null 2>&1
 fi
 
 # ETCD member
@@ -75,12 +75,12 @@ if [ -n "$MASTERS" ]; then
         # Member key
         openssl genrsa -out member-${host}-key.pem 2048 > /dev/null 2>&1
         openssl req -new -key member-${host}-key.pem -out member-${host}.csr -subj "/CN=etcd-member-${cn}" -config ${CONFIG} > /dev/null 2>&1
-        openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days 3650 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
+        openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days 36500 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
 
         # Admin key
         openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1
         openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=etcd-admin-${cn}" > /dev/null 2>&1
-        openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 3650 -extensions ssl_client  -extfile ${CONFIG} > /dev/null 2>&1
+        openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 36500 -extensions ssl_client  -extfile ${CONFIG} > /dev/null 2>&1
     done
 fi
 
@@ -90,7 +90,7 @@ if [ -n "$HOSTS" ]; then
         cn="${host%%.*}"
         openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1
         openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=etcd-node-${cn}" > /dev/null 2>&1
-        openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 -extensions ssl_client  -extfile ${CONFIG} > /dev/null 2>&1
+        openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 36500 -extensions ssl_client  -extfile ${CONFIG} > /dev/null 2>&1
     done
 fi
 
diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml
index 247b2ae004c1fc0306f8020f83720394cabcdfba..a0a80e10899adbc967cef63b7f90dfff0aba22f0 100644
--- a/roles/etcd/handlers/backup.yml
+++ b/roles/etcd/handlers/backup.yml
@@ -48,7 +48,7 @@
       snapshot save {{ etcd_backup_directory }}/snapshot.db
   environment:
     ETCDCTL_API: 3
-    ETCDCTL_CERT: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
   retries: 3
   delay: "{{ retry_stagger | random + 3 }}"
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index f6666ee944249d5f0b66de4823ebb3eb84bb61f8..a72cbd515bf7c7c3fcdd16813d07e3cbedb06ff2 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -7,17 +7,33 @@
     - reload etcd
     - wait for etcd up
 
+- name: restart etcd-events
+  command: /bin/true
+  notify:
+    - etcd-events | reload systemd
+    - reload etcd-events
+    - wait for etcd-events up
+
 - import_tasks: backup.yml
 
 - name: etcd | reload systemd
   command: systemctl daemon-reload
 
+- name: etcd-events | reload systemd
+  command: systemctl daemon-reload
+
 - name: reload etcd
   service:
     name: etcd
     state: restarted
   when: is_etcd_master
 
+- name: reload etcd-events
+  service:
+    name: etcd-events
+    state: restarted
+  when: is_etcd_master
+
 - name: wait for etcd up
   uri:
     url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
@@ -29,6 +45,17 @@
   retries: 10
   delay: 5
 
+- name: wait for etcd-events up
+  uri:
+    url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2381/health"
+    validate_certs: no
+    client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
+    client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
+  register: result
+  until: result.status is defined and result.status == 200
+  retries: 10
+  delay: 5
+
 - name: set etcd_secret_changed
   set_fact:
     etcd_secret_changed: true
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index 7af17f69e02c0561abcde0bd60b698c76d3fb25e..d39ba62d4ff8a193f55c1e57722509d8f9720f97 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -1,5 +1,5 @@
 ---
-- name: Configure | Check if member is in cluster
+- name: Configure | Check if member is in etcd cluster
   shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
   register: etcd_member_in_cluster
   ignore_errors: true
@@ -9,8 +9,21 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- name: Configure | Check if member is in etcd-events cluster
+  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+  register: etcd_events_member_in_cluster
+  ignore_errors: true
+  changed_when: false
+  check_mode: no
+  when: is_etcd_master and etcd_events_cluster_setup
+  tags:
+    - facts
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
 
 - name: Configure | Copy etcd.service systemd file
   template:
@@ -20,11 +33,36 @@
   when: is_etcd_master
   notify: restart etcd
 
-- name: Configure | Join member(s) to cluster one at a time
-  include_tasks: join_member.yml
+- name: Configure | Copy etcd-events.service systemd file
+  template:
+    src: "etcd-events-host.service.j2"
+    dest: /etc/systemd/system/etcd-events.service
+    backup: yes
+  when: is_etcd_master and etcd_deployment_type == "host" and etcd_events_cluster_setup
+  notify: restart etcd-events
+
+- name: Configure | Copy etcd-events.service systemd file
+  template:
+    src: "etcd-events-docker.service.j2"
+    dest: /etc/systemd/system/etcd-events.service
+    backup: yes
+  when: is_etcd_master and etcd_deployment_type == "docker" and etcd_events_cluster_setup
+  notify: restart etcd-events
+
+- name: Configure | Join member(s) to etcd cluster one at a time
+  include_tasks: join_etcd_member.yml
   vars:
     target_node: "{{ item }}"
   loop_control:
     pause: 10
   with_items: "{{ groups['etcd'] }}"
   when: inventory_hostname == item and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
+
+- name: Configure | Join member(s) to etcd-events cluster one at a time
+  include_tasks: join_etcd-events_member.yml
+  vars:
+    target_node: "{{ item }}"
+  loop_control:
+    pause: 10
+  with_items: "{{ groups['etcd'] }}"
+  when: inventory_hostname == item and etcd_events_cluster_setup and etcd_events_member_in_cluster.rc != 0 and etcd_events_cluster_is_healthy.rc == 0
diff --git a/roles/etcd/tasks/install_docker.yml b/roles/etcd/tasks/install_docker.yml
index 291bb5f25413e728cff610cd33ab9f8d25ea208a..58e1485a54a0c17fdf29b288914a130e6a62a2c5 100644
--- a/roles/etcd/tasks/install_docker.yml
+++ b/roles/etcd/tasks/install_docker.yml
@@ -18,3 +18,13 @@
     mode: 0755
     backup: yes
   notify: restart etcd
+
+- name: Install etcd-events launch script
+  template:
+    src: etcd-events.j2
+    dest: "{{ bin_dir }}/etcd-events"
+    owner: 'root'
+    mode: 0755
+    backup: yes
+  when: etcd_events_cluster_setup
+  notify: restart etcd-events
diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5a7061880e13a9b1371e7ae2d02152b4e8dc1bd7
--- /dev/null
+++ b/roles/etcd/tasks/join_etcd-events_member.yml
@@ -0,0 +1,47 @@
+---
+- name: Join Member | Add member to cluster
+  shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} member add {{ etcd_member_name }} {{ etcd_events_peer_url }}"
+  register: member_add_result
+  until: member_add_result.rc == 0
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- include_tasks: refresh_config.yml
+  vars:
+    etcd_events_peer_addresses: >-
+      {% for host in groups['etcd'] -%}
+        {%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%}
+          {{ "etcd"+loop.index|string }}-events=https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2382,
+        {%- endif -%}
+        {%- if loop.last -%}
+          {{ etcd_member_name }}-events={{ etcd_events_peer_url }}
+        {%- endif -%}
+      {%- endfor -%}
+  when: target_node == inventory_hostname
+
+- name: Join Member | reload systemd
+  command: systemctl daemon-reload
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure etcd-events is running
+  service:
+    name: etcd-events
+    state: started
+    enabled: yes
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure member is in etcd-events cluster
+  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_events_access_address }}"
+  register: etcd_events_member_in_cluster
+  changed_when: false
+  check_mode: no
+  tags:
+    - facts
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d11037151f7451c2fcbb55e0af8c20c4261a585e
--- /dev/null
+++ b/roles/etcd/tasks/join_etcd_member.yml
@@ -0,0 +1,47 @@
+---
+- name: Join Member | Add member to cluster
+  shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
+  register: member_add_result
+  until: member_add_result.rc == 0
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- include_tasks: refresh_config.yml
+  vars:
+    etcd_peer_addresses: >-
+      {% for host in groups['etcd'] -%}
+        {%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%}
+          {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2380,
+        {%- endif -%}
+        {%- if loop.last -%}
+          {{ etcd_member_name }}={{ etcd_peer_url }}
+        {%- endif -%}
+      {%- endfor -%}
+  when: target_node == inventory_hostname
+
+- name: Join Member | reload systemd
+  command: systemctl daemon-reload
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure etcd is running
+  service:
+    name: etcd
+    state: started
+    enabled: yes
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure member is in cluster
+  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+  register: etcd_member_in_cluster
+  changed_when: false
+  check_mode: no
+  tags:
+    - facts
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/join_member.yml b/roles/etcd/tasks/join_member.yml
index b7801f0c916c792a6a061ebaccfabbe91d744c31..d11037151f7451c2fcbb55e0af8c20c4261a585e 100644
--- a/roles/etcd/tasks/join_member.yml
+++ b/roles/etcd/tasks/join_member.yml
@@ -7,8 +7,8 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
 
 - include_tasks: refresh_config.yml
   vars:
@@ -43,5 +43,5 @@
     - facts
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index a8a9f23ad433aa17866beb64cbbb7c81f60f0a61..a64d9b097d05229cdc3a86e5bde2f82466c15bdf 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -29,13 +29,13 @@
   tags:
     - upgrade
 
-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup
 
-- import_tasks: configure.yml
+- include_tasks: configure.yml
   when: is_etcd_master and etcd_cluster_setup
 
-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup
 
 - name: Restart etcd if certs changed
@@ -43,6 +43,11 @@
   notify: restart etcd
   when: is_etcd_master and etcd_secret_changed|default(false)
 
+- name: Restart etcd-events if certs changed
+  command: /bin/true
+  notify: restart etcd-events
+  when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false)
+
 # reload-systemd
 - meta: flush_handlers
 
@@ -53,11 +58,18 @@
     enabled: yes
   when: is_etcd_master and etcd_cluster_setup
 
+- name: Ensure etcd-events is running
+  service:
+    name: etcd-events
+    state: started
+    enabled: yes
+  when: is_etcd_master and etcd_events_cluster_setup
+
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state instead of `new`.
-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup
 
-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup
diff --git a/roles/etcd/tasks/refresh_config.yml b/roles/etcd/tasks/refresh_config.yml
index 0691d1df9bd80bdeac2a2655dc148be1dbe424cc..9276633013015d277bf28c31d3222047917d1e64 100644
--- a/roles/etcd/tasks/refresh_config.yml
+++ b/roles/etcd/tasks/refresh_config.yml
@@ -5,3 +5,10 @@
     dest: /etc/etcd.env
   notify: restart etcd
   when: is_etcd_master
+
+- name: Refresh config | Create etcd-events config file
+  template:
+    src: etcd-events.env.j2
+    dest: /etc/etcd-events.env
+  notify: restart etcd-events
+  when: is_etcd_master and etcd_events_cluster_setup
diff --git a/roles/etcd/tasks/set_cluster_health.yml b/roles/etcd/tasks/set_cluster_health.yml
index 955208633040ecea66768b3366c28960a05bcf3d..d0202943c481041a551983c33867056360978b40 100644
--- a/roles/etcd/tasks/set_cluster_health.yml
+++ b/roles/etcd/tasks/set_cluster_health.yml
@@ -1,5 +1,5 @@
 ---
-- name: Configure | Check if cluster is healthy
+- name: Configure | Check if etcd cluster is healthy
   shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
   register: etcd_cluster_is_healthy
   ignore_errors: true
@@ -9,5 +9,18 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
+
+- name: Configure | Check if etcd-events cluster is healthy
+  shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
+  register: etcd_events_cluster_is_healthy
+  ignore_errors: true
+  changed_when: false
+  check_mode: no
+  when: is_etcd_master and etcd_events_cluster_setup
+  tags:
+    - facts
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/templates/etcd-events-docker.service.j2 b/roles/etcd/templates/etcd-events-docker.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..271980ab7e4e52ba840e1afc12bd30ad9426dd64
--- /dev/null
+++ b/roles/etcd/templates/etcd-events-docker.service.j2
@@ -0,0 +1,18 @@
+[Unit]
+Description=etcd-events docker wrapper
+Wants=docker.socket
+After=docker.service
+
+[Service]
+User=root
+PermissionsStartOnly=true
+EnvironmentFile=-/etc/etcd-events.env
+ExecStart={{ bin_dir }}/etcd-events
+ExecStartPre=-{{ docker_bin_dir }}/docker rm -f {{ etcd_member_name }}-events
+ExecStop={{ docker_bin_dir }}/docker stop {{ etcd_member_name }}-events
+Restart=always
+RestartSec=15s
+TimeoutStartSec=30s
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/etcd/templates/etcd-events-host.service.j2 b/roles/etcd/templates/etcd-events-host.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..6e0167a8c750f245a393f46063e88cf1c2caaf21
--- /dev/null
+++ b/roles/etcd/templates/etcd-events-host.service.j2
@@ -0,0 +1,16 @@
+[Unit]
+Description=etcd-events
+After=network.target
+
+[Service]
+Type=notify
+User=root
+EnvironmentFile=/etc/etcd-events.env
+ExecStart={{ bin_dir }}/etcd
+NotifyAccess=all
+Restart=always
+RestartSec=10s
+LimitNOFILE=40000
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/etcd/templates/etcd-events.env.j2 b/roles/etcd/templates/etcd-events.env.j2
new file mode 100644
index 0000000000000000000000000000000000000000..e7dffbbfed0b6b3c07c19f51a2d3f54ddf90958b
--- /dev/null
+++ b/roles/etcd/templates/etcd-events.env.j2
@@ -0,0 +1,29 @@
+ETCD_DATA_DIR={{ etcd_events_data_dir }}
+ETCD_ADVERTISE_CLIENT_URLS={{ etcd_events_client_url }}
+ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_events_peer_url }}
+ETCD_INITIAL_CLUSTER_STATE={% if etcd_events_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %}
+
+ETCD_METRICS={{ etcd_metrics }}
+ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2381,https://127.0.0.1:2381
+ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }}
+ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }}
+ETCD_INITIAL_CLUSTER_TOKEN=k8s_events_etcd
+ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2382
+ETCD_NAME={{ etcd_member_name }}-events
+ETCD_PROXY=off
+ETCD_INITIAL_CLUSTER={{ etcd_events_peer_addresses }}
+ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }}
+{% if etcd_snapshot_count is defined %}
+ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }}
+{% endif %}
+
+# TLS settings
+ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
+ETCD_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
+ETCD_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
+ETCD_CLIENT_CERT_AUTH={{ etcd_secure_client | lower}}
+
+ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
+ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
+ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
+ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }}
diff --git a/roles/etcd/templates/etcd-events.j2 b/roles/etcd/templates/etcd-events.j2
new file mode 100644
index 0000000000000000000000000000000000000000..b268479606323bd687c271c1db2f41ce7e00427a
--- /dev/null
+++ b/roles/etcd/templates/etcd-events.j2
@@ -0,0 +1,21 @@
+#!/bin/bash
+{{ docker_bin_dir }}/docker run \
+  --restart=on-failure:5 \
+  --env-file=/etc/etcd-events.env \
+  --net=host \
+  -v /etc/ssl/certs:/etc/ssl/certs:ro \
+  -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \
+  -v {{ etcd_events_data_dir }}:{{ etcd_events_data_dir }}:rw \
+  {% if etcd_memory_limit is defined %}
+  --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \
+  {% endif %}
+  {% if etcd_cpu_limit is defined %}
+  --cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \
+  {% endif %}
+  {% if etcd_blkio_weight is defined %}
+  --blkio-weight={{ etcd_blkio_weight }} \
+  {% endif %}
+  --name={{ etcd_member_name }}-events \
+  {{ etcd_image_repo }}:{{ etcd_image_tag }} \
+  /usr/local/bin/etcd \
+  "$@"
diff --git a/roles/etcd/templates/etcd.env.j2 b/roles/etcd/templates/etcd.env.j2
index 6a917d127a3add8da3efcd8a9fb172102d86bccb..178366d006f0ae05afe76baddbfc028e522fa313 100644
--- a/roles/etcd/templates/etcd.env.j2
+++ b/roles/etcd/templates/etcd.env.j2
@@ -13,6 +13,9 @@ ETCD_NAME={{ etcd_member_name }}
 ETCD_PROXY=off
 ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
 ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }}
+{% if etcd_snapshot_count is defined %}
+ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }}
+{% endif %}
 
 # TLS settings
 ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
diff --git a/roles/etcd/templates/etcd.j2 b/roles/etcd/templates/etcd.j2
index d916a75709bc96f73ab474c4377a10f5af0eeea2..a6628d8fb970a6bd596307c5991f02a645f8201b 100644
--- a/roles/etcd/templates/etcd.j2
+++ b/roles/etcd/templates/etcd.j2
@@ -6,17 +6,19 @@
   -v /etc/ssl/certs:/etc/ssl/certs:ro \
   -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \
   -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:rw \
-  {% if etcd_memory_limit is defined %}
+{% if etcd_memory_limit is defined %}
   --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \
-  {% endif %}
-  --oom-kill-disable \
-  {% if etcd_cpu_limit is defined %}
+{% endif %}
+{% if etcd_cpu_limit is defined %}
   --cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \
-  {% endif %}
-  {% if etcd_blkio_weight is defined %}
+{% endif %}
+{% if etcd_blkio_weight is defined %}
   --blkio-weight={{ etcd_blkio_weight }} \
-  {% endif %}
+{% endif %}
   --name={{ etcd_member_name | default("etcd") }} \
   {{ etcd_image_repo }}:{{ etcd_image_tag }} \
+{% if etcd_ionice is defined %}
+  /bin/ionice {{ etcd_ionice }} \
+{% endif %}
   /usr/local/bin/etcd \
   "$@"
diff --git a/roles/etcd/templates/openssl.conf.j2 b/roles/etcd/templates/openssl.conf.j2
index 48327f0bfa667bd4f325833a6b5e4cc553ba13c3..2f4f7e26275b42c058d42768860bf6be4ffa30ff 100644
--- a/roles/etcd/templates/openssl.conf.j2
+++ b/roles/etcd/templates/openssl.conf.j2
@@ -1,4 +1,4 @@
-[req]
+{% set counter = {'dns': 2,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
 req_extensions = v3_req
 distinguished_name = req_distinguished_name
 
@@ -25,19 +25,18 @@ authorityKeyIdentifier=keyid:always,issuer
 [alt_names]
 DNS.1 = localhost
 {% for host in groups['etcd'] %}
-DNS.{{ 1 + loop.index }} = {{ host }}
+DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
 {% endfor %}
-{% if loadbalancer_apiserver is defined %}
-{% set idx =  groups['etcd'] | length | int + 2 %}
-DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
+{% if apiserver_loadbalancer_domain_name is defined %}
+DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
 {% endif %}
-{% set idx =  groups['etcd'] | length | int + 3 %}
 {% for etcd_alt_name in etcd_cert_alt_names %}
-DNS.{{ idx + 1 + loop.index }} = {{ etcd_alt_name }}
+DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }}
 {% endfor %}
 {% for host in groups['etcd'] %}
-IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
-IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+{% if hostvars[host]['access_ip'] is defined  %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
+{% endif %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
 {% endfor %}
-{% set idx =  groups['etcd'] | length | int * 2 + 1 %}
-IP.{{ idx }} = 127.0.0.1
+IP.{{ counter["ip"] }} = 127.0.0.1
diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml
index dc29e33e9c1b1ff29e3410f1af2d500f2be9a113..4dc4be212d7ed3012271a982dd0a9b978b164f19 100644
--- a/roles/kubernetes-apps/ansible/defaults/main.yml
+++ b/roles/kubernetes-apps/ansible/defaults/main.yml
@@ -10,6 +10,9 @@ dns_memory_requests: 70Mi
 kubedns_min_replicas: 2
 kubedns_nodes_per_replica: 10
 
+# CoreDNS
+coredns_replicas: 2
+
 # Images
 kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
 kubedns_image_tag: "{{ kubedns_version }}"
@@ -41,9 +44,7 @@ netchecker_server_memory_requests: 64M
 # Dashboard
 dashboard_enabled: true
 dashboard_image_repo: gcr.io/google_containers/kubernetes-dashboard-amd64
-dashboard_image_tag: v1.8.1
-dashboard_init_image_repo: gcr.io/google_containers/kubernetes-dashboard-init-amd64
-dashboard_init_image_tag: v1.0.1
+dashboard_image_tag: v1.8.3
 
 # Limits for dashboard
 dashboard_cpu_limit: 100m
diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e77f1e7991e677be6c3514c418fcaca4d8b44717
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -0,0 +1,54 @@
+---
+- name: Kubernetes Apps | Delete old CoreDNS resources
+  kube:
+    name: "coredns"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item }}"
+    state: absent
+  with_items:
+    - 'deploy'
+    - 'configmap'
+    - 'svc'
+  tags:
+    - upgrade
+
+- name: Kubernetes Apps | Delete kubeadm CoreDNS
+  kube:
+    name: "coredns"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "deploy"
+    state: absent
+  when:
+    - kubeadm_enabled|default(false)
+    - kubeadm_init.changed|default(false)
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Kubernetes Apps | Delete old KubeDNS resources
+  kube:
+    name: "kube-dns"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item }}"
+    state: absent
+  with_items:
+    - 'deploy'
+    - 'svc'
+  tags:
+    - upgrade
+
+- name: Kubernetes Apps | Delete kubeadm KubeDNS
+  kube:
+    name: "kube-dns"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item }}"
+    state: absent
+  with_items:
+    - 'deploy'
+    - 'svc'
+  when:
+    - kubeadm_enabled|default(false)
+    - kubeadm_init.changed|default(false)
+    - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ansible/tasks/coredns.yml b/roles/kubernetes-apps/ansible/tasks/coredns.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fcd6c4c6d01d4c5dd1b84b01d72f3b36287d368c
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/tasks/coredns.yml
@@ -0,0 +1,39 @@
+---
+- name: Kubernetes Apps | Lay Down CoreDNS Template
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
+  with_items:
+    - { name: coredns, file: coredns-config.yml, type: configmap }
+    - { name: coredns, file: coredns-sa.yml, type: sa }
+    - { name: coredns, file: coredns-deployment.yml, type: deployment }
+    - { name: coredns, file: coredns-svc.yml, type: svc }
+    - { name: coredns, file: coredns-clusterrole.yml, type: clusterrole }
+    - { name: coredns, file: coredns-clusterrolebinding.yml, type: clusterrolebinding }
+  register: coredns_manifests
+  vars:
+    clusterIP: "{{ skydns_server }}"
+  when:
+    - dns_mode in ['coredns', 'coredns_dual']
+    - inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled or item.type not in rbac_resources
+  tags:
+    - coredns
+
+- name: Kubernetes Apps | Lay Down Secondary CoreDNS Template
+  template:
+    src: "{{ item.src }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
+  with_items:
+    - { name: coredns, src: coredns-deployment.yml, file: coredns-deployment-secondary.yml, type: deployment }
+    - { name: coredns, src: coredns-svc.yml, file: coredns-svc-secondary.yml, type: svc }
+  register: coredns_secondary_manifests
+  vars:
+    clusterIP: "{{ skydns_server_secondary }}"
+    coredns_ordinal_suffix: "-secondary"
+  when:
+    - dns_mode == 'coredns_dual'
+    - inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled or item.type not in rbac_resources
+  tags:
+    - coredns
diff --git a/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
index ce56bd5d10a0cfc57cb9d94b1365abe96bcf3411..4c9ad5c7426f97074322b171d3b3d5866ad2ea71 100644
--- a/roles/kubernetes-apps/ansible/tasks/dashboard.yml
+++ b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
@@ -22,7 +22,7 @@
 - name: Kubernetes Apps | Start dashboard
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/ansible/tasks/kubedns.yml b/roles/kubernetes-apps/ansible/tasks/kubedns.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c4c34ecf8f773142d93bab6498b46ca466c8b6b2
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/tasks/kubedns.yml
@@ -0,0 +1,41 @@
+---
+
+- name: Kubernetes Apps | Lay Down KubeDNS Template
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
+  with_items:
+    - { name: kube-dns, file: kubedns-sa.yml, type: sa }
+    - { name: kube-dns, file: kubedns-deploy.yml, type: deployment }
+    - { name: kube-dns, file: kubedns-svc.yml, type: svc }
+    - { name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa }
+    - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole }
+    - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding }
+    - { name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment }
+  register: kubedns_manifests
+  when:
+    - dns_mode in ['kubedns','dnsmasq_kubedns']
+    - inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled or item.type not in rbac_resources
+  tags:
+    - dnsmasq
+
+# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
+- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
+  command: >
+    {{ bin_dir }}/kubectl patch clusterrole system:kube-dns
+    --patch='{
+               "rules": [
+                 {
+                   "apiGroups" : [""],
+                   "resources" : ["endpoints", "services"],
+                   "verbs": ["list", "watch", "get"]
+                 }
+               ]
+             }'
+  when:
+    - dns_mode in ['kubedns', 'dnsmasq_kubedns']
+    - inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
+  tags:
+    - dnsmasq
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index a25d595ebb6eaa973add06eb3eff6902285dbce3..ceb667f69dab1c72150ba8625bd2367bd155de1c 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -11,82 +11,49 @@
   delay: 2
   when: inventory_hostname == groups['kube-master'][0]
 
-- name: Kubernetes Apps | Delete old kubedns resources
-  kube:
-    name: "kubedns"
-    namespace: "{{ system_namespace }}"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'deploy'
-    - 'svc'
+- name: Kubernetes Apps | Cleanup DNS
+  import_tasks: tasks/cleanup_dns.yml
+  when:
+    - inventory_hostname == groups['kube-master'][0]
   tags:
     - upgrade
 
-- name: Kubernetes Apps | Delete kubeadm kubedns
-  kube:
-    name: "kubedns"
-    namespace: "{{ system_namespace }}"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "deploy"
-    state: absent
+- name: Kubernetes Apps | CoreDNS
+  import_tasks: "tasks/coredns.yml"
   when:
-    - kubeadm_enabled|default(false)
-    - kubeadm_init.changed|default(false)
+    - dns_mode in ['coredns', 'coredns_dual']
     - inventory_hostname == groups['kube-master'][0]
-
-- name: Kubernetes Apps | Lay Down KubeDNS Template
-  template:
-    src: "{{ item.file }}.j2"
-    dest: "{{ kube_config_dir }}/{{ item.file }}"
-  with_items:
-    - { name: kube-dns, file: kubedns-sa.yml, type: sa }
-    - { name: kube-dns, file: kubedns-deploy.yml, type: deployment }
-    - { name: kube-dns, file: kubedns-svc.yml, type: svc }
-    - { name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa }
-    - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole }
-    - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding }
-    - { name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment }
-  register: manifests
-  when:
-    - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
-    - rbac_enabled or item.type not in rbac_resources
   tags:
-    - dnsmasq
+    - coredns
 
-# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
-- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
-  command: >
-    {{ bin_dir }}/kubectl patch clusterrole system:kube-dns
-    --patch='{
-               "rules": [
-                 {
-                   "apiGroups" : [""],
-                   "resources" : ["endpoints", "services"],
-                   "verbs": ["list", "watch", "get"]
-                 }
-               ]
-             }'
+- name: Kubernetes Apps | KubeDNS
+  import_tasks: "tasks/kubedns.yml"
   when:
-    - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
-    - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
+    - dns_mode in ['kubedns', 'dnsmasq_kubedns']
+    - inventory_hostname == groups['kube-master'][0]
   tags:
     - dnsmasq
 
 - name: Kubernetes Apps | Start Resources
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
-  with_items: "{{ manifests.results }}"
+  with_items:
+    - "{{ kubedns_manifests.results | default({}) }}"
+    - "{{ coredns_manifests.results | default({}) }}"
+    - "{{ coredns_secondary_manifests.results | default({}) }}"
   when:
     - dns_mode != 'none'
     - inventory_hostname == groups['kube-master'][0]
     - not item|skipped
+  register: resource_result
+  until: resource_result|succeeded
+  retries: 4
+  delay: 5
   tags:
     - dnsmasq
 
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..4136d603e98c6dcfc4e4b6a101bd967cf18878c6
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+    addonmanager.kubernetes.io/mode: Reconcile
+  name: system:coredns
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  - services
+  - pods
+  - namespaces
+  verbs:
+  - list
+  - watch
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..89becd5b44b1ee2a767d4ebed83cc23ea52c9b7a
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2
@@ -0,0 +1,18 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: system:coredns
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:coredns
+subjects:
+- kind: ServiceAccount
+  name: coredns
+  namespace: kube-system
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..360480c1e731a98d8eb878eceb9e7559f6bc6d5c
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
@@ -0,0 +1,22 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+data:
+  Corefile: |
+    .:53 {
+        errors
+        health
+        kubernetes {{ cluster_name }} in-addr.arpa ip6.arpa {
+          pods insecure
+          upstream /etc/resolv.conf
+          fallthrough in-addr.arpa ip6.arpa
+        }
+        prometheus :9153
+        proxy . /etc/resolv.conf
+        cache 30
+    }
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..5cba6f1f09212d9ed2cee94900f230cd0513d008
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
@@ -0,0 +1,81 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: coredns{{ coredns_ordinal_suffix | default('') }}
+  namespace: kube-system
+  labels:
+    k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/name: "CoreDNS"
+spec:
+  replicas: {{ coredns_replicas }}
+  strategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxUnavailable: 0
+      maxSurge: 10%
+  selector:
+    matchLabels:
+      k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+  template:
+    metadata:
+      labels:
+        k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+{% if rbac_enabled %}
+      serviceAccountName: coredns
+{% endif %}
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+        - key: "CriticalAddonsOnly"
+          operator: "Exists"
+      containers:
+      - name: coredns
+        image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}"
+        imagePullPolicy: {{ k8s_image_pull_policy }}
+        resources:
+          # TODO: Set memory limits when we've profiled the container for large
+          # clusters, then set request = limit to keep this container in
+          # guaranteed class. Currently, this container falls into the
+          # "burstable" category so the kubelet doesn't backoff from restarting it.
+          limits:
+            memory: {{ dns_memory_limit }}
+          requests:
+            cpu: {{ dns_cpu_requests }}
+            memory: {{ dns_memory_requests }}
+        args: [ "-conf", "/etc/coredns/Corefile" ]
+        volumeMounts:
+        - name: config-volume
+          mountPath: /etc/coredns
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        - containerPort: 9153
+          name: metrics
+          protocol: TCP
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+      dnsPolicy: Default
+      volumes:
+        - name: config-volume
+          configMap:
+            name: coredns
+            items:
+            - key: Corefile
+              path: Corefile
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..64d9c4dae27d29ae9eeabd9c763861da69cb5d82
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2
@@ -0,0 +1,9 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: coredns
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..193de10eb976c58f73285efe9aa20fe6b28740d8
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2
@@ -0,0 +1,22 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: coredns{{ coredns_ordinal_suffix | default('') }}
+  namespace: kube-system
+  labels:
+    k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/name: "CoreDNS"
+spec:
+  selector:
+    k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
+  clusterIP: {{ clusterIP }}
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
diff --git a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
index b1ba1481de20b95ebd54575cf1767f207e5a25d4..5f0a40cb3af70e8b68f1c1f147b0b0e3a2db5d8f 100644
--- a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
@@ -25,7 +25,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard-certs
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 type: Opaque
 
 ---
@@ -37,7 +37,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 
 ---
 # ------------------- Dashboard Role & Role Binding ------------------- #
@@ -46,7 +46,7 @@ kind: Role
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: kubernetes-dashboard-minimal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
 - apiGroups: [""]
@@ -81,7 +81,7 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: kubernetes-dashboard-minimal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -89,7 +89,7 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 
 ---
 # ------------------- Gross Hack For anonymous auth through api proxy ------------------- #
@@ -103,7 +103,7 @@ rules:
   resources: ["services/proxy"]
   resourceNames: ["https:kubernetes-dashboard:"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
-- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/{{ system_namespace }}/services/https:kubernetes-dashboard:/proxy/*"]
+- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/*"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
 
 ---
@@ -128,7 +128,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   replicas: 1
   revisionHistoryLimit: 10
@@ -200,7 +200,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   ports:
     - port: 443
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
index f80d3d90c17f95a79b448dc41fcd5c891bf463b1..e29ed4dac54bb710f492d0be2046cf5863719004 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
@@ -17,7 +17,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups: [""]
     resources: ["nodes"]
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
index eb76f2d4ecbcbf5b3623cde5c1b0c864c47cbbc7..3b11c6b9fcb1a8912188b7cbe8579d5e7de375d4 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
@@ -17,11 +17,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: cluster-proportional-autoscaler
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-proportional-autoscaler
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
index 542ae86cec4c8af60cd6eba5efc5fc33a3213912..4c440f653f134e2bde87fcf330ad9ef6c9a60f82 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
@@ -17,4 +17,4 @@ kind: ServiceAccount
 apiVersion: v1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
index df92ee6156bc9411445eb3b402854d2f06922b6f..d7c30ecebcaa9a507542a85f1eeab89ccf8a1980 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
@@ -17,7 +17,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kubedns-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kubedns-autoscaler
     kubernetes.io/cluster-service: "true"
@@ -40,7 +40,7 @@ spec:
             memory: "10Mi"
         command:
         - /cluster-proportional-autoscaler
-        - --namespace={{ system_namespace }}
+        - --namespace=kube-system
         - --configmap=kubedns-autoscaler
         # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
         - --target=Deployment/kube-dns
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
index 682bdf49130050e9c74a5dbda7cb8e0bc0a0acce..cfce65f0efe6b8843b691e3074e478efa44dca3f 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kube-dns
-  namespace: "{{system_namespace}}"
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
index f399fd6f4b400453aaece8f61807b5cad5b55b28..296a3a938201181734d041e404d637bdd6e738c7 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
index 1c4710db13b20db82fba293c29a0f92b1fbec7e0..6bc5f9240e13469410a80eb613a2e4ed32f1e3c5 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index 5bf6709495e050ba24d526bc1bc5b7c903536358..0511b7be52240987773c24131c5c03c691a7cccd 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -16,11 +16,13 @@
     src: "node-crb.yml.j2"
     dest: "{{ kube_config_dir }}/node-crb.yml"
   register: node_crb_manifest
-  when: rbac_enabled
+  when:
+    - rbac_enabled
+    - inventory_hostname == groups['kube-master'][0]
 
 - name: Apply workaround to allow all nodes with cert O=system:nodes to register
   kube:
-    name: "system:node"
+    name: "kubespray:system:node"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "clusterrolebinding"
     filename: "{{ kube_config_dir }}/node-crb.yml"
@@ -28,32 +30,101 @@
   when:
     - rbac_enabled
     - node_crb_manifest.changed
+    - inventory_hostname == groups['kube-master'][0]
 
-# This is not a cluster role, but should be run after kubeconfig is set on master
-- name: Write kube system namespace manifest
+- name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet
   template:
-    src: namespace.j2
-    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Check if kube system namespace exists
-  command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
-  register: 'kubesystem'
-  changed_when: False
-  failed_when: False
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Create kube system namespace
-  command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  register: create_system_ns
-  until: create_system_ns.rc == 0
-  changed_when: False
-  when: inventory_hostname == groups['kube-master'][0] and kubesystem.rc != 0
-  tags:
-    - apps
+    src: "node-webhook-cr.yml.j2"
+    dest: "{{ kube_config_dir }}/node-webhook-cr.yml"
+  register: node_webhook_cr_manifest
+  when:
+    - rbac_enabled
+    - kubelet_authorization_mode_webhook
+    - inventory_hostname == groups['kube-master'][0]
+  tags: node-webhook
+
+- name: Apply webhook ClusterRole
+  kube:
+    name: "system:node-webhook"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "clusterrole"
+    filename: "{{ kube_config_dir }}/node-webhook-cr.yml"
+    state: latest
+  when:
+    - rbac_enabled
+    - kubelet_authorization_mode_webhook
+    - node_webhook_cr_manifest.changed
+    - inventory_hostname == groups['kube-master'][0]
+  tags: node-webhook
+
+- name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole
+  template:
+    src: "node-webhook-crb.yml.j2"
+    dest: "{{ kube_config_dir }}/node-webhook-crb.yml"
+  register: node_webhook_crb_manifest
+  when:
+    - rbac_enabled
+    - kubelet_authorization_mode_webhook
+    - inventory_hostname == groups['kube-master'][0]
+  tags: node-webhook
+
+- name: Grant system:nodes the webhook ClusterRole
+  kube:
+    name: "system:node-webhook"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "clusterrolebinding"
+    filename: "{{ kube_config_dir }}/node-webhook-crb.yml"
+    state: latest
+  when:
+    - rbac_enabled
+    - kubelet_authorization_mode_webhook
+    - node_webhook_crb_manifest.changed
+    - inventory_hostname == groups['kube-master'][0]
+  tags: node-webhook
+
+- name: Check if vsphere-cloud-provider ClusterRole exists
+  command: "{{ bin_dir }}/kubectl get clusterroles system:vsphere-cloud-provider"
+  register: vsphere_cloud_provider
+  ignore_errors: true
+  when:
+    - rbac_enabled
+    - cloud_provider is defined
+    - cloud_provider == 'vsphere'
+    - kube_version | version_compare('v1.9.0', '>=')
+    - kube_version | version_compare('v1.9.3', '<=')
+    - inventory_hostname == groups['kube-master'][0]
+  tags: vsphere
+
+- name: Write vsphere-cloud-provider ClusterRole manifest
+  template:
+    src: "vsphere-rbac.yml.j2"
+    dest: "{{ kube_config_dir }}/vsphere-rbac.yml"
+  register: vsphere_rbac_manifest
+  when:
+    - rbac_enabled
+    - cloud_provider is defined
+    - cloud_provider == 'vsphere'
+    - vsphere_cloud_provider.rc is defined
+    - vsphere_cloud_provider.rc != 0
+    - kube_version | version_compare('v1.9.0', '>=')
+    - kube_version | version_compare('v1.9.3', '<=')
+    - inventory_hostname == groups['kube-master'][0]
+  tags: vsphere
+
+- name: Apply vsphere-cloud-provider ClusterRole
+  kube:
+    name: "system:vsphere-cloud-provider"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "clusterrolebinding"
+    filename: "{{ kube_config_dir }}/vsphere-rbac.yml"
+    state: latest
+  when:
+    - rbac_enabled
+    - cloud_provider is defined
+    - cloud_provider == 'vsphere'
+    - vsphere_cloud_provider.rc is defined
+    - vsphere_cloud_provider.rc != 0
+    - kube_version | version_compare('v1.9.0', '>=')
+    - kube_version | version_compare('v1.9.3', '<=')
+    - inventory_hostname == groups['kube-master'][0]
+  tags: vsphere
diff --git a/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 b/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
index 9bdf201a21a84c558b75a5763480bb6407ca5388..f2e115a6acfc27b9c7b33bfa58161487678c63e1 100644
--- a/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
+++ b/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
@@ -1,4 +1,4 @@
 apiVersion: v1
 kind: Namespace
 metadata:
-  name: "{{system_namespace}}"
+  name: "kube-system"
diff --git a/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2
index 98e82dff7f296156874c31594dd577fc388161e7..9a4a3c46e103e4fde4d61ee99e94f13eb8daff28 100644
--- a/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2
+++ b/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2
@@ -6,7 +6,7 @@ metadata:
     rbac.authorization.kubernetes.io/autoupdate: "true"
   labels:
     kubernetes.io/bootstrapping: rbac-defaults
-  name: system:node
+  name: kubespray:system:node
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
diff --git a/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..0ac79d3e6ea052502214be705e4d02a078d3179b
--- /dev/null
+++ b/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2
@@ -0,0 +1,20 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+  name: system:node-webhook
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes/proxy
+      - nodes/stats
+      - nodes/log
+      - nodes/spec
+      - nodes/metrics
+    verbs:
+      - "*"
\ No newline at end of file
diff --git a/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..68aed5cb53cb87a99536f1f588505b470ae64452
--- /dev/null
+++ b/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2
@@ -0,0 +1,17 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+  name: system:node-webhook
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:node-webhook
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: system:nodes
diff --git a/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..99da0462f80ecda37b0e3a0757a1abf123bba1e0
--- /dev/null
+++ b/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2
@@ -0,0 +1,35 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: system:vsphere-cloud-provider
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+  - update
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: system:vsphere-cloud-provider
+roleRef:
+  kind: ClusterRole
+  name: system:vsphere-cloud-provider
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: vsphere-cloud-provider
+  namespace: kube-system
diff --git a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
index 8abbe231711db90d374cab33288cea594764dfd3..b6055132b00de1c2e840a77a40410202972fedce 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
@@ -10,7 +10,7 @@
   when: rbac_enabled
 
 - name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n kube-system"
   with_items:
     - "efk-sa.yml"
     - "efk-clusterrolebinding.yml"
@@ -24,7 +24,7 @@
   register: es_deployment_manifest
 
 - name: "ElasticSearch | Create ES deployment"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n kube-system"
   run_once: true
   when: es_deployment_manifest.changed
 
@@ -35,6 +35,6 @@
   register: es_service_manifest
 
 - name: "ElasticSearch | Create ES service"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n kube-system"
   run_once: true
   when: es_service_manifest.changed
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
index a5aba61aef5500c5fbfc6486fa3171275b75a3e6..dd5b9b630f9d4c6561b6f84452e42516da591134 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: efk
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-admin
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
index e79e26be87f8045a1e17a820c45bcd20a1f74596..75d75f6508c7badb071657bc007cf52cfe90db8f 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
index 6d5382e09ada726fcb317f02b293d0130a176a5e..ee2eb8b21484fc6d6885ab7967c6df0992fbe8e3 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: elasticsearch-logging-v1
-  namespace: "{{ system_namespace }}"
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     version: "{{ elasticsearch_image_tag }}"
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
index b7558f9d938cf01d812a9b15cfd5f8f2f329700d..789ecb215a09500001d4541bbc1c373ef8cda42c 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: elasticsearch-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: elasticsearch-logging
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
index c91bf68276e8a7af6837a719f743aa935dde8b3a..f444c79b62f7a9db4e28aa1de0d899fa8fbbd358 100644
--- a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
@@ -17,6 +17,6 @@
   register: fluentd_ds_manifest
 
 - name: "Fluentd | Create fluentd daemonset"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n kube-system"
   run_once: true
   when: fluentd_ds_manifest.changed
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
index 8a8ebbceca88cf0be62612e86a900d16ef3828f6..b7de44dc03340534374d2c6a0adcd1ec5c7732de 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: fluentd-config
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
 data:
   {{ fluentd_config_file }}: |
     # This configuration file for Fluentd / td-agent is used
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
index 960a79e89e7a1a3301c8e54ba36262f73af9ff5d..f23a8851c502cadb6e1ec1e1c174af2c41b3603b 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: "fluentd-es-v{{ fluentd_version }}"
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: fluentd-es
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/kibana/defaults/main.yml b/roles/kubernetes-apps/efk/kibana/defaults/main.yml
index baf07cdf23e31ef4df16b845a312ae4eea251c42..0651a032d062c40ddf5a7c65247bdd721b96465a 100644
--- a/roles/kubernetes-apps/efk/kibana/defaults/main.yml
+++ b/roles/kubernetes-apps/efk/kibana/defaults/main.yml
@@ -4,4 +4,3 @@ kibana_mem_limit: 0M
 kibana_cpu_requests: 100m
 kibana_mem_requests: 0M
 kibana_service_port: 5601
-kibana_base_url: "/api/v1/proxy/namespaces/kube-system/services/kibana-logging"
diff --git a/roles/kubernetes-apps/efk/kibana/tasks/main.yml b/roles/kubernetes-apps/efk/kibana/tasks/main.yml
index ea85682864be14591cca46f1a21dc4f7a8bbf85d..424b313b80c96830915b922100276f66a3b4c243 100644
--- a/roles/kubernetes-apps/efk/kibana/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/kibana/tasks/main.yml
@@ -10,7 +10,7 @@
     filename: "{{kube_config_dir}}/kibana-deployment.yaml"
     kubectl: "{{bin_dir}}/kubectl"
     name: "kibana-logging"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     resource: "deployment"
     state: "latest"
   with_items: "{{ kibana_deployment_manifest.changed }}"
@@ -27,7 +27,7 @@
     filename: "{{kube_config_dir}}/kibana-service.yaml"
     kubectl: "{{bin_dir}}/kubectl"
     name: "kibana-logging"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     resource: "svc"
     state: "latest"
   with_items: "{{ kibana_service_manifest.changed }}"
diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
index c48413bd0e8769e5b1f27a861fb08c3e8cbe9e2a..4fdf54c042112941fdee778f9cb3ebe42bcc9674 100644
--- a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kibana-logging
-  namespace: "{{ system_namespace  }}"
+  namespace: "kube-system"
   labels:
     k8s-app: kibana-logging
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2
index 241b896f05155a661fcbfabd9220463eabf26e39..5cff3c628094e0cad46e5251a3ecb74d16536136 100644
--- a/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2
+++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kibana-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: kibana-logging
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5b338a793d50839543b227f1180afb0e3dbde005
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md
@@ -0,0 +1,78 @@
+CephFS Volume Provisioner for Kubernetes 1.5+
+=============================================
+
+[![Docker Repository on Quay](https://quay.io/repository/external_storage/cephfs-provisioner/status "Docker Repository on Quay")](https://quay.io/repository/external_storage/cephfs-provisioner)
+
+Using Ceph volume client
+
+Development
+-----------
+
+Compile the provisioner
+
+``` console
+make
+```
+
+Make the container image and push to the registry
+
+``` console
+make push
+```
+
+Test instruction
+----------------
+
+-   Start Kubernetes local cluster
+
+See <https://kubernetes.io/>.
+
+-   Create a Ceph admin secret
+
+``` bash
+ceph auth get client.admin 2>&1 | grep "key = " | awk '{print $3}' | xargs echo -n > /tmp/secret
+kubectl create ns cephfs
+kubectl create secret generic ceph-secret-admin --from-file=/tmp/secret --namespace=cephfs
+```
+
+-   Start CephFS provisioner
+
+The following example uses `cephfs-provisioner-1` as the identity for the instance and assumes kubeconfig is at `/root/.kube`. The identity should remain the same if the provisioner restarts. If there are multiple provisioners, each should have a different identity.
+
+``` bash
+docker run -ti -v /root/.kube:/kube -v /var/run/kubernetes:/var/run/kubernetes --privileged --net=host  cephfs-provisioner /usr/local/bin/cephfs-provisioner -master=http://127.0.0.1:8080 -kubeconfig=/kube/config -id=cephfs-provisioner-1
+```
+
+Alternatively, deploy it in kubernetes, see [deployment](deploy/README.md).
+
+-   Create a CephFS Storage Class
+
+Replace the Ceph monitor's IP in [example/class.yaml](example/class.yaml) with your own and create the storage class (a rough sketch of such a class follows the command):
+
+``` bash
+kubectl create -f example/class.yaml
+```
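+
+A hypothetical class along these lines is expected (the actual `example/class.yaml` may differ):
+
+``` yaml
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: cephfs
+provisioner: ceph.com/cephfs
+parameters:
+  monitors: 172.24.0.6:6789            # replace with your Ceph monitor address(es)
+  adminId: admin
+  adminSecretName: ceph-secret-admin
+  adminSecretNamespace: cephfs
+```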
+
+-   Create a claim
+
+``` bash
+kubectl create -f example/claim.yaml
+```
+
+-   Create a Pod using the claim
+
+``` bash
+kubectl create -f example/test-pod.yaml
+```
+
+Known limitations
+-----------------
+
+-   Kernel CephFS doesn't work with SELinux; setting an SELinux label in a Pod's securityContext will not work.
+-   Kernel CephFS doesn't support quota or capacity; capacity requested by a PVC is not enforced or validated.
+-   Currently each Ceph user created by the provisioner has `allow r` MDS cap to permit CephFS mount.
+
+Acknowledgement
+---------------
+
+Inspired by CephFS Manila provisioner and conversation with John Spray
diff --git a/roles/kubernetes-apps/cephfs_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml
similarity index 52%
rename from roles/kubernetes-apps/cephfs_provisioner/defaults/main.yml
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml
index 9a3bca1ef8116808a85c93fc3ecef84a6be053fb..aa1bbcf835d210a4163755d2120605a26a19d0a3 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/defaults/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml
@@ -1,8 +1,5 @@
 ---
-cephfs_provisioner_image_repo: quay.io/kubespray/cephfs-provisioner
-cephfs_provisioner_image_tag: 92295a30
-
-cephfs_provisioner_namespace: "{{ system_namespace }}"
+cephfs_provisioner_namespace: "kube-system"
 cephfs_provisioner_cluster: ceph
 cephfs_provisioner_monitors: []
 cephfs_provisioner_admin_id: admin
diff --git a/roles/kubernetes-apps/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
similarity index 86%
rename from roles/kubernetes-apps/cephfs_provisioner/tasks/main.yml
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
index 6e854f05ea7c9ab3cfb3fce070af45777dba7c95..c1fdc624c3dfc632eb6f4802e951ba082d2cf522 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
@@ -3,22 +3,23 @@
 - name: CephFS Provisioner | Create addon dir
   file:
     path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
+    state: directory
     owner: root
     group: root
     mode: 0755
-    recurse: true
 
 - name: CephFS Provisioner | Create manifests
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
   with_items:
+    - { name: cephfs-provisioner-ns, file: cephfs-provisioner-ns.yml, type: ns }
     - { name: cephfs-provisioner-sa, file: cephfs-provisioner-sa.yml, type: sa }
     - { name: cephfs-provisioner-role, file: cephfs-provisioner-role.yml, type: role }
     - { name: cephfs-provisioner-rolebinding, file: cephfs-provisioner-rolebinding.yml, type: rolebinding }
     - { name: cephfs-provisioner-clusterrole, file: cephfs-provisioner-clusterrole.yml, type: clusterrole }
     - { name: cephfs-provisioner-clusterrolebinding, file: cephfs-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
-    - { name: cephfs-provisioner-deploy, file: cephfs-provisioner-deploy.yml, type: deploy }
+    - { name: cephfs-provisioner-rs, file: cephfs-provisioner-rs.yml, type: rs }
     - { name: cephfs-provisioner-secret, file: cephfs-provisioner-secret.yml, type: secret }
     - { name: cephfs-provisioner-sc, file: cephfs-provisioner-sc.yml, type: sc }
   register: cephfs_manifests
@@ -27,7 +28,7 @@
 - name: CephFS Provisioner | Apply manifests
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "{{ cephfs_provisioner_namespace }}"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2
similarity index 92%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2
index 272db0f704c3e4a96858a55e5d4a103894933f42..e714c3cb22ad2340e50ce812cd44041a7fec9cad 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2
@@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: cephfs-provisioner
-  namespace: {{ system_namespace }}
+  namespace: {{ cephfs_provisioner_namespace }}
 rules:
   - apiGroups: [""]
     resources: ["persistentvolumes"]
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-ns.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-ns.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..2a2a67cf6e7f5e54d859dceba787fb115579b4ee
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-ns.yml.j2
@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: {{ cephfs_provisioner_namespace }}
+  labels:
+    name: {{ cephfs_provisioner_namespace }}
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2
similarity index 85%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2
index f84ed32baef76a9ba345da95036a9750e940f9c0..01ab87b7d0163166c36ea1cb859bbb77ac01be9d 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2
@@ -7,6 +7,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: cephfs-provisioner
+    namespace: {{ cephfs_provisioner_namespace }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-deploy.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rs.yml.j2
similarity index 52%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-deploy.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rs.yml.j2
index bfe2117548fcb8282d975f4b85752fe6c7874e79..976f29c05239dd9c2607da95c1bc572fa4e8a87e 100644
--- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-deploy.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rs.yml.j2
@@ -1,21 +1,28 @@
 ---
-apiVersion: extensions/v1beta1
-kind: Deployment
+apiVersion: apps/v1
+kind: ReplicaSet
 metadata:
-  name: cephfs-provisioner
+  name: cephfs-provisioner-v{{ cephfs_provisioner_image_tag }}
   namespace: {{ cephfs_provisioner_namespace }}
+  labels:
+    k8s-app: cephfs-provisioner
+    version: v{{ cephfs_provisioner_image_tag }}
 spec:
   replicas: 1
-  strategy:
-    type: Recreate
+  selector:
+    matchLabels:
+      k8s-app: cephfs-provisioner
+      version: v{{ cephfs_provisioner_image_tag }}
   template:
     metadata:
       labels:
-        app: cephfs-provisioner
+        k8s-app: cephfs-provisioner
+        version: v{{ cephfs_provisioner_image_tag }}
     spec:
       containers:
         - name: cephfs-provisioner
           image: {{ cephfs_provisioner_image_repo }}:{{ cephfs_provisioner_image_tag }}
+          imagePullPolicy: {{ k8s_image_pull_policy }}
           env:
             - name: PROVISIONER_NAME
               value: ceph.com/cephfs
@@ -23,4 +30,6 @@ spec:
             - "/usr/local/bin/cephfs-provisioner"
           args:
             - "-id=cephfs-provisioner-1"
+{% if rbac_enabled %}
       serviceAccount: cephfs-provisioner
+{% endif %}
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2
diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2
similarity index 100%
rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2
rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2
diff --git a/docs/local-storage-provisioner.md b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
similarity index 64%
rename from docs/local-storage-provisioner.md
rename to roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
index 9895cc473d23bd2c557fb5dbe26d705107ee463b..458a483cb7dda80619e066161dc547e3e59885af 100644
--- a/docs/local-storage-provisioner.md
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
@@ -1,58 +1,62 @@
-# Local Storage Provisioner
+Local Storage Provisioner
+=========================
 
 The local storage provisioner is NOT a dynamic storage provisioner as you would
 expect from a cloud provider. Instead, it simply creates PersistentVolumes for
-all manually created volumes located in the directory `local_volume_base_dir`.
+all manually created volumes located in the directory `local_volume_provisioner_base_dir`.
 The default path is /mnt/disks and the rest of this doc will use that path as
 an example.
 
-## Examples to create local storage volumes
+Examples to create local storage volumes
+----------------------------------------
 
 ### tmpfs method:
 
-  ```
-  for vol in vol1 vol2 vol3; do
-    mkdir /mnt/disks/$vol
-    mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
-  done
-  ```
+``` bash
+for vol in vol1 vol2 vol3; do
+  mkdir /mnt/disks/$vol
+  mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
+done
+```
 
 The tmpfs method is not recommended for production because the mount is not
 persistent and data will be deleted on reboot.
 
 ### Mount physical disks
 
-  ```
-  mkdir /mnt/disks/ssd1
-  mount /dev/vdb1 /mnt/disks/ssd1
-  ```
+``` bash
+mkdir /mnt/disks/ssd1
+mount /dev/vdb1 /mnt/disks/ssd1
+```
 
 Physical disks are recommended for production environments because they offer
 complete isolation in terms of I/O and capacity.
 
 ### File-backed sparsefile method
 
-  ```
-  truncate /mnt/disks/disk5 --size 2G
-  mkfs.ext4 /mnt/disks/disk5
-  mkdir /mnt/disks/vol5
-  mount /mnt/disks/disk5 /mnt/disks/vol5
-  ```
+``` bash
+truncate /mnt/disks/disk5 --size 2G
+mkfs.ext4 /mnt/disks/disk5
+mkdir /mnt/disks/vol5
+mount /mnt/disks/disk5 /mnt/disks/vol5
+```
 
 If you have a development environment and only one disk, this is the best way
 to limit the quota of persistent volumes.
 
 ### Simple directories
-  ```
-  for vol in vol6 vol7 vol8; do
-    mkdir /mnt/disks/$vol
-  done
-  ```
+
+``` bash
+for vol in vol6 vol7 vol8; do
+  mkdir /mnt/disks/$vol
+done
+```
 
 This is also acceptable in a development environment, but there is no capacity
 management.
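+
+A pod can then consume one of these volumes through a claim that requests the
+storage class (named `local-storage` by default); a minimal sketch:
+
+``` yaml
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: example-local-claim
+spec:
+  accessModes:
+    - ReadWriteOnce
+  storageClassName: local-storage
+  resources:
+    requests:
+      storage: 5Gi
+```
+
+Because the storage class uses `volumeBindingMode: WaitForFirstConsumer`, the
+claim stays `Pending` until a pod that uses it is scheduled.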
 
-## Usage notes
+Usage notes
+-----------
 
 The volume provisioner cannot calculate volume sizes correctly, so you should
 delete the daemonset pod on the relevant host after creating volumes. The pod
@@ -62,6 +66,7 @@ Make sure to make any mounts persist via /etc/fstab or with systemd mounts (for
 CoreOS/Container Linux). Pods with persistent volume claims will not be
 able to start if the mounts become unavailable.
 
-## Further reading
+Further reading
+---------------
 
-Refer to the upstream docs here: https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume
+Refer to the upstream docs here: <https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume>
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4b18546d32d5ab624d0bcf2afefc32735dd9070e
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+local_volume_provisioner_namespace: "kube-system"
+local_volume_provisioner_base_dir: /mnt/disks
+local_volume_provisioner_mount_dir: /mnt/disks
+local_volume_provisioner_storage_class: local-storage
diff --git a/roles/kubernetes-apps/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
similarity index 52%
rename from roles/kubernetes-apps/local_volume_provisioner/tasks/main.yml
rename to roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
index 9766ea27c7bad524a5dbf0a18f8b23098a1ab3ac..b83e45a2023273f014333f45d7591ea2f714573f 100644
--- a/roles/kubernetes-apps/local_volume_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
@@ -1,8 +1,9 @@
 ---
+
 - name: Local Volume Provisioner | Ensure base dir is created on all hosts
   file:
-    path: "{{ local_volume_base_dir }}"
-    ensure: directory
+    path: "{{ local_volume_provisioner_base_dir }}"
+    state: directory
     owner: root
     group: root
     mode: 0700
@@ -13,31 +14,32 @@
 - name: Local Volume Provisioner | Create addon dir
   file:
     path: "{{ kube_config_dir }}/addons/local_volume_provisioner"
+    state: directory
     owner: root
     group: root
     mode: 0755
-    recurse: true
 
 - name: Local Volume Provisioner | Create manifests
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}"
   with_items:
-    - { name: local-volume-serviceaccount, file: serviceaccount.yml, type, serviceaccount }
-    - { name: local-volume-clusterrolebinding, file: clusterrolebinding.yml, type, clusterrolebinding }
-    - { name: local-volume-configmap, file: configmap.yml, type, configmap }
-    - { name: local-volume-daemonset, file: daemonset.yml, type, daemonset }
-  register: local_volume_manifests
+    - { name: local-volume-provisioner-ns, file: local-volume-provisioner-ns.yml, type: ns }
+    - { name: local-volume-provisioner-sa, file: local-volume-provisioner-sa.yml, type: sa }
+    - { name: local-volume-provisioner-clusterrolebinding, file: local-volume-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
+    - { name: local-volume-provisioner-cm, file: local-volume-provisioner-cm.yml, type: cm }
+    - { name: local-volume-provisioner-ds, file: local-volume-provisioner-ds.yml, type: ds }
+    - { name: local-volume-provisioner-sc, file: local-volume-provisioner-sc.yml, type: sc }
+  register: local_volume_provisioner_manifests
   when: inventory_hostname == groups['kube-master'][0]
 
-
 - name: Local Volume Provisioner | Apply manifests
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "{{ local_volume_provisioner_namespace }}"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}"
     state: "latest"
-  with_items: "{{ local_volume_manifests.results }}"
+  with_items: "{{ local_volume_provisioner_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/local_volume_provisioner/templates/clusterrolebinding.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2
similarity index 51%
rename from roles/kubernetes-apps/local_volume_provisioner/templates/clusterrolebinding.yml.j2
rename to roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2
index 5097d260781b77c994d49dbcac8b19b687a6621b..ab98f1f55f6b5926e3368f3e3f593f3238a47b5b 100644
--- a/roles/kubernetes-apps/local_volume_provisioner/templates/clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2
@@ -1,27 +1,28 @@
 ---
-kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
-  name: local-storage-provisioner-pv-binding
-  namespace: {{ system_namespace }}
+  name: local-volume-provisioner-system-persistent-volume-provisioner
+  namespace: {{ local_volume_provisioner_namespace }}
 subjects:
   - kind: ServiceAccount
-    name: local-storage-admin
-    namespace: {{ system_namespace }}
+    name: local-volume-provisioner
+    namespace: {{ local_volume_provisioner_namespace }}
 roleRef:
   kind: ClusterRole
   name: system:persistent-volume-provisioner
   apiGroup: rbac.authorization.k8s.io
+
 ---
-kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
-  name: local-storage-provisioner-node-binding
-  namespace: {{ system_namespace }}
+  name: local-volume-provisioner-system-node
+  namespace: {{ local_volume_provisioner_namespace }}
 subjects:
   - kind: ServiceAccount
-    name: local-storage-admin
-    namespace: {{ system_namespace }}
+    name: local-volume-provisioner
+    namespace: {{ local_volume_provisioner_namespace }}
 roleRef:
   kind: ClusterRole
   name: system:node
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..8ad76ab2d6a4a1ec7d26408be7822e20e82ee2fd
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2
@@ -0,0 +1,11 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-volume-provisioner
+  namespace: {{ local_volume_provisioner_namespace }}
+data:
+  storageClassMap: |
+    {{ local_volume_provisioner_storage_class }}:
+      hostDir: {{ local_volume_provisioner_base_dir }}
+      mountDir: {{ local_volume_provisioner_mount_dir }}
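+# With the default variables, the map above renders roughly as (illustrative):
+#   local-storage:
+#     hostDir: /mnt/disks
+#     mountDir: /mnt/disks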
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..80a74f5f10f9bcb8d09fb5398004a0256acdf66f
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
@@ -0,0 +1,45 @@
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: local-volume-provisioner
+  namespace: {{ local_volume_provisioner_namespace }}
+  labels:
+    k8s-app: local-volume-provisioner
+    version: {{ local_volume_provisioner_image_tag }}
+spec:
+  selector:
+    matchLabels:
+      k8s-app: local-volume-provisioner
+      version: {{ local_volume_provisioner_image_tag }}
+  template:
+    metadata:
+      labels:
+        k8s-app: local-volume-provisioner
+        version: {{ local_volume_provisioner_image_tag }}
+    spec:
+      serviceAccountName: local-volume-provisioner
+      containers:
+        - name: provisioner
+          image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }}
+          imagePullPolicy: {{ k8s_image_pull_policy }}
+          securityContext:
+            privileged: true
+          env:
+          - name: MY_NODE_NAME
+            valueFrom:
+              fieldRef:
+                fieldPath: spec.nodeName
+          volumeMounts:
+            - name: local-volume-provisioner
+              mountPath: /etc/provisioner/config
+              readOnly: true
+            - name: local-volume-provisioner-hostpath-mnt-disks
+              mountPath: {{ local_volume_provisioner_mount_dir }}
+      volumes:
+        - name: local-volume-provisioner
+          configMap:
+            name: local-volume-provisioner
+        - name: local-volume-provisioner-hostpath-mnt-disks
+          hostPath:
+            path: {{ local_volume_provisioner_base_dir }}
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..04a791010e2d6c4a37873a311cbca963bd44c615
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2
@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: {{ local_volume_provisioner_namespace }}
+  labels:
+    name: {{ local_volume_provisioner_namespace }}
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c78a16b605d3b3bbf3e14f78a2bb7247edd129e1
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2
@@ -0,0 +1,6 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: local-volume-provisioner
+  namespace: {{ local_volume_provisioner_namespace }}
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..bf1f002624ca6f04515f9debe07b8cccd6e5085c
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2
@@ -0,0 +1,7 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: {{ local_volume_provisioner_storage_class }}
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
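+# WaitForFirstConsumer delays binding of a PersistentVolume until a pod using
+# the claim is scheduled, so a volume local to that pod's node is selected.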
diff --git a/roles/kubernetes-apps/external_provisioner/meta/main.yml b/roles/kubernetes-apps/external_provisioner/meta/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b520922d6a705c38ecc853ebc09463271a927f0b
--- /dev/null
+++ b/roles/kubernetes-apps/external_provisioner/meta/main.yml
@@ -0,0 +1,15 @@
+---
+dependencies:
+  - role: kubernetes-apps/external_provisioner/local_volume_provisioner
+    when: local_volume_provisioner_enabled
+    tags:
+      - apps
+      - local-volume-provisioner
+      - external-provisioner
+
+  - role: kubernetes-apps/external_provisioner/cephfs_provisioner
+    when: cephfs_provisioner_enabled
+    tags:
+      - apps
+      - cephfs-provisioner
+      - external-provisioner
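+
+# Illustrative example: these provisioners are expected to be enabled through
+# inventory group_vars, e.g.:
+#   local_volume_provisioner_enabled: true
+#   cephfs_provisioner_enabled: true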
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 06e97aff24210f82078ab5cca7982929e70a39a9..e7b3879446fd71a22c474adc660ff1e64ea39830 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -18,7 +18,7 @@
 - name: Helm | Apply Helm Manifests (RBAC)
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
@@ -28,7 +28,7 @@
 
 - name: Helm | Install/upgrade helm
   command: >
-    {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ system_namespace }}
+    {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace=kube-system
     {% if helm_skip_refresh %} --skip-refresh{% endif %}
     {% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %}
     {% if rbac_enabled %} --service-account=tiller{% endif %}
diff --git a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml
index 0c8db4c78fe4697caf6341b3667bef8295cb92f3..00694181e25a01d0d479f551f6e207105e4494d2 100644
--- a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml
+++ b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: tiller
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: tiller
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-admin
diff --git a/roles/kubernetes-apps/helm/templates/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/tiller-sa.yml
index 26e575fb6a338703903eb09aaa772f55554df4c9..606dbb1471598d0c1333238564c183900793cdd4 100644
--- a/roles/kubernetes-apps/helm/templates/tiller-sa.yml
+++ b/roles/kubernetes-apps/helm/templates/tiller-sa.yml
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: tiller
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/README.md b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b0f008676a1c63fcc0a3da7dab4937b9e202d4fa
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md
@@ -0,0 +1,17 @@
+Deployment files
+================
+
+This directory contains example deployment manifests for cert-manager that can
+be used in place of the official Helm chart.
+
+This is useful if you are deploying cert-manager into an environment without
+Helm, or want to inspect a 'bare minimum' deployment.
+
+Where do these come from?
+-------------------------
+
+The manifests in these subdirectories are generated from the Helm chart
+automatically. The `values.yaml` files used to configure cert-manager can be
+found in [`hack/deploy`](../../hack/deploy/).
+
+They are automatically generated by running `./hack/update-deploy-gen.sh`.
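+
+As a quick usage sketch (these objects are not part of the generated manifests, and all names are placeholders), an Issuer and a Certificate can be created against the `certmanager.k8s.io/v1alpha1` API once the CRDs are installed. The CA issuer below assumes a pre-existing TLS secret `ca-key-pair` holding the CA certificate and key:
+
+``` yaml
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Issuer
+metadata:
+  name: ca-issuer
+  namespace: default
+spec:
+  ca:
+    secretName: ca-key-pair          # assumed pre-existing CA secret
+---
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+  name: example-com
+  namespace: default
+spec:
+  secretName: example-com-tls        # where the signed certificate is stored
+  commonName: example.com
+  issuerRef:
+    name: ca-issuer
+    kind: Issuer
+```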
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..bc6bceb151eac7fa7ec872129decceec3e6a5cfc
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+cert_manager_namespace: "cert-manager"
+cert_manager_cpu_requests: 10m
+cert_manager_cpu_limits: 30m
+cert_manager_memory_requests: 32Mi
+cert_manager_memory_limits: 200Mi
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..eeb29da2d441a5af4a63520e4e54eb093936ce3e
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+
+- name: Cert Manager | Create addon dir
+  file:
+    path: "{{ kube_config_dir }}/addons/cert_manager"
+    state: directory
+    owner: root
+    group: root
+    mode: 0755
+
+- name: Cert Manager | Create manifests
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}"
+  with_items:
+    - { name: cert-manager-ns, file: cert-manager-ns.yml, type: ns }
+    - { name: cert-manager-sa, file: cert-manager-sa.yml, type: sa }
+    - { name: cert-manager-clusterrole, file: cert-manager-clusterrole.yml, type: clusterrole }
+    - { name: cert-manager-clusterrolebinding, file: cert-manager-clusterrolebinding.yml, type: clusterrolebinding }
+    - { name: cert-manager-issuer-crd, file: cert-manager-issuer-crd.yml, type: crd }
+    - { name: cert-manager-clusterissuer-crd, file: cert-manager-clusterissuer-crd.yml, type: crd }
+    - { name: cert-manager-certificate-crd, file: cert-manager-certificate-crd.yml, type: crd }
+    - { name: cert-manager-deploy, file: cert-manager-deploy.yml, type: deploy }
+  register: cert_manager_manifests
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Cert Manager | Apply manifests
+  kube:
+    name: "{{ item.item.name }}"
+    namespace: "{{ cert_manager_namespace }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.item.file }}"
+    state: "latest"
+  with_items: "{{ cert_manager_manifests.results }}"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..48d0c5b49ea947e6673da6261dcaad4be950a1b7
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2
@@ -0,0 +1,21 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: certificates.certmanager.k8s.io
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+spec:
+  group: certmanager.k8s.io
+  version: v1alpha1
+  scope: Namespaced
+  names:
+    kind: Certificate
+    plural: certificates
+    shortNames:
+      - cert
+      - certs
+
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..86601e098d1f3f68f4d0559cf8dc5867738926c0
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2
@@ -0,0 +1,17 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: clusterissuers.certmanager.k8s.io
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+spec:
+  group: certmanager.k8s.io
+  version: v1alpha1
+  names:
+    kind: ClusterIssuer
+    plural: clusterissuers
+  scope: Cluster
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..9d36de5cb123f678c2956358ccabd138a2aacc25
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2
@@ -0,0 +1,25 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: cert-manager
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+rules:
+  - apiGroups: ["certmanager.k8s.io"]
+    resources: ["certificates", "issuers", "clusterissuers"]
+    verbs: ["*"]
+  - apiGroups: [""]
+    # TODO: remove endpoints once 0.4 is released. We include it here in case
+    # users use the 'master' version of the Helm chart with a 0.2.x release of
+    # cert-manager that still performs leader election with Endpoint resources.
+    # We advise users don't do this, but some will anyway and this will reduce
+    # friction.
+    resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"]
+    verbs: ["*"]
+  - apiGroups: ["extensions"]
+    resources: ["ingresses"]
+    verbs: ["*"]
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..d0e481c6cc8aa4e2b386f11d20bbd2a8a76d0ea5
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2
@@ -0,0 +1,18 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: cert-manager
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cert-manager
+subjects:
+  - name: cert-manager
+    namespace: {{ cert_manager_namespace }}
+    kind: ServiceAccount
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..ef66bef0523d395e81ef3811b8913f959a3f9ce3
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2
@@ -0,0 +1,51 @@
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: cert-manager
+  namespace: {{ cert_manager_namespace }}
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        k8s-app: cert-manager
+        release: cert-manager
+      annotations:
+    spec:
+      serviceAccountName: cert-manager
+      containers:
+        - name: cert-manager
+          image: {{ cert_manager_controller_image_repo }}:{{ cert_manager_controller_image_tag }}
+          imagePullPolicy: {{ k8s_image_pull_policy }}
+          args:
+            - --cluster-resource-namespace=$(POD_NAMESPACE)
+          env:
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          resources:
+            requests:
+              cpu: {{ cert_manager_cpu_requests }}
+              memory: {{ cert_manager_memory_requests }}
+            limits:
+              cpu: {{ cert_manager_cpu_limits }}
+              memory: {{ cert_manager_memory_limits }}
+
+        - name: ingress-shim
+          image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }}
+          imagePullPolicy: {{ k8s_image_pull_policy }}
+          resources:
+            requests:
+              cpu: {{ cert_manager_cpu_requests }}
+              memory: {{ cert_manager_memory_requests }}
+            limits:
+              cpu: {{ cert_manager_cpu_limits }}
+              memory: {{ cert_manager_memory_limits }}
+
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..7e344d9f9a666f043f6a6cb3f3e3a89f5670446c
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2
@@ -0,0 +1,17 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: issuers.certmanager.k8s.io
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+spec:
+  group: certmanager.k8s.io
+  version: v1alpha1
+  names:
+    kind: Issuer
+    plural: issuers
+  scope: Namespaced
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..7cf3a282dc113c6b615406b050116b91df9f1db5
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2
@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: {{ cert_manager_namespace }}
+  labels:
+    name: {{ cert_manager_namespace }}
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..ccdd5f430c118fa7b37d3f923aa324bb77a92781
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2
@@ -0,0 +1,11 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cert-manager
+  namespace: {{ cert_manager_namespace }}
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/README.md b/roles/kubernetes-apps/ingress_controller/ingress_nginx/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0fb40f31e3d14beba71f30389ea65cafdb40875c
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/README.md
@@ -0,0 +1,283 @@
+Installation Guide
+==================
+
+Contents
+--------
+
+-   [Mandatory commands](#mandatory-commands)
+-   [Install without RBAC roles](#install-without-rbac-roles)
+-   [Install with RBAC roles](#install-with-rbac-roles)
+-   [Custom Provider](#custom-provider)
+-   [minikube](#minikube)
+-   [AWS](#aws)
+-   [GCE - GKE](#gce---gke)
+-   [Azure](#azure)
+-   [Baremetal](#baremetal)
+-   [Using Helm](#using-helm)
+-   [Verify installation](#verify-installation)
+-   [Detect installed version](#detect-installed-version)
+-   [Deploying the config-map](#deploying-the-config-map)
+
+Generic Deployment
+------------------
+
+The following resources are required for a generic deployment.
+
+### Mandatory commands
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/namespace.yaml \
+    | kubectl apply -f -
+
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/default-backend.yaml \
+    | kubectl apply -f -
+
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/configmap.yaml \
+    | kubectl apply -f -
+
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/tcp-services-configmap.yaml \
+    | kubectl apply -f -
+
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/udp-services-configmap.yaml \
+    | kubectl apply -f -
+```
+
+### Install without RBAC roles
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/without-rbac.yaml \
+    | kubectl apply -f -
+```
+
+### Install with RBAC roles
+
+Please check the [RBAC](rbac.md) document.
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/rbac.yaml \
+    | kubectl apply -f -
+
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/with-rbac.yaml \
+    | kubectl apply -f -
+```
+
+Custom Service Provider Deployment
+----------------------------------
+
+There are cloud-provider-specific YAML files.
+
+### minikube
+
+For standard usage:
+
+``` console
+minikube addons enable ingress
+```
+
+For development:
+
+1.  Disable the ingress addon:
+
+    ``` console
+    $ minikube addons disable ingress
+    ```
+
+2.  Use the [docker daemon](https://github.com/kubernetes/minikube/blob/master/docs/reusing_the_docker_daemon.md)
+3.  [Build the image](../docs/development.md)
+4.  Perform [Mandatory commands](#mandatory-commands)
+5.  Install the `nginx-ingress-controller` deployment [without RBAC roles](#install-without-rbac-roles) or [with RBAC roles](#install-with-rbac-roles)
+6.  Edit the `nginx-ingress-controller` deployment to use your custom image. Local images can be seen by performing `docker images`.
+
+    ``` console
+    $ kubectl edit deployment nginx-ingress-controller -n ingress-nginx
+    ```
+
+    edit the following section:
+
+    ``` yaml
+    image: <IMAGE-NAME>:<TAG>
+    imagePullPolicy: IfNotPresent
+    name: nginx-ingress-controller
+    ```
+
+7.  Confirm the `nginx-ingress-controller` deployment exists:
+
+``` console
+$ kubectl get pods -n ingress-nginx 
+NAME                                       READY     STATUS    RESTARTS   AGE
+default-http-backend-66b447d9cf-rrlf9      1/1       Running   0          12s
+nginx-ingress-controller-fdcdcd6dd-vvpgs   1/1       Running   0          11s
+```
+
+### AWS
+
+In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of `Type=LoadBalancer`.
+This setup requires choosing the layer (L4 or L7) at which to configure the ELB:
+
+-   [Layer 4](https://en.wikipedia.org/wiki/OSI_model#Layer_4:_Transport_Layer): use TCP as the listener protocol for ports 80 and 443.
+-   [Layer 7](https://en.wikipedia.org/wiki/OSI_model#Layer_7:_Application_Layer): use HTTP as the listener protocol for port 80 and terminate TLS in the ELB
+
+Patch the nginx ingress controller deployment to add the flag `--publish-service`
+
+``` console
+kubectl patch deployment -n ingress-nginx nginx-ingress-controller --type='json' \
+  --patch="$(curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/publish-service-patch.yaml)"
+```
+
+For L4:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/service-l4.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l4.yaml
+```
+
+For L7:
+
+Edit the file `provider/aws/service-l7.yaml`, replacing the dummy id with a valid one, e.g. `"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX"`.
+Then execute:
+
+``` console
+kubectl apply -f provider/aws/service-l7.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l7.yaml
+```
+
+This example creates an ELB with just two listeners, one on port 80 and another on port 443.
+
+![Listeners](../docs/images/elb-l7-listener.png)
+
+If the ingress controller uses RBAC, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml
+```
+
+If not, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml
+```
+
+### GCE - GKE
+
+Patch the nginx ingress controller deployment to add the flag `--publish-service`
+
+``` console
+kubectl patch deployment -n ingress-nginx nginx-ingress-controller --type='json' \
+  --patch="$(curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/publish-service-patch.yaml)"
+```
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/gce-gke/service.yaml \
+    | kubectl apply -f -
+```
+
+If the ingress controller uses RBAC, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml
+```
+
+If not, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml
+```
+
+**Important Note:** proxy protocol is not supported in GCE/GKE
+
+### Azure
+
+Patch the nginx ingress controller deployment to add the flag `--publish-service`
+
+``` console
+kubectl patch deployment -n ingress-nginx nginx-ingress-controller --type='json' \
+  --patch="$(curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/publish-service-patch.yaml)"
+```
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/azure/service.yaml \
+    | kubectl apply -f -
+```
+
+If the ingress controller uses RBAC, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml
+```
+
+If not, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml
+```
+
+**Important Note:** proxy protocol is not supported in GCE/GKE
+
+### Baremetal
+
+Using [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport):
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/baremetal/service-nodeport.yaml \
+    | kubectl apply -f -
+```
+
+Using Helm
+----------
+
+The NGINX Ingress controller can be installed via [Helm](https://helm.sh/) using the chart [stable/nginx-ingress](https://github.com/kubernetes/charts/tree/master/stable/nginx-ingress) from the official charts repository.
+To install the chart with the release name `my-nginx`:
+
+``` console
+helm install stable/nginx-ingress --name my-nginx
+```
+
+If the Kubernetes cluster has RBAC enabled, then run:
+
+``` console
+helm install stable/nginx-ingress --name my-nginx --set rbac.create=true
+```
+
+Verify installation
+-------------------
+
+To check if the ingress controller pods have started, run the following command:
+
+``` console
+kubectl get pods --all-namespaces -l app=ingress-nginx --watch
+```
+
+Once the ingress controller pods are running, you can cancel the above command by typing `Ctrl+C`.
+Now, you are ready to create your first ingress.
+
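+A minimal ingress might look like the following sketch (the hostname and the backend Service are placeholders for resources you already have):
+
+``` yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: example-ingress
+  namespace: default
+spec:
+  rules:
+    - host: example.com
+      http:
+        paths:
+          - path: /
+            backend:
+              serviceName: example-service    # placeholder: an existing Service
+              servicePort: 80
+```
+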
+Detect installed version
+------------------------
+
+To detect which version of the ingress controller is running, exec into the pod and run the `nginx-ingress-controller --version` command.
+
+``` console
+POD_NAMESPACE=ingress-nginx
+POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app=ingress-nginx -o jsonpath={.items[0].metadata.name})
+kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version
+```
+
+Deploying the config-map
+------------------------
+
+A config map can be used to configure system components for the nginx-controller. In order to begin using a config-map,
+make sure it has been created and is being used in the deployment.
+
+It is created as seen in the [Mandatory Commands](#mandatory-commands) section above.
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/configmap.yaml \
+    | kubectl apply -f -
+```
+
+and is set up to be used in the deployment [without-rbac](without-rbac.yaml) or [with-rbac](with-rbac.yaml) with the following line:
+
+``` yaml
+- --configmap=$(POD_NAMESPACE)/nginx-configuration
+```
+
+For information on using the config-map, see its [user-guide](../docs/user-guide/configmap.md).
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ff12178092aed6b814b0c957b75cbff1ad3784f0
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+ingress_nginx_namespace: "ingress-nginx"
+ingress_nginx_host_network: false
+ingress_nginx_insecure_port: 80
+ingress_nginx_secure_port: 443
+ingress_nginx_configmap: {}
+ingress_nginx_configmap_tcp_services: {}
+ingress_nginx_configmap_udp_services: {}
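+
+# Illustrative overrides (e.g. in inventory group_vars): run the controller on
+# the host network and tune the nginx config map.
+#   ingress_nginx_host_network: true
+#   ingress_nginx_configmap:
+#     map-hash-bucket-size: "128"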
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0a37e94cdd7b40b6edc7dd5226fe231901a3ef0b
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
@@ -0,0 +1,42 @@
+---
+
+- name: NGINX Ingress Controller | Create addon dir
+  file:
+    path: "{{ kube_config_dir }}/addons/ingress_nginx"
+    state: directory
+    owner: root
+    group: root
+    mode: 0755
+
+- name: NGINX Ingress Controller | Create manifests
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.file }}"
+  with_items:
+    - { name: ingress-nginx-ns, file: ingress-nginx-ns.yml, type: ns }
+    - { name: ingress-nginx-sa, file: ingress-nginx-sa.yml, type: sa }
+    - { name: ingress-nginx-role, file: ingress-nginx-role.yml, type: role }
+    - { name: ingress-nginx-rolebinding, file: ingress-nginx-rolebinding.yml, type: rolebinding }
+    - { name: ingress-nginx-clusterrole, file: ingress-nginx-clusterrole.yml, type: clusterrole }
+    - { name: ingress-nginx-clusterrolebinding, file: ingress-nginx-clusterrolebinding.yml, type: clusterrolebinding }
+    - { name: ingress-nginx-cm, file: ingress-nginx-cm.yml, type: cm }
+    - { name: ingress-nginx-tcp-servicecs-cm, file: ingress-nginx-tcp-servicecs-cm.yml, type: cm }
+    - { name: ingress-nginx-udp-servicecs-cm, file: ingress-nginx-udp-servicecs-cm.yml, type: cm }
+    - { name: ingress-nginx-default-backend-svc, file: ingress-nginx-default-backend-svc.yml, type: svc }
+    - { name: ingress-nginx-default-backend-rs, file: ingress-nginx-default-backend-rs.yml, type: rs }
+    - { name: ingress-nginx-controller-ds, file: ingress-nginx-controller-ds.yml, type: ds }
+  register: ingress_nginx_manifests
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: NGINX Ingress Controller | Apply manifests
+  kube:
+    name: "{{ item.item.name }}"
+    namespace: "{{ ingress_nginx_namespace }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.item.file }}"
+    state: "latest"
+  with_items: "{{ ingress_nginx_manifests.results }}"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..e6c36ef30695cd270c2b8aa045a9f313ade73e50
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2
@@ -0,0 +1,25 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: ingress-nginx
+  namespace: {{ ingress_nginx_namespace }}
+rules:
+  - apiGroups: [""]
+    resources: ["configmaps", "endpoints", "nodes", "pods", "secrets"]
+    verbs: ["list", "watch"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get"]
+  - apiGroups: [""]
+    resources: ["services"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["ingresses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["create", "patch"]
+  - apiGroups: ["extensions"]
+    resources: ["ingresses/status"]
+    verbs: ["update"]
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..8d14af4b7d7b6eeb38c6d86abdfae47814e69745
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2
@@ -0,0 +1,14 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: ingress-nginx
+  namespace: {{ ingress_nginx_namespace }}
+subjects:
+  - kind: ServiceAccount
+    name: ingress-nginx
+    namespace: {{ ingress_nginx_namespace }}
+roleRef:
+  kind: ClusterRole
+  name: ingress-nginx
+  apiGroup: rbac.authorization.k8s.io
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..7e47e81b13cae7655ed70535f96293d2d196721f
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ingress-nginx
+  namespace: {{ ingress_nginx_namespace }}
+  labels:
+    k8s-app: ingress-nginx
+data:
+  {{ ingress_nginx_configmap | to_nice_yaml | indent(2) }}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..52501a4c7780c90473588c0587d9656c0fdc27fd
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
@@ -0,0 +1,82 @@
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: ingress-nginx-controller
+  namespace: {{ ingress_nginx_namespace }}
+  labels:
+    k8s-app: ingress-nginx
+    version: v{{ ingress_nginx_controller_image_tag }}
+  annotations:
+    prometheus.io/port: '10254'
+    prometheus.io/scrape: 'true'
+spec:
+  selector:
+    matchLabels:
+      k8s-app: ingress-nginx
+      version: v{{ ingress_nginx_controller_image_tag }}
+  template:
+    metadata:
+      labels:
+        k8s-app: ingress-nginx
+        version: v{{ ingress_nginx_controller_image_tag }}
+      annotations:
+        prometheus.io/port: '10254'
+        prometheus.io/scrape: 'true'
+    spec:
+{% if ingress_nginx_host_network %}
+      hostNetwork: true
+{% endif %}
+      nodeSelector:
+        node-role.kubernetes.io/ingress: "true"
+      terminationGracePeriodSeconds: 60
+      containers:
+        - name: ingress-nginx-controller
+          image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }}
+          imagePullPolicy: {{ k8s_image_pull_policy }}
+          args:
+            - /nginx-ingress-controller
+            - --default-backend-service=$(POD_NAMESPACE)/ingress-nginx-default-backend
+            - --configmap=$(POD_NAMESPACE)/ingress-nginx
+            - --tcp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-tcp-services
+            - --udp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-udp-services
+            - --annotations-prefix=nginx.ingress.kubernetes.io
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          ports:
+            - name: http
+              containerPort: 80
+              hostPort: {{ ingress_nginx_insecure_port }}
+            - name: https
+              containerPort: 443
+              hostPort: {{ ingress_nginx_secure_port }}
+          livenessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /healthz
+              port: 10254
+              scheme: HTTP
+            initialDelaySeconds: 10
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 1
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /healthz
+              port: 10254
+              scheme: HTTP
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 1
+{% if rbac_enabled %}
+      serviceAccountName: ingress-nginx
+{% endif %}
+
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c0bed920b25fd6511a6d4e2f45f4c694c1eadad7
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2
@@ -0,0 +1,37 @@
+---
+apiVersion: apps/v1
+kind: ReplicaSet
+metadata:
+  name: ingress-nginx-default-backend-v{{ ingress_nginx_default_backend_image_tag }}
+  namespace: {{ ingress_nginx_namespace }}
+  labels:
+    k8s-app: ingress-nginx-default-backend
+    version: v{{ ingress_nginx_default_backend_image_tag }}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      k8s-app: ingress-nginx-default-backend
+      version: v{{ ingress_nginx_default_backend_image_tag }}
+  template:
+    metadata:
+      labels:
+        k8s-app: ingress-nginx-default-backend
+        version: v{{ ingress_nginx_default_backend_image_tag }}
+    spec:
+      terminationGracePeriodSeconds: 60
+      containers:
+        - name: ingress-nginx-default-backend
+          # Any image is permissible as long as:
+          # 1. It serves a 404 page at /
+          # 2. It serves 200 on a /healthz endpoint
+          image: {{ ingress_nginx_default_backend_image_repo }}:{{ ingress_nginx_default_backend_image_tag }}
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 30
+            timeoutSeconds: 5
+          ports:
+            - containerPort: 8080
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..ab23f37995976bcc3c60f33cbe697584ed626f8b
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2
@@ -0,0 +1,14 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: ingress-nginx-default-backend
+  namespace: {{ ingress_nginx_namespace }}
+  labels:
+    k8s-app: ingress-nginx-default-backend
+spec:
+  ports:
+    - port: 80
+      targetPort: 8080
+  selector:
+    k8s-app: ingress-nginx-default-backend
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..1f1236619918d2ce2b36c870adcbe9440e2e473a
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2
@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: {{ ingress_nginx_namespace }}
+  labels:
+    name: {{ ingress_nginx_namespace }}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..9254e035a26c681a9e0f652a7fa32fe9bfe02d79
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2
@@ -0,0 +1,24 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: ingress-nginx
+  namespace: {{ ingress_nginx_namespace }}
+rules:
+  - apiGroups: [""]
+    resources: ["configmaps", "pods", "secrets", "namespaces"]
+    verbs: ["get"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    # Defaults to "<election-id>-<ingress-class>"
+    # Here: "<ingress-controller-leader>-<nginx>"
+    # This has to be adapted if you change either parameter
+    # when launching the nginx-ingress-controller.
+    resourceNames: ["ingress-controller-leader-nginx"]
+    verbs: ["get", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["create"]
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get"]
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..a6a8dec4ba4a11fcb81c7d49750c4112eb0964dc
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2
@@ -0,0 +1,14 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: ingress-nginx
+  namespace: {{ ingress_nginx_namespace }}
+subjects:
+  - kind: ServiceAccount
+    name: ingress-nginx
+    namespace: {{ ingress_nginx_namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: ingress-nginx
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..55d6d65181f4c0a2b4eef3718e3d4bfdf01bd462
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2
@@ -0,0 +1,6 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: ingress-nginx
+  namespace: {{ ingress_nginx_namespace }}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..0a87e91b7433c564e8730a9805aa920fe2666bf5
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ingress-nginx-tcp-services
+  namespace: {{ ingress_nginx_namespace }}
+  labels:
+    k8s-app: ingress-nginx
+data:
+  {{ ingress_nginx_configmap_tcp_services | to_nice_yaml | indent(2) }}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..d943e57185cac2cdd1392df31624d0e2f388c619
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ingress-nginx-udp-services
+  namespace: {{ ingress_nginx_namespace }}
+  labels:
+    k8s-app: ingress-nginx
+data:
+  {{ ingress_nginx_configmap_udp_services | to_nice_yaml | indent(2) }}
diff --git a/roles/kubernetes-apps/ingress_controller/meta/main.yml b/roles/kubernetes-apps/ingress_controller/meta/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..617e9d9a7ccecc4e26d36025f11dc090dc41b7d8
--- /dev/null
+++ b/roles/kubernetes-apps/ingress_controller/meta/main.yml
@@ -0,0 +1,15 @@
+---
+dependencies:
+  - role: kubernetes-apps/ingress_controller/ingress_nginx
+    when: ingress_nginx_enabled
+    tags:
+      - apps
+      - ingress-nginx
+      - ingress-controller
+
+  - role: kubernetes-apps/ingress_controller/cert_manager
+    when: cert_manager_enabled
+    tags:
+      - apps
+      - cert-manager
+      - ingress-controller
diff --git a/roles/kubernetes-apps/istio/defaults/main.yml b/roles/kubernetes-apps/istio/defaults/main.yml
index dc51ea7d67f33cb13661f4b84cbbb002166f5dd3..6124ce42ed804337b7476ba0c759639091a7cd12 100644
--- a/roles/kubernetes-apps/istio/defaults/main.yml
+++ b/roles/kubernetes-apps/istio/defaults/main.yml
@@ -1,32 +1,2 @@
 ---
-istio_enabled: false
-
 istio_namespace: istio-system
-istio_version: "0.2.6"
-
-istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
-istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
-
-istio_proxy_image_repo: docker.io/istio/proxy
-istio_proxy_image_tag: "{{ istio_version }}"
-
-istio_proxy_init_image_repo: docker.io/istio/proxy_init
-istio_proxy_init_image_tag: "{{ istio_version }}"
-
-istio_ca_image_repo: docker.io/istio/istio-ca
-istio_ca_image_tag: "{{ istio_version }}"
-
-istio_mixer_image_repo: docker.io/istio/mixer
-istio_mixer_image_tag: "{{ istio_version }}"
-
-istio_pilot_image_repo: docker.io/istio/pilot
-istio_pilot_image_tag: "{{ istio_version }}"
-
-istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
-istio_proxy_debug_image_tag: "{{ istio_version }}"
-
-istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
-istio_sidecar_initializer_image_tag: "{{ istio_version }}"
-
-istio_statsd_image_repo: prom/statsd-exporter
-istio_statsd_image_tag: latest
diff --git a/roles/kubernetes-apps/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/local_volume_provisioner/defaults/main.yml
deleted file mode 100644
index d1e1d1d6934bdc6d8efddf97e131966c80514c9b..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/local_volume_provisioner/defaults/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-local_volume_provisioner_bootstrap_image_repo: quay.io/external_storage/local-volume-provisioner-bootstrap
-local_volume_provisioner_bootstrap_image_tag: v1.0.1
-
-local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
-local_volume_provisioner_image_tag: v1.0.1
diff --git a/roles/kubernetes-apps/local_volume_provisioner/templates/configmap.yml.j2 b/roles/kubernetes-apps/local_volume_provisioner/templates/configmap.yml.j2
deleted file mode 100644
index fd8a7a6373437177375e275bad4623b4b8784cba..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/local_volume_provisioner/templates/configmap.yml.j2
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# The config map is used to configure local volume discovery for Local SSDs on GCE and GKE. 
-# It is a map from storage class to its mount configuration.
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: local-volume-config
-  namespace: {{ system_namespace }}
-data:
-  "{{ local_volume_storage_class }}": |
-    {
-      "hostDir": "{{ local_volume_base_dir }}",
-      "mountDir": "{{ local_volume_mount_dir }}"
-    }
diff --git a/roles/kubernetes-apps/local_volume_provisioner/templates/daemonset.yml.j2 b/roles/kubernetes-apps/local_volume_provisioner/templates/daemonset.yml.j2
deleted file mode 100644
index 6ffe5e36b8a164e003e0e4eec440577f94fa1b0d..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/local_volume_provisioner/templates/daemonset.yml.j2
+++ /dev/null
@@ -1,45 +0,0 @@
----
-kind: DaemonSet
-apiVersion: extensions/v1beta1
-metadata:
-  name: local-volume-provisioner
-  namespace: "{{ system_namespace }}"
-spec:
-  template:
-    metadata:
-      labels:
-        app: local-volume-provisioner
-    spec:
-      containers:
-        - name: provisioner
-          image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }}
-          imagePullPolicy: {{ k8s_image_pull_policy }}
-          securityContext:
-            privileged: true
-          volumeMounts:
-            - name: discovery-vol
-              mountPath: "{{ local_volume_mount_dir }}"
-            - name: local-volume-config
-              mountPath: /etc/provisioner/config/
-          env:
-            - name: MY_NODE_NAME
-              valueFrom:
-                fieldRef:
-                  apiVersion: v1
-                  fieldPath: spec.nodeName
-            - name: MY_NAMESPACE
-              valueFrom:
-                fieldRef:
-                  apiVersion: v1
-                  fieldPath: metadata.namespace
-            - name: VOLUME_CONFIG_NAME
-              value: "local-volume-config"
-      volumes:
-        - name: discovery-vol
-          hostPath:
-            path: "{{ local_volume_base_dir }}"
-        - configMap:
-            defaultMode: 420
-            name: local-volume-config
-          name: local-volume-config
-      serviceAccount: local-storage-admin
diff --git a/roles/kubernetes-apps/local_volume_provisioner/templates/serviceaccount.yml.j2 b/roles/kubernetes-apps/local_volume_provisioner/templates/serviceaccount.yml.j2
deleted file mode 100644
index 182248a6a25cac119eb3471d21a4f129cdac466c..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/local_volume_provisioner/templates/serviceaccount.yml.j2
+++ /dev/null
@@ -1,5 +0,0 @@
----
-kind: ServiceAccount
-apiVersion: v1
-metadata:
-  name: local-storage-admin
diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml
index 4f657bd2759a750e16d8a5bdde5cc4d92758879b..bc05e6f8c8b26ca3baa479e2d88a00f8f5ad2570 100644
--- a/roles/kubernetes-apps/meta/main.yml
+++ b/roles/kubernetes-apps/meta/main.yml
@@ -27,19 +27,11 @@ dependencies:
       - apps
       - registry
 
-  - role: kubernetes-apps/local_volume_provisioner
-    when: local_volume_provisioner_enabled
+  - role: kubernetes-apps/metrics
+    when: prometheus_operator_enabled
     tags:
       - apps
-      - local_volume_provisioner
-      - storage
-
-  - role: kubernetes-apps/cephfs_provisioner
-    when: cephfs_provisioner_enabled
-    tags:
-      - apps
-      - cephfs_provisioner
-      - storage
+      - metrics
 
   # istio role should be last because it takes a long time to initialize and
   # will cause timeouts trying to start other addons.
diff --git a/roles/kubernetes-apps/metrics/defaults/main.yml b/roles/kubernetes-apps/metrics/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..72018e6f5f58f6b04921e09c6000ae39d988a386
--- /dev/null
+++ b/roles/kubernetes-apps/metrics/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# Prometheus Operator. Needed for k8s metrics. Requires Helm to be installed.
+prometheus_operator_enabled: false
+
+# K8s cluster metrics. Requires Helm and the Prometheus Operator to be installed.
+k8s_metrics_enabled: false
+
+# Separate namespace for monitoring/metrics
+monitoring_namespace: "monitoring"
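+
+# Illustrative example (an assumption, not a default): to deploy the full metrics
+# stack, set both flags in your group_vars, e.g.
+#   prometheus_operator_enabled: true
+#   k8s_metrics_enabled: true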
diff --git a/roles/kubernetes-apps/metrics/tasks/main.yml b/roles/kubernetes-apps/metrics/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e2280e98b3ec59c330223592471a493b71a953e8
--- /dev/null
+++ b/roles/kubernetes-apps/metrics/tasks/main.yml
@@ -0,0 +1,32 @@
+---
+- name: Metrics | Make sure Helm is installed
+  command: "{{ bin_dir }}/helm version"
+  register: helm_ready_result
+  until: helm_ready_result|succeeded
+  retries: 4
+  delay: 5
+  when:
+    - prometheus_operator_enabled
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Metrics | Add coreos repo
+  command: "{{ bin_dir }}/helm repo add coreos https://s3-eu-west-1.amazonaws.com/coreos-charts/stable/"
+  when:
+    - prometheus_operator_enabled
+    - inventory_hostname == groups['kube-master'][0]
+  run_once: true
+
+- name: Metrics | Install Prometheus Operator
+  command: "{{ bin_dir }}/helm upgrade --install prometheus-operator coreos/prometheus-operator --namespace {{ monitoring_namespace }}"
+  when:
+    - prometheus_operator_enabled
+    - inventory_hostname == groups['kube-master'][0]
+  run_once: true
+
+- name: Metrics | Install K8s cluster metrics
+  command: "{{ bin_dir }}/helm upgrade --install kube-prometheus     coreos/kube-prometheus     --namespace {{ monitoring_namespace }}"
+  when:
+    - prometheus_operator_enabled
+    - k8s_metrics_enabled
+    - inventory_hostname == groups['kube-master'][0]
+  run_once: true
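+
+# Illustrative manual check (an assumption, not part of the role): once the charts
+# are installed, the workloads should appear in the monitoring namespace, e.g.
+#   kubectl --namespace monitoring get pods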
diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
index f17e45c7abd7d16fb8a7f1f77e25de4b0965324e..4c8295c1e37038b3b0d234b3bff2c3c468c15c75 100644
--- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Start Calico resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
index cbe4f0ac7e1103fc07f6ffb3a52906f2cf60ed99..3640fe762eec1aa9385d89993ce8608825e2802f 100644
--- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Canal | Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
index 2359fe2d496689a61e6c9da6e82d783637d7d41a..5d90bdb018257a0fbf43933701903bb174c174ad 100755
--- a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Cilium | Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
@@ -11,7 +11,7 @@
   when: inventory_hostname == groups['kube-master'][0] and not item|skipped
 
 - name: Cilium | Wait for pods to run
-  command: "{{bin_dir}}/kubectl -n {{system_namespace}} get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
+  command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
   register: pods_not_ready
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30
diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
index 330acc1cd0464d698819c7bedf2ea8de29a59bb4..5289296dc65104d528d6625aba235e2d3f99d345 100644
--- a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
@@ -3,7 +3,7 @@
 - name: Contiv | Create Kubernetes resources
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ contiv_config_dir }}/{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
index 09603a79430e04d2e904419673f70c9d25c43255..bdf954bf99d673bb628a8db1d562e82ed265882b 100644
--- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Flannel | Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
index 66d900d55e028fb881e2a23d69a0581b10fbf095..53ad953b53a2a7f371964171a0662010c88e70bd 100644
--- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
@@ -5,7 +5,7 @@
     kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/weave-net.yml"
     resource: "ds"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     state: "latest"
   when: inventory_hostname == groups['kube-master'][0]
 
diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
index ba11627992c13ac62ee3ed061eff6f5bbc0e3432..62e929f413d7b2ec28619d9d1f3cb02171708a0d 100644
--- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
@@ -12,7 +12,7 @@
     name: calico-policy-controller
     kubectl: "{{bin_dir}}/kubectl"
     resource: rs
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     state: absent
   run_once: true
 
@@ -32,7 +32,7 @@
 - name: Start of Calico kube controllers
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
index 7e1311b9286cd1f3f3b2200e7ed474017e4e3991..d7083e3e6b30f6b38d2668d43381f4e326bf7bd7 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
@@ -2,7 +2,7 @@ apiVersion: apps/v1beta2
 kind: Deployment
 metadata:
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: calico-kube-controllers
     kubernetes.io/cluster-service: "true"
@@ -15,7 +15,7 @@ spec:
   template:
     metadata:
       name: calico-kube-controllers
-      namespace: {{ system_namespace }}
+      namespace: kube-system
       labels:
         kubernetes.io/cluster-service: "true"
         k8s-app: calico-kube-controllers
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2
index 82c2f3e44a978e78872ccfb84679d47285b5353b..d05e986a4c52033b4f821776dd22c2f40d2a3cc5 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2
@@ -3,7 +3,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups:
     - ""
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2
index 38853a413575446c583a6609b00e433af0bb42b1..2e51184811e4f177da80366344dccb62dce0f786 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2
@@ -10,4 +10,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2
index bf8958976d859a2b67ba021954dcb95300900be1..e42e89d1894628f9cd5931c2b721038d1f938b57 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/registry/README.md b/roles/kubernetes-apps/registry/README.md
index 59542355edaab79f44b70bed96f40d70fa463211..81615631e809d5aeccdf9f329e5426cddff352c6 100644
--- a/roles/kubernetes-apps/registry/README.md
+++ b/roles/kubernetes-apps/registry/README.md
@@ -1,36 +1,39 @@
-# Private Docker Registry in Kubernetes
+Private Docker Registry in Kubernetes
+=====================================
 
 Kubernetes offers an optional private Docker registry addon, which you can turn
-on when you bring up a cluster or install later.  This gives you a place to
+on when you bring up a cluster or install later. This gives you a place to
 store truly private Docker images for your cluster.
 
-## How it works
+How it works
+------------
 
-The private registry runs as a `Pod` in your cluster.  It does not currently
+The private registry runs as a `Pod` in your cluster. It does not currently
 support SSL or authentication, which triggers Docker's "insecure registry"
-logic.  To work around this, we run a proxy on each node in the cluster,
+logic. To work around this, we run a proxy on each node in the cluster,
 exposing a port onto the node (via a hostPort), which Docker accepts as
 "secure", since it is accessed by `localhost`.
 
-## Turning it on
+Turning it on
+-------------
 
-Some cluster installs (e.g. GCE) support this as a cluster-birth flag.  The
+Some cluster installs (e.g. GCE) support this as a cluster-birth flag. The
 `ENABLE_CLUSTER_REGISTRY` variable in `cluster/gce/config-default.sh` governs
-whether the registry is run or not.  To set this flag, you can specify
-`KUBE_ENABLE_CLUSTER_REGISTRY=true` when running `kube-up.sh`.  If your cluster
-does not include this flag, the following steps should work.  Note that some of
+whether the registry is run or not. To set this flag, you can specify
+`KUBE_ENABLE_CLUSTER_REGISTRY=true` when running `kube-up.sh`. If your cluster
+does not include this flag, the following steps should work. Note that some of
 this is cloud-provider specific, so you may have to customize it a bit.
 
 ### Make some storage
 
-The primary job of the registry is to store data.  To do that we have to decide
-where to store it.  For cloud environments that have networked storage, we can
-use Kubernetes's `PersistentVolume` abstraction.  The following template is
+The primary job of the registry is to store data. To do that we have to decide
+where to store it. For cloud environments that have networked storage, we can
+use Kubernetes's `PersistentVolume` abstraction. The following template is
 expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
 other situations:
 
 <!-- BEGIN MUNGE: EXAMPLE registry-pv.yaml.in -->
-```yaml
+``` yaml
 kind: PersistentVolume
 apiVersion: v1
 metadata:
@@ -64,14 +67,15 @@ just want to kick the tires on this without committing to it, you can easily
 adapt the `ReplicationController` specification below to use a simple
 `emptyDir` volume instead of a `persistentVolumeClaim`.
 
-## Claim the storage
+Claim the storage
+-----------------
 
 Now that the Kubernetes cluster knows that some storage exists, you can put a
-claim on that storage.  As with the `PersistentVolume` above, you can start
+claim on that storage. As with the `PersistentVolume` above, you can start
 with the `salt` template:
 
 <!-- BEGIN MUNGE: EXAMPLE registry-pvc.yaml.in -->
-```yaml
+``` yaml
 kind: PersistentVolumeClaim
 apiVersion: v1
 metadata:
@@ -90,15 +94,16 @@ spec:
 
 This tells Kubernetes that you want to use storage, and the `PersistentVolume`
 you created before will be bound to this claim (unless you have other
-`PersistentVolumes` in which case those might get bound instead).  This claim
+`PersistentVolumes` in which case those might get bound instead). This claim
 gives you the right to use this storage until you release the claim.
 
-## Run the registry
+Run the registry
+----------------
 
 Now we can run a Docker registry:
 
 <!-- BEGIN MUNGE: EXAMPLE registry-rc.yaml -->
-```yaml
+``` yaml
 apiVersion: v1
 kind: ReplicationController
 metadata:
@@ -146,12 +151,13 @@ spec:
 ```
 <!-- END MUNGE: EXAMPLE registry-rc.yaml -->
 
-## Expose the registry in the cluster
+Expose the registry in the cluster
+----------------------------------
 
 Now that we have a registry `Pod` running, we can expose it as a Service:
 
 <!-- BEGIN MUNGE: EXAMPLE registry-svc.yaml -->
-```yaml
+``` yaml
 apiVersion: v1
 kind: Service
 metadata:
@@ -171,14 +177,15 @@ spec:
 ```
 <!-- END MUNGE: EXAMPLE registry-svc.yaml -->
 
-## Expose the registry on each node
+Expose the registry on each node
+--------------------------------
 
 Now that we have a running `Service`, we need to expose it onto each Kubernetes
-`Node` so that Docker will see it as `localhost`.  We can load a `Pod` on every
+`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
 node by creating following daemonset.
 
 <!-- BEGIN MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
-```yaml
+``` yaml
 apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
@@ -217,7 +224,7 @@ spec:
 <!-- END MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
 
 When modifying replication-controller, service and daemon-set defintions, take
-care to ensure _unique_ identifiers for the rc-svc couple and the daemon-set.
+care to ensure *unique* identifiers for the rc-svc couple and the daemon-set.
 Failing to do so will have register the localhost proxy daemon-sets to the
 upstream service. As a result they will then try to proxy themselves, which
 will, for obvious reasons, not work.
@@ -226,29 +233,30 @@ This ensures that port 5000 on each node is directed to the registry `Service`.
 You should be able to verify that it is running by hitting port 5000 with a web
 browser and getting a 404 error:
 
-```console
+``` console
 $ curl localhost:5000
 404 page not found
 ```
 
-## Using the registry
+Using the registry
+------------------
 
 To use an image hosted by this registry, simply say this in your `Pod`'s
 `spec.containers[].image` field:
 
-```yaml
+``` yaml
     image: localhost:5000/user/container
 ```
 
 Before you can use the registry, you have to be able to get images into it,
-though.  If you are building an image on your Kubernetes `Node`, you can spell
-out `localhost:5000` when you build and push.  More likely, though, you are
+though. If you are building an image on your Kubernetes `Node`, you can spell
+out `localhost:5000` when you build and push. More likely, though, you are
 building locally and want to push to your cluster.
 
 You can use `kubectl` to set up a port-forward from your local node to a
 running Pod:
 
-```console
+``` console
 $ POD=$(kubectl get pods --namespace kube-system -l k8s-app=kube-registry-upstream \
             -o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
             | grep Running | head -1 | cut -f1 -d' ')
@@ -260,15 +268,14 @@ Now you can build and push images on your local computer as
 `localhost:5000/yourname/container` and those images will be available inside
 your kubernetes cluster with the same name.
 
-# More Extensions
+More Extensions
+===============
 
-- [Use GCS as storage backend](gcs/README.md)
-- [Enable TLS/SSL](tls/README.md)
-- [Enable Authentication](auth/README.md)
+-   [Use GCS as storage backend](gcs/README.md)
+-   [Enable TLS/SSL](tls/README.md)
+-   [Enable Authentication](auth/README.md)
 
-## Future improvements
+Future improvements
+-------------------
 
-* Allow port-forwarding to a Service rather than a pod (#15180)
-
-
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/README.md?pixel)]()
+-   Allow port-forwarding to a Service rather than a pod (\#15180)
diff --git a/roles/kubernetes-apps/registry/defaults/main.yml b/roles/kubernetes-apps/registry/defaults/main.yml
index d13290b3b82ba2c07793604258d35243956217ac..aa52347bc3a7622950a276270fa7cca603ff80cb 100644
--- a/roles/kubernetes-apps/registry/defaults/main.yml
+++ b/roles/kubernetes-apps/registry/defaults/main.yml
@@ -1,5 +1,4 @@
 ---
-registry_image_repo: registry
-registry_image_tag: 2.6
-registry_proxy_image_repo: gcr.io/google_containers/kube-registry-proxy
-registry_proxy_image_tag: 0.4
+registry_namespace: "kube-system"
+registry_storage_class: ""
+registry_disk_size: "10Gi"
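+
+# Illustrative example (assumed values): to back the registry with persistent storage,
+# point registry_storage_class at a storage class that exists in your cluster, e.g.
+#   registry_storage_class: "standard"
+#   registry_disk_size: "50Gi"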
diff --git a/roles/kubernetes-apps/registry/files/images/Dockerfile b/roles/kubernetes-apps/registry/files/images/Dockerfile
deleted file mode 100644
index 4223025a8c47f11ad71d2ff84bf31f681d2c59a4..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/files/images/Dockerfile
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2016 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM nginx:1.12
-
-RUN apt-get update \
-	&& apt-get install -y \
-		curl \
-		--no-install-recommends \
-	&& apt-get clean \
-	&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/man /usr/share/doc
-
-COPY rootfs /
-
-CMD ["/bin/boot"]
diff --git a/roles/kubernetes-apps/registry/files/images/Makefile b/roles/kubernetes-apps/registry/files/images/Makefile
deleted file mode 100644
index c1b64de1c2046b1333dd641b9be0825af1ed803d..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/files/images/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2016 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-.PHONY: build push vet test clean
-
-TAG = 0.4
-REPO = gcr.io/google_containers/kube-registry-proxy
-
-build:
-	docker build --pull -t $(REPO):$(TAG) .
-
-push:
-	gcloud docker -- push $(REPO):$(TAG)
diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/bin/boot b/roles/kubernetes-apps/registry/files/images/rootfs/bin/boot
deleted file mode 100755
index 04262b4642e40df72260608dab2f162181e79f36..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/files/images/rootfs/bin/boot
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-
-# fail if no hostname is provided
-REGISTRY_HOST=${REGISTRY_HOST:?no host}
-REGISTRY_PORT=${REGISTRY_PORT:-5000}
-
-# we are always listening on port 80
-# https://github.com/nginxinc/docker-nginx/blob/43c112100750cbd1e9f2160324c64988e7920ac9/stable/jessie/Dockerfile#L25
-PORT=80
-
-sed -e "s/%HOST%/$REGISTRY_HOST/g" \
-	-e "s/%PORT%/$REGISTRY_PORT/g" \
-	-e "s/%BIND_PORT%/$PORT/g" \
-	</etc/nginx/conf.d/default.conf.in >/etc/nginx/conf.d/default.conf
-
-# wait for registry to come online
-while ! curl -sS "$REGISTRY_HOST:$REGISTRY_PORT" &>/dev/null; do
-	printf "waiting for the registry (%s:%s) to come online...\n" "$REGISTRY_HOST" "$REGISTRY_PORT"
-	sleep 1
-done
-
-printf "starting proxy...\n"
-exec nginx -g "daemon off;" "$@"
diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/conf.d/default.conf.in b/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/conf.d/default.conf.in
deleted file mode 100644
index ecd95fd2fe1c00d63e6a353f799575dc376eb3fd..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/conf.d/default.conf.in
+++ /dev/null
@@ -1,28 +0,0 @@
-# Docker registry proxy for api version 2
-
-upstream docker-registry {
-    server %HOST%:%PORT%;
-}
-
-# No client auth or TLS
-# TODO(bacongobbler): experiment with authenticating the registry if it's using TLS
-server {
-    listen %BIND_PORT%;
-    server_name localhost;
-
-    # disable any limits to avoid HTTP 413 for large image uploads
-    client_max_body_size 0;
-
-    # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
-    chunked_transfer_encoding on;
-
-    location / {
-        # Do not allow connections from docker 1.5 and earlier
-        # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
-        if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
-            return 404;
-        }
-
-        include docker-registry.conf;
-    }
-}
diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/docker-registry.conf b/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/docker-registry.conf
deleted file mode 100644
index 7dc8cfff266f54b99ae8c81ae18e579230fdff1a..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/docker-registry.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-proxy_pass                          http://docker-registry;
-proxy_set_header  Host              $http_host;   # required for docker client's sake
-proxy_set_header  X-Real-IP         $remote_addr; # pass on real client's IP
-proxy_set_header  X-Forwarded-For   $proxy_add_x_forwarded_for;
-proxy_set_header  X-Forwarded-Proto $scheme;
-proxy_read_timeout                  900;
diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/nginx.conf b/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/nginx.conf
deleted file mode 100644
index 54ecc888e55279a6ea2cf0743b2270c87810d6b9..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/nginx.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-user nginx;
-worker_processes auto;
-
-error_log   /var/log/nginx/error.log    warn;
-pid         /var/run/nginx.pid;
-
-events {
-    worker_connections  1024;
-}
-
-http {
-    include      /etc/nginx/mime.types;
-    default_type application/octet-stream;
-
-    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
-                      '$status $body_bytes_sent "$http_referer" '
-                      '"$http_user_agent" "$http_x_forwarded_for"';
-
-    access_log  /var/log/nginx/access.log main;
-
-    sendfile on;
-
-    keepalive_timeout 65;
-
-    include /etc/nginx/conf.d/*.conf;
-}
diff --git a/roles/kubernetes-apps/registry/tasks/main.yml b/roles/kubernetes-apps/registry/tasks/main.yml
index a236d273cacf73286ed64d2bfb2c161b84ff0252..a175064ee3475daa5acbb9d351904563e862c7f5 100644
--- a/roles/kubernetes-apps/registry/tasks/main.yml
+++ b/roles/kubernetes-apps/registry/tasks/main.yml
@@ -3,29 +3,56 @@
 - name: Registry | Create addon dir
   file:
     path: "{{ kube_config_dir }}/addons/registry"
+    state: directory
     owner: root
     group: root
     mode: 0755
-    recurse: true
 
 - name: Registry | Create manifests
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}"
   with_items:
-    - { name: registry-svc, file: registry-svc.yml, type: service }
-    - { name: registry-rc, file: registry-rc.yml, type: replicationcontroller }
-    - { name: registry-ds, file: registry-ds.yml, type: daemonset }
+    - { name: registry-ns, file: registry-ns.yml, type: ns }
+    - { name: registry-svc, file: registry-svc.yml, type: svc }
+    - { name: registry-rs, file: registry-rs.yml, type: rs }
+    - { name: registry-proxy-ds, file: registry-proxy-ds.yml, type: ds }
   register: registry_manifests
   when: inventory_hostname == groups['kube-master'][0]
 
 - name: Registry | Apply manifests
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "{{ registry_namespace }}"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ registry_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0]
+
+- name: Registry | Create PVC manifests
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}"
+  with_items:
+    - { name: registry-pvc, file: registry-pvc.yml, type: pvc }
+  register: registry_manifests
+  when:
+    - registry_storage_class != ""
+    - registry_disk_size != ""
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Registry | Apply PVC manifests
+  kube:
+    name: "{{ item.item.name }}"
+    namespace: "{{ registry_namespace }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}"
+    state: "latest"
+  with_items: "{{ registry_manifests.results }}"
+  when:
+    - registry_storage_class != ""
+    - registry_disk_size != ""
+    - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/registry/templates/auth/README.md b/roles/kubernetes-apps/registry/templates/auth/README.md
deleted file mode 100644
index 040c54bcb8d1b376ae4f3c75351a257dda8503dc..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/templates/auth/README.md
+++ /dev/null
@@ -1,92 +0,0 @@
-# Enable Authentication with Htpasswd for Kube-Registry 
-
-Docker registry support a few authentication providers. Full list of supported provider can be found [here](https://docs.docker.com/registry/configuration/#auth). This document describes how to enable authentication with htpasswd for kube-registry. 
-
-### Prepare Htpasswd Secret
-
-Please generate your own htpasswd file. Assuming the file you generated is `htpasswd`. 
-Creating secret to hold htpasswd...
-```console
-$ kubectl --namespace=kube-system create secret generic registry-auth-secret --from-file=htpasswd=htpasswd
-```
-
-### Run Registry
-
-Please be noted that this sample rc is using emptyDir as storage backend for simplicity. 
-
-<!-- BEGIN MUNGE: EXAMPLE registry-auth-rc.yaml -->
-```yaml
-apiVersion: v1
-kind: ReplicationController
-metadata:
-  name: kube-registry-v0
-  namespace: kube-system
-  labels:
-    k8s-app: kube-registry
-    version: v0
-#    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: 1
-  selector:
-    k8s-app: kube-registry
-    version: v0
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-registry
-        version: v0
-#        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-      - name: registry
-        image: registry:2
-        resources:
-          # keep request = limit to keep this container in guaranteed class
-          limits:
-            cpu: 100m
-            memory: 100Mi
-          requests:
-            cpu: 100m
-            memory: 100Mi
-        env:
-        - name: REGISTRY_HTTP_ADDR
-          value: :5000
-        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
-          value: /var/lib/registry
-        - name: REGISTRY_AUTH_HTPASSWD_REALM
-          value: basic_realm
-        - name: REGISTRY_AUTH_HTPASSWD_PATH
-          value: /auth/htpasswd
-        volumeMounts:
-        - name: image-store
-          mountPath: /var/lib/registry
-        - name: auth-dir
-          mountPath: /auth
-        ports:
-        - containerPort: 5000
-          name: registry
-          protocol: TCP
-      volumes:
-      - name: image-store
-        emptyDir: {}
-      - name: auth-dir
-        secret:
-          secretName: registry-auth-secret
-```
-<!-- END MUNGE: EXAMPLE registry-auth-rc.yaml -->
-
-No changes are needed for other components (kube-registry service and proxy). 
-
-### To Verify
-
-Setup proxy or port-forwarding to the kube-registry. Image push/pull should fail without authentication. Then use `docker login` to authenticate with kube-registry and see if it works.
-
-### Configure Nodes to Authenticate with Kube-Registry
-
-By default, nodes assume no authentication is required by kube-registry. Without authentication, nodes cannot pull images from kube-registry. To solve this, more documentation can be found [Here](https://github.com/kubernetes/kubernetes.github.io/blob/master/docs/concepts/containers/images.md#configuring-nodes-to-authenticate-to-a-private-repository).
-
-
-
-
-
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/auth/README.md?pixel)]()
diff --git a/roles/kubernetes-apps/registry/templates/auth/registry-auth-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/auth/registry-auth-rc.yml.j2
deleted file mode 100644
index 1af623d0909183c7d9d8e1ebd76f57d1f40ad63d..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/templates/auth/registry-auth-rc.yml.j2
+++ /dev/null
@@ -1,56 +0,0 @@
-apiVersion: v1
-kind: ReplicationController
-metadata:
-  name: kube-registry-v0
-  namespace: kube-system
-  labels:
-    k8s-app: kube-registry
-    version: v0
-#    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: 1
-  selector:
-    k8s-app: kube-registry
-    version: v0
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-registry
-        version: v0
-#        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-      - name: registry
-        image: registry:2
-        resources:
-          # keep request = limit to keep this container in guaranteed class
-          limits:
-            cpu: 100m
-            memory: 100Mi
-          requests:
-            cpu: 100m
-            memory: 100Mi
-        env:
-        - name: REGISTRY_HTTP_ADDR
-          value: :5000
-        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
-          value: /var/lib/registry
-        - name: REGISTRY_AUTH_HTPASSWD_REALM
-          value: basic_realm
-        - name: REGISTRY_AUTH_HTPASSWD_PATH
-          value: /auth/htpasswd
-        volumeMounts:
-        - name: image-store
-          mountPath: /var/lib/registry
-        - name: auth-dir
-          mountPath: /auth
-        ports:
-        - containerPort: 5000
-          name: registry
-          protocol: TCP
-      volumes:
-      - name: image-store
-        emptyDir: {}
-      - name: auth-dir
-        secret:
-          secretName: registry-auth-secret
\ No newline at end of file
diff --git a/roles/kubernetes-apps/registry/templates/gcs/README.md b/roles/kubernetes-apps/registry/templates/gcs/README.md
deleted file mode 100644
index 5706a848f8d432b287e7486a5684392072e60737..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/templates/gcs/README.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# Kube-Registry with GCS storage backend
-
-Besides local file system, docker registry also supports a number of cloud storage backends. Full list of supported backend can be found [here](https://docs.docker.com/registry/configuration/#storage). This document describes how to enable GCS for kube-registry as storage backend. 
-
-A few preparation steps are needed. 
- 1. Create a bucket named kube-registry in GCS.
- 1. Create a service account for GCS access and create key file in json format. Detail instruction can be found [here](https://cloud.google.com/storage/docs/authentication#service_accounts).
-
-
-### Pack Keyfile into a Secret
-
-Assuming you have downloaded the keyfile as `keyfile.json`. Create secret with the `keyfile.json`...
-```console
-$ kubectl --namespace=kube-system create secret generic gcs-key-secret --from-file=keyfile=keyfile.json
-```
-
-
-### Run Registry
-
-<!-- BEGIN MUNGE: EXAMPLE registry-gcs-rc.yaml -->
-```yaml
-apiVersion: v1
-kind: ReplicationController
-metadata:
-  name: kube-registry-v0
-  namespace: kube-system
-  labels:
-    k8s-app: kube-registry
-    version: v0
-#    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: 1
-  selector:
-    k8s-app: kube-registry
-    version: v0
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-registry
-        version: v0
-#        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-      - name: registry
-        image: registry:2
-        resources:
-          # keep request = limit to keep this container in guaranteed class
-          limits:
-            cpu: 100m
-            memory: 100Mi
-          requests:
-            cpu: 100m
-            memory: 100Mi
-        env:
-        - name: REGISTRY_HTTP_ADDR
-          value: :5000
-        - name: REGISTRY_STORAGE
-          value: gcs
-        - name: REGISTRY_STORAGE_GCS_BUCKET
-          value: kube-registry
-        - name: REGISTRY_STORAGE_GCS_KEYFILE
-          value: /gcs/keyfile
-        ports:
-        - containerPort: 5000
-          name: registry
-          protocol: TCP
-        volumeMounts:
-        - name: gcs-key
-          mountPath: /gcs
-      volumes:
-      - name: gcs-key
-        secret:
-          secretName: gcs-key-secret
-```
-<!-- END MUNGE: EXAMPLE registry-gcs-rc.yaml -->
-
-
-No changes are needed for other components (kube-registry service and proxy). 
-
-
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/gcs/README.md?pixel)]()
diff --git a/roles/kubernetes-apps/registry/templates/gcs/registry-gcs-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/gcs/registry-gcs-rc.yml.j2
deleted file mode 100644
index e69740335646ae29c47ffd76afdc165de55c27ab..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/templates/gcs/registry-gcs-rc.yml.j2
+++ /dev/null
@@ -1,52 +0,0 @@
-apiVersion: v1
-kind: ReplicationController
-metadata:
-  name: kube-registry-v0
-  namespace: kube-system
-  labels:
-    k8s-app: kube-registry
-    version: v0
-#    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: 1
-  selector:
-    k8s-app: kube-registry
-    version: v0
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-registry
-        version: v0
-#        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-      - name: registry
-        image: registry:2
-        resources:
-          # keep request = limit to keep this container in guaranteed class
-          limits:
-            cpu: 100m
-            memory: 100Mi
-          requests:
-            cpu: 100m
-            memory: 100Mi
-        env:
-        - name: REGISTRY_HTTP_ADDR
-          value: :5000
-        - name: REGISTRY_STORAGE
-          value: gcs
-        - name: REGISTRY_STORAGE_GCS_BUCKET
-          value: kube-registry
-        - name: REGISTRY_STORAGE_GCS_KEYFILE
-          value: /gcs/keyfile
-        ports:
-        - containerPort: 5000
-          name: registry
-          protocol: TCP
-        volumeMounts:
-        - name: gcs-key
-          mountPath: /gcs
-      volumes:
-      - name: gcs-key
-        secret:
-          secretName: gcs-key-secret
diff --git a/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c224337af230a76cfe2fc2cf1fa7ff64a5bd6d62
--- /dev/null
+++ b/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2
@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: {{ registry_namespace }}
+  labels:
+    name: {{ registry_namespace }}
diff --git a/roles/kubernetes-apps/registry/templates/registry-ds.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2
similarity index 56%
rename from roles/kubernetes-apps/registry/templates/registry-ds.yml.j2
rename to roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2
index 4d6a7800bd5b91eb71fd5e2429adc4d35c926922..84bf1cf5a98f59ae9a8abe9e0d7ff493b5b47a2c 100644
--- a/roles/kubernetes-apps/registry/templates/registry-ds.yml.j2
+++ b/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2
@@ -1,28 +1,33 @@
 ---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: kube-registry-proxy
-  namespace: {{ system_namespace }}
+  name: registry-proxy
+  namespace: {{ registry_namespace }}
   labels:
-    k8s-app: kube-registry-proxy
+    k8s-app: registry-proxy
     kubernetes.io/cluster-service: "true"
     version: v{{ registry_proxy_image_tag }}
 spec:
+  selector:
+    matchLabels:
+      k8s-app: registry-proxy
+      version: v{{ registry_proxy_image_tag }}
   template:
     metadata:
       labels:
-        k8s-app: kube-registry-proxy
-        kubernetes.io/name: "kube-registry-proxy"
+        k8s-app: registry-proxy
+        kubernetes.io/name: "registry-proxy"
         kubernetes.io/cluster-service: "true"
         version: v{{ registry_proxy_image_tag }}
     spec:
       containers:
-        - name: kube-registry-proxy
+        - name: registry-proxy
           image: {{ registry_proxy_image_repo }}:{{ registry_proxy_image_tag }}
+          imagePullPolicy: {{ k8s_image_pull_policy }}
           env:
             - name: REGISTRY_HOST
-              value: kube-registry.kube-system.svc.cluster.local
+              value: registry.{{ registry_namespace }}.svc.{{ cluster_name }}
             - name: REGISTRY_PORT
               value: "5000"
           ports:
diff --git a/roles/kubernetes-apps/registry/templates/registry-pv.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-pv.yml.j2
deleted file mode 100644
index 196efa928dafa7163310c248a1459878b36cc912..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/templates/registry-pv.yml.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-kind: PersistentVolume
-apiVersion: v1
-metadata:
-  name: kube-system-kube-registry-pv
-  labels:
-    kubernetes.io/cluster-service: "true"
-    addonmanager.kubernetes.io/mode: Reconcile
-spec:
-{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %}
-  capacity:
-    storage: {{ pillar['cluster_registry_disk_size'] }}
-  accessModes:
-    - ReadWriteOnce
-  gcePersistentDisk:
-    pdName: "{{ pillar['cluster_registry_disk_name'] }}"
-    fsType: "ext4"
-{% endif %}
diff --git a/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2
index 35c787177137083a4f009cc5f786b3e4cbb062e2..0db26db96c9303f835b0b56e34dda552d2ab2566 100644
--- a/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2
+++ b/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2
@@ -1,14 +1,16 @@
-kind: PersistentVolumeClaim
+---
 apiVersion: v1
+kind: PersistentVolumeClaim
 metadata:
-  name: kube-registry-pvc
-  namespace: kube-system
+  name: registry-pvc
+  namespace: {{ registry_namespace }}
   labels:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
   accessModes:
     - ReadWriteOnce
+  storageClassName: {{ registry_storage_class }}
   resources:
     requests:
-      storage: {{ pillar['cluster_registry_disk_size'] }}
+      storage: {{ registry_disk_size }}
diff --git a/roles/kubernetes-apps/registry/templates/registry-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
similarity index 62%
rename from roles/kubernetes-apps/registry/templates/registry-rc.yml.j2
rename to roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
index 90c01c4aa374e38615f2505d7891305d34bfbaed..730ce272bf235b5a6111b2cd83d71489bf02f978 100644
--- a/roles/kubernetes-apps/registry/templates/registry-rc.yml.j2
+++ b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
@@ -1,41 +1,48 @@
 ---
-apiVersion: v1
-kind: ReplicationController
+apiVersion: apps/v1
+kind: ReplicaSet
 metadata:
-  name: kube-registry-v{{ registry_image_tag }}
-  namespace: {{ system_namespace }}
+  name: registry-v{{ registry_image_tag }}
+  namespace: {{ registry_namespace }}
   labels:
-    k8s-app: kube-registry-upstream
+    k8s-app: registry
     version: v{{ registry_image_tag }}
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
   replicas: 1
   selector:
-    k8s-app: kube-registry-upstream
-    version: v{{ registry_image_tag }}
+    matchLabels:
+      k8s-app: registry
+      version: v{{ registry_image_tag }}
   template:
     metadata:
       labels:
-        k8s-app: kube-registry-upstream
+        k8s-app: registry
         version: v{{ registry_image_tag }}
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
         - name: registry
           image: {{ registry_image_repo }}:{{ registry_image_tag }}
+          imagePullPolicy: {{ k8s_image_pull_policy }}
           env:
             - name: REGISTRY_HTTP_ADDR
               value: :5000
             - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
               value: /var/lib/registry
           volumeMounts:
-            - name: image-store
+            - name: registry-pvc
               mountPath: /var/lib/registry
           ports:
             - containerPort: 5000
               name: registry
               protocol: TCP
       volumes:
-        - name: image-store
+        - name: registry-pvc
+{% if registry_storage_class != none %}
+          persistentVolumeClaim:
+            claimName: registry-pvc
+{% else %}
           emptyDir: {}
+{% endif %}
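For reference, a minimal sketch of the inventory variables these registry templates consume. The names `registry_namespace`, `registry_storage_class` and `registry_disk_size` are taken from the templates above; `registry_enabled` is assumed here to be the usual addon toggle and is not part of this diff.

```yaml
# group_vars/k8s-cluster.yml (illustrative sketch, not the canonical defaults)
registry_enabled: true            # assumed addon switch, not shown in this change
registry_namespace: kube-system
registry_storage_class: standard  # any existing StorageClass; leave null to fall back to emptyDir
registry_disk_size: 10Gi          # size requested by registry-pvc when a StorageClass is set
```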
diff --git a/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2
index 5669624690b84eee19218b68380284816b061c70..58d101d298d3510b249be4efc48bfaf9068e9596 100644
--- a/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2
+++ b/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2
@@ -2,17 +2,17 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: kube-registry
-  namespace: {{ system_namespace }}
+  name: registry
+  namespace: {{ registry_namespace }}
   labels:
-    k8s-app: kube-registry-upstream
+    k8s-app: registry
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
     kubernetes.io/name: "KubeRegistry"
 spec:
   selector:
-    k8s-app: kube-registry-upstream
+    k8s-app: registry
   ports:
-  - name: registry
-    port: 5000
-    protocol: TCP
+    - name: registry
+      port: 5000
+      protocol: TCP
diff --git a/roles/kubernetes-apps/registry/templates/tls/README.md b/roles/kubernetes-apps/registry/templates/tls/README.md
deleted file mode 100644
index 7ba5cc628b3339d557511917c27d867e2ae95298..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/templates/tls/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# Enable TLS for Kube-Registry 
-
-This document describes how to enable TLS for kube-registry. Before you start, please check that you have all the prerequisites:
-
-- A domain for kube-registry. Assuming it is `myregistrydomain.com`.
-- A domain certificate and key. Assuming they are `domain.crt` and `domain.key`.
-
-### Pack domain.crt and domain.key into a Secret 
-
-```console
-$ kubectl --namespace=kube-system create secret generic registry-tls-secret --from-file=domain.crt=domain.crt --from-file=domain.key=domain.key
-```
-
-### Run Registry
-
-Please note that this sample rc uses emptyDir as its storage backend for simplicity.
-
-<!-- BEGIN MUNGE: EXAMPLE registry-tls-rc.yaml -->
-```yaml
-apiVersion: v1
-kind: ReplicationController
-metadata:
-  name: kube-registry-v0
-  namespace: kube-system
-  labels:
-    k8s-app: kube-registry
-    version: v0
-#    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: 1
-  selector:
-    k8s-app: kube-registry
-    version: v0
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-registry
-        version: v0
-#        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-      - name: registry
-        image: registry:2
-        resources:
-          # keep request = limit to keep this container in guaranteed class
-          limits:
-            cpu: 100m
-            memory: 100Mi
-          requests:
-            cpu: 100m
-            memory: 100Mi
-        env:
-        - name: REGISTRY_HTTP_ADDR
-          value: :5000
-        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
-          value: /var/lib/registry
-        - name: REGISTRY_HTTP_TLS_CERTIFICATE
-          value: /certs/domain.crt
-        - name: REGISTRY_HTTP_TLS_KEY
-          value: /certs/domain.key
-        volumeMounts:
-        - name: image-store
-          mountPath: /var/lib/registry
-        - name: cert-dir
-          mountPath: /certs
-        ports:
-        - containerPort: 5000
-          name: registry
-          protocol: TCP
-      volumes:
-      - name: image-store
-        emptyDir: {}
-      - name: cert-dir
-        secret:
-          secretName: registry-tls-secret
-```
-<!-- END MUNGE: EXAMPLE registry-tls-rc.yaml -->
-
-### Expose External IP for Kube-Registry
-
-Modify the default kube-registry service to the `LoadBalancer` type and point the DNS record of `myregistrydomain.com` at the service's external IP.
-
-<!-- BEGIN MUNGE: EXAMPLE registry-tls-svc.yaml -->
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: kube-registry
-  namespace: kube-system
-  labels:
-    k8s-app: kube-registry
-#    kubernetes.io/cluster-service: "true"
-    kubernetes.io/name: "KubeRegistry"
-spec:
-  selector:
-    k8s-app: kube-registry
-  type: LoadBalancer
-  ports:
-  - name: registry
-    port: 5000
-    protocol: TCP
-```
-<!-- END MUNGE: EXAMPLE registry-tls-svc.yaml -->
-
-### To Verify 
-
-Now you should be able to access your kube-registry from another docker host. 
-```console
-docker pull busybox
-docker tag busybox myregistrydomain.com:5000/busybox
-docker push myregistrydomain.com:5000/busybox
-docker pull myregistrydomain.com:5000/busybox
-```
-
-
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/tls/README.md?pixel)]()
diff --git a/roles/kubernetes-apps/registry/templates/tls/registry-tls-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/tls/registry-tls-rc.yml.j2
deleted file mode 100644
index c2411c05246cbcdc0d3bc864b96411fce9c8b891..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/templates/tls/registry-tls-rc.yml.j2
+++ /dev/null
@@ -1,57 +0,0 @@
-apiVersion: v1
-kind: ReplicationController
-metadata:
-  name: kube-registry-v0
-  namespace: kube-system
-  labels:
-    k8s-app: kube-registry
-    version: v0
-#    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: 1
-  selector:
-    k8s-app: kube-registry
-    version: v0
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-registry
-        version: v0
-#        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-      - name: registry
-        image: registry:2
-        resources:
-          # keep request = limit to keep this container in guaranteed class
-          limits:
-            cpu: 100m
-            memory: 100Mi
-          requests:
-            cpu: 100m
-            memory: 100Mi
-        env:
-        - name: REGISTRY_HTTP_ADDR
-          value: :5000
-        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
-          value: /var/lib/registry
-        - name: REGISTRY_HTTP_TLS_CERTIFICATE
-          value: /certs/domain.crt
-        - name: REGISTRY_HTTP_TLS_KEY
-          value: /certs/domain.key
-        volumeMounts:
-        - name: image-store
-          mountPath: /var/lib/registry
-        - name: cert-dir
-          mountPath: /certs
-        ports:
-        - containerPort: 5000
-          name: registry
-          protocol: TCP
-      volumes:
-      - name: image-store
-        emptyDir: {}
-      - name: cert-dir
-        secret:
-          secretName: registry-tls-secret
-
diff --git a/roles/kubernetes-apps/registry/templates/tls/registry-tls-svc.yml.j2 b/roles/kubernetes-apps/registry/templates/tls/registry-tls-svc.yml.j2
deleted file mode 100644
index a9d59f117d4429f266e65853c039188c220b1249..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/registry/templates/tls/registry-tls-svc.yml.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: kube-registry
-  namespace: kube-system
-  labels:
-    k8s-app: kube-registry
-#    kubernetes.io/cluster-service: "true"
-    kubernetes.io/name: "KubeRegistry"
-spec:
-  selector:
-    k8s-app: kube-registry
-  type: LoadBalancer
-  ports:
-  - name: registry
-    port: 5000
-    protocol: TCP
diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
index 52101ae16c1284fd5f2882484045c4bce945eddf..2589b3610f30f067286fd129b796b30690c48544 100644
--- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
+++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
@@ -34,7 +34,7 @@
     {{ bin_dir }}/kubectl get secrets --all-namespaces
     -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
     | grep kubernetes.io/service-account-token
-    | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller'
+    | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller|local-volume-provisioner'
   register: tokens_to_delete
   when: needs_rotation
 
@@ -44,5 +44,5 @@
   when: needs_rotation
 
 - name: Rotate Tokens | Delete pods in system namespace
-  command: "{{ bin_dir }}/kubectl delete pods -n {{ system_namespace }} --all"
+  command: "{{ bin_dir }}/kubectl delete pods -n kube-system --all"
   when: needs_rotation
diff --git a/roles/kubernetes/client/defaults/main.yml b/roles/kubernetes/client/defaults/main.yml
index 5864e991f524009b9d19fc2dd454119c20655ef6..32870df016a6ac1baf2066950ffca852f8bf4566 100644
--- a/roles/kubernetes/client/defaults/main.yml
+++ b/roles/kubernetes/client/defaults/main.yml
@@ -1,7 +1,7 @@
 ---
 kubeconfig_localhost: false
 kubectl_localhost: false
-artifacts_dir: "./artifacts"
+artifacts_dir: "{{ inventory_dir }}/artifacts"
 
 kube_config_dir: "/etc/kubernetes"
 kube_apiserver_port: "6443"
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index e20a71eb833389937dd0e744b5455a11b4e8d497..d34131a3a35643b155f1ca52c445be28b6aa2732 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -1,15 +1,11 @@
 ---
-- name: Set first kube master
-  set_fact:
-    first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
-
 - name: Set external kube-apiserver endpoint
   set_fact:
     external_apiserver_endpoint: >-
       {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
       https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
       {%- else -%}
-      https://{{ first_kube_master }}:{{ kube_apiserver_port }}
+      https://{{ kube_apiserver_address }}:{{ kube_apiserver_port }}
       {%- endif -%}
   tags:
     - facts
@@ -65,3 +61,15 @@
   become: no
   run_once: yes
   when: kubectl_localhost|default(false)
+
+- name: create helper script kubectl.sh on ansible host
+  copy:
+    content: |
+      #!/bin/bash
+      kubectl --kubeconfig=admin.conf $@
+    dest: "{{ artifacts_dir }}/kubectl.sh"
+    mode: 0755
+  become: no
+  run_once: yes
+  delegate_to: localhost
+  when: kubectl_localhost|default(false) and kubeconfig_localhost|default(false)
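As a usage sketch, the task above only fires when both localhost flags are enabled, so an inventory that wants the wrapper script might look like this (variable names come from this role's defaults; the rest is illustrative):

```yaml
# group_vars/all.yml (sketch)
kubeconfig_localhost: true   # fetch admin.conf into {{ artifacts_dir }}
kubectl_localhost: true      # fetch kubectl and, with the task above, generate kubectl.sh beside it
```

Because the helper hard-codes `--kubeconfig=admin.conf`, it is meant to be run from inside the artifacts directory, e.g. `./kubectl.sh get nodes`.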
diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml
index 64a71fc22c912c72d293c712bf2c082d51674ff1..6325bb31cedcdc7b4ddb194d3f72893426c822fe 100644
--- a/roles/kubernetes/master/defaults/main.yml
+++ b/roles/kubernetes/master/defaults/main.yml
@@ -78,6 +78,9 @@ kube_oidc_auth: false
 ## Variables for custom flags
 apiserver_custom_flags: []
 
+# List of the preferred NodeAddressTypes to use for kubelet connections.
+kubelet_preferred_address_types: 'InternalDNS,InternalIP,Hostname,ExternalDNS,ExternalIP'
+
 controller_mgr_custom_flags: []
 
 scheduler_custom_flags: []
@@ -89,3 +92,9 @@ kube_kubeadm_scheduler_extra_args: {}
 
 ## Variable for influencing kube-scheduler behaviour
 volume_cross_zone_attachment: false
+
+## Encrypting Secret Data at Rest
+kube_encrypt_secret_data: false
+kube_encrypt_token: "{{ lookup('password', inventory_dir + '/credentials/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}"
+# Must be either: aescbc, secretbox or aesgcm
+kube_encryption_algorithm: "aescbc"
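A hedged example of enabling encryption at rest from an inventory; only `kube_encrypt_secret_data` has to be set, the other lines just restate the defaults introduced above:

```yaml
# group_vars/k8s-cluster.yml (sketch)
kube_encrypt_secret_data: true
kube_encryption_algorithm: aescbc   # or secretbox / aesgcm
# kube_encrypt_token is generated automatically under {{ inventory_dir }}/credentials/
```

When enabled, the role writes secrets_encryption.yaml into the certificates directory and passes it to the apiserver via --experimental-encryption-provider-config, as the template and manifest changes below show.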
diff --git a/roles/kubernetes/master/tasks/encrypt-at-rest.yml b/roles/kubernetes/master/tasks/encrypt-at-rest.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2e569b08bb63ca753f2c11ea232df494ba52ccc1
--- /dev/null
+++ b/roles/kubernetes/master/tasks/encrypt-at-rest.yml
@@ -0,0 +1,10 @@
+---
+- name: Write secrets for encrypting secret data at rest
+  template:
+    src: secrets_encryption.yaml.j2
+    dest: "{{ kube_config_dir }}/ssl/secrets_encryption.yaml"
+    owner: root
+    group: "{{ kube_cert_group }}"
+    mode: 0640
+  tags:
+    - kube-apiserver
diff --git a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
index a9f9383185e29461f26e5b3a049a5570d4e45881..58eaaa66f7bf469b52aa873d96a7e815ea1e8ce6 100644
--- a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
@@ -9,4 +9,6 @@
     - {src: apiserver-key.pem, dest: apiserver.key}
     - {src: ca.pem, dest: ca.crt}
     - {src: ca-key.pem, dest: ca.key}
+    - {src: service-account-key.pem, dest: sa.pub}
+    - {src: service-account-key.pem, dest: sa.key}
   register: kubeadm_copy_old_certs
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index 04ad307fd536260bf0064c2c171fbb98cc6f2b67..daa10fd796ecd521f843d706791d879b547e3b53 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -12,6 +12,9 @@
 - import_tasks: users-file.yml
   when: kube_basic_auth|default(true)
 
+- import_tasks: encrypt-at-rest.yml
+  when: kube_encrypt_secret_data
+
 - name: Compare host kubectl with hyperkube container
   command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/cmp /hyperkube /systembindir/kubectl"
   register: kubectl_task_compare_result
diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml
index 3a9fe64174ab48d93792960c63a40df1d5409561..56e57b015d98eb1270d536c20d430889bf21f15d 100644
--- a/roles/kubernetes/master/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/master/tasks/pre-upgrade.yml
@@ -30,4 +30,7 @@
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when: kube_apiserver_manifest_replaced.changed
-  run_once: true
+  register: remove_master_container
+  retries: 4
+  until: remove_master_container.rc == 0
+  delay: 5
\ No newline at end of file
diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
index ed1cc7add3669d3279aaed7972776a56223bf716..0eccb4918545bbc21c2f07b9a8e82f1c6052264b 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
@@ -36,7 +36,11 @@ apiServerExtraArgs:
   insecure-port: "{{ kube_apiserver_insecure_port }}"
   admission-control: {{ kube_apiserver_admission_control | join(',') }}
   apiserver-count: "{{ kube_apiserver_count }}"
+{% if kube_version | version_compare('v1.9', '>=') %}
+  endpoint-reconciler-type: lease
+{% endif %}
   service-node-port-range: {{ kube_apiserver_node_port_range }}
+  kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
 {% if kube_basic_auth|default(true) %}
   basic-auth-file: {{ kube_users_dir }}/known_users.csv
 {% endif %}
@@ -52,6 +56,9 @@ apiServerExtraArgs:
 {%   if kube_oidc_groups_claim is defined %}
   oidc-groups-claim: {{ kube_oidc_groups_claim }}
 {%   endif %}
+{% endif %}
+{% if kube_encrypt_secret_data %}
+  experimental-encryption-provider-config: {{ kube_config_dir }}/ssl/secrets_encryption.yaml
 {% endif %}
   storage-backend: {{ kube_apiserver_storage_backend }}
 {% if kube_api_runtime_config is defined %}
@@ -59,7 +66,7 @@ apiServerExtraArgs:
 {% endif %}
   allow-privileged: "true"
 {% for key in kube_kubeadm_apiserver_extra_args %}
-  {{ key }}: {{ kube_kubeadm_apiserver_extra_args[key] }}
+  {{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}"
 {% endfor %}
 controllerManagerExtraArgs:
   node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
@@ -69,12 +76,12 @@ controllerManagerExtraArgs:
   feature-gates: {{ kube_feature_gates|join(',') }}
 {% endif %}
 {% for key in kube_kubeadm_controller_extra_args %}
-  {{ key }}: {{ kube_kubeadm_controller_extra_args[key] }}
+  {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
 {% endfor %}
 {% if kube_kubeadm_scheduler_extra_args|length > 0 %}
 schedulerExtraArgs:
 {% for key in kube_kubeadm_scheduler_extra_args %}
-  {{ key }}: {{ kube_kubeadm_scheduler_extra_args[key] }}
+  {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
 {% endfor %}
 {% endif %}
 apiServerCertSANs:
@@ -83,3 +90,6 @@ apiServerCertSANs:
 {% endfor %}
 certificatesDir: {{ kube_config_dir }}/ssl
 unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}"
+{% if kube_override_hostname|default('') %}
+nodeName: {{ kube_override_hostname }}
+{% endif %}
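The extra-args values are now quoted so that booleans and numbers survive as strings in the rendered kubeadm config. A sketch of how such overrides are typically supplied (the flag names here are illustrative apiserver options, not values required by this change):

```yaml
# group_vars/k8s-cluster.yml (sketch)
kube_kubeadm_apiserver_extra_args:
  request-timeout: 2m0s
  enable-aggregator-routing: true   # rendered as "true" thanks to the quoting above
```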
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index a6f76c6d64c7c4098deb42f515d498463109a198..687ca415d3e21337e74d8e0007b0e0ef46777dec 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-apiserver
-  namespace: {{system_namespace}}
+  namespace: kube-system
   labels:
     k8s-app: kube-apiserver
     kubespray: v2
@@ -30,6 +30,9 @@ spec:
     - apiserver
     - --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
     - --etcd-servers={{ etcd_access_addresses }}
+{%   if etcd_events_cluster_setup  %}
+    - --etcd-servers-overrides=/events#{{ etcd_events_access_addresses }}
+{% endif %}
 {%   if kube_version | version_compare('v1.9', '<')  %}
     - --etcd-quorum-read=true
 {% endif %}
@@ -39,6 +42,9 @@ spec:
     - --insecure-bind-address={{ kube_apiserver_insecure_bind_address }}
     - --bind-address={{ kube_apiserver_bind_address }}
     - --apiserver-count={{ kube_apiserver_count }}
+{% if kube_version | version_compare('v1.9', '>=') %}
+    - --endpoint-reconciler-type=lease
+{% endif %}
     - --admission-control={{ kube_apiserver_admission_control | join(',') }}
     - --service-cluster-ip-range={{ kube_service_addresses }}
     - --service-node-port-range={{ kube_apiserver_node_port_range }}
@@ -48,17 +54,16 @@ spec:
     - --kubelet-client-certificate={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem
     - --kubelet-client-key={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem
     - --service-account-lookup=true
+    - --kubelet-preferred-address-types={{ kubelet_preferred_address_types }}
 {% if kube_basic_auth|default(true) %}
     - --basic-auth-file={{ kube_users_dir }}/known_users.csv
 {% endif %}
     - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
     - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
-    - --proxy-client-cert-file={{ kube_cert_dir }}/apiserver.pem
-    - --proxy-client-key-file={{ kube_cert_dir }}/apiserver-key.pem
 {% if kube_token_auth|default(true) %}
     - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
 {% endif %}
-    - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
+    - --service-account-key-file={{ kube_cert_dir }}/service-account-key.pem
 {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
     - --oidc-issuer-url={{ kube_oidc_url }}
     - --oidc-client-id={{ kube_oidc_client_id }}
@@ -99,6 +104,9 @@ spec:
 {% if authorization_modes %}
     - --authorization-mode={{ authorization_modes|join(',') }}
 {% endif %}
+{% if kube_encrypt_secret_data %}
+    - --experimental-encryption-provider-config={{ kube_config_dir }}/ssl/secrets_encryption.yaml
+{% endif %}
 {% if kube_feature_gates %}
     - --feature-gates={{ kube_feature_gates|join(',') }}
 {% endif %}
@@ -123,7 +131,7 @@ spec:
       httpGet:
         host: 127.0.0.1
         path: /healthz
-{% if kube_apiserver_insecure_port == 0 %}
+{% if kube_apiserver_insecure_port|int == 0 %}
         port: {{ kube_apiserver_port }}
         scheme: HTTPS
 {% else %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
index 2b4282a2e3b3927bf48553826cde1961dd1eb7e8..0123724967e3a4bc62d7e3078df3a6f37a45ebd1 100644
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-controller-manager
-  namespace: {{system_namespace}}
+  namespace: kube-system
   labels:
     k8s-app: kube-controller-manager
   annotations:
@@ -29,7 +29,7 @@ spec:
     - controller-manager
     - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml
     - --leader-elect=true
-    - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
+    - --service-account-private-key-file={{ kube_cert_dir }}/service-account-key.pem
     - --root-ca-file={{ kube_cert_dir }}/ca.pem
     - --cluster-signing-cert-file={{ kube_cert_dir }}/ca.pem
     - --cluster-signing-key-file={{ kube_cert_dir }}/ca-key.pem
diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
index b13fc7fa32d259e7747817c38a768d5295f966bf..fee223eecfcfa94f0bfcf60890d2ac0fb4dab39d 100644
--- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-scheduler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kube-scheduler
   annotations:
@@ -29,6 +29,7 @@ spec:
     - --leader-elect=true
     - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml
 {% if volume_cross_zone_attachment %}
+    - --use-legacy-policy-config
     - --policy-config-file={{ kube_config_dir }}/kube-scheduler-policy.yaml
 {% endif %}
     - --profiling=false
diff --git a/roles/kubernetes/master/templates/secrets_encryption.yaml.j2 b/roles/kubernetes/master/templates/secrets_encryption.yaml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..84c6a4ea808e83dd2bc8ee8750d9541dae8ec567
--- /dev/null
+++ b/roles/kubernetes/master/templates/secrets_encryption.yaml.j2
@@ -0,0 +1,11 @@
+kind: EncryptionConfig
+apiVersion: v1
+resources:
+  - resources:
+    - secrets
+    providers:
+    - {{ kube_encryption_algorithm }}:
+        keys:
+        - name: key
+          secret: {{ kube_encrypt_token | b64encode }}
+    - identity: {}
diff --git a/roles/kubernetes/master/vars/main.yml b/roles/kubernetes/master/vars/main.yml
deleted file mode 100644
index a5eba4f2beb8e2ffd29feb1d06b0564fe438dd3f..0000000000000000000000000000000000000000
--- a/roles/kubernetes/master/vars/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-namespace_kubesystem:
-  apiVersion: v1
-  kind: Namespace
-  metadata:
-    name: "{{system_namespace}}"
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 7603ef6bef918171f0e94ffe5a789686bb31a21e..52ca8d59d5cc941adfa8362a447508df1827a4b2 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -35,12 +35,12 @@ kubelet_disable_shared_pid: true
 ### fail with swap on (default true)
 kubelet_fail_swap_on: true
 
-# Reserve this space for system resources
-kubelet_memory_limit: 256M
-kubelet_cpu_limit: 100m
+# Reserve this space for kube resources
+kube_memory_reserved: 256M
+kube_cpu_reserved: 100m
 # Reservation for master hosts
-kubelet_master_memory_limit: 512M
-kubelet_master_cpu_limit: 200m
+kube_master_memory_reserved: 512M
+kube_master_cpu_reserved: 200m
 
 kubelet_status_update_frequency: 10s
 
@@ -92,3 +92,46 @@ kube_cadvisor_port: 0
 
 # The read-only port for the Kubelet to serve on with no authentication/authorization.
 kube_read_only_port: 0
+
+
+# For the openstack integration kubelet will need credentials to access
+# openstack apis like nova and cinder. By default these values will be
+# read from the environment.
+openstack_auth_url: "{{ lookup('env','OS_AUTH_URL')  }}"
+openstack_username: "{{ lookup('env','OS_USERNAME')  }}"
+openstack_password: "{{ lookup('env','OS_PASSWORD')  }}"
+openstack_region: "{{ lookup('env','OS_REGION_NAME')  }}"
+openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true)  }}"
+openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
+openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
+
+# For the vsphere integration, kubelet will need credentials to access
+# vsphere apis
+# Documentation regarding these values can be found
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
+vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
+vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
+vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
+vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
+vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
+vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
+vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
+vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
+vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
+
+vsphere_scsi_controller_type: pvscsi
+# vsphere_public_network is the name of the network the VMs are joined to
+vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
+
+## When azure is used, you need to also set the following variables.
+## see docs/azure.md for details on how to get these values
+#azure_tenant_id:
+#azure_subscription_id:
+#azure_aad_client_id:
+#azure_aad_client_secret:
+#azure_resource_group:
+#azure_location:
+#azure_subnet_name:
+#azure_security_group_name:
+#azure_vnet_name:
+#azure_route_table_name:
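These credentials default to whatever is in the environment when Ansible runs, but they can also be pinned in the inventory. A hedged vSphere example (every value below is a placeholder):

```yaml
# group_vars/all.yml (sketch; normally sourced from the VSPHERE_* environment variables)
cloud_provider: vsphere
vsphere_vcenter_ip: vcenter.example.local
vsphere_vcenter_port: 443
vsphere_user: k8s-svc@vsphere.local
vsphere_password: "{{ vault_vsphere_password }}"   # assumed to live in an Ansible vault
vsphere_datacenter: dc-1
vsphere_datastore: ds-1
vsphere_working_dir: kubespray-vms
vsphere_insecure: 1
```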
diff --git a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml b/roles/kubernetes/node/tasks/azure-credential-check.yml
similarity index 89%
rename from roles/kubernetes/preinstall/tasks/azure-credential-check.yml
rename to roles/kubernetes/node/tasks/azure-credential-check.yml
index fa2d82fd221d6b8b0009ab0db52b998ff6ad49d3..68cbaa16055eb6fba3997a7c98d89cc6942bd841 100644
--- a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml
+++ b/roles/kubernetes/node/tasks/azure-credential-check.yml
@@ -44,6 +44,11 @@
     msg: "azure_vnet_name is missing"
   when: azure_vnet_name is not defined or azure_vnet_name == ""
 
+- name: check azure_vnet_resource_group value
+  fail:
+    msg: "azure_vnet_resource_group is missing"
+  when: azure_vnet_resource_group is not defined or azure_vnet_resource_group == ""
+
 - name: check azure_route_table_name value
   fail:
     msg: "azure_route_table_name is missing"
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 4d5fa5df541ef98ed4e1bd507b04242f510ad8df..13cc0740ddeb00da0a73664123ed674ef1f1d8ee 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -118,6 +118,19 @@
   tags:
     - kube-proxy
 
+- name: Persist ip_vs modules
+  copy:
+    dest: /etc/modules-load.d/kube_proxy-ipvs.conf
+    content: |
+      ip_vs
+      ip_vs_rr
+      ip_vs_wrr
+      ip_vs_sh
+      nf_conntrack_ipv4
+  when: kube_proxy_mode == 'ipvs'
+  tags:
+    - kube-proxy
+
 - name: Write proxy manifest
   template:
     src: manifests/kube-proxy.manifest.j2
@@ -134,6 +147,27 @@
   tags:
     - kube-proxy
 
+- include_tasks: "{{ cloud_provider }}-credential-check.yml"
+  when:
+    - cloud_provider is defined
+    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+  tags:
+    - cloud-provider
+    - facts
+
+- name: Write cloud-config
+  template:
+    src: "{{ cloud_provider }}-cloud-config.j2"
+    dest: "{{ kube_config_dir }}/cloud_config"
+    group: "{{ kube_cert_group }}"
+    mode: 0640
+  when:
+    - cloud_provider is defined
+    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+  notify: restart kubelet
+  tags:
+    - cloud-provider
+
 # reload-systemd
 - meta: flush_handlers
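The module persistence added above only applies when kube-proxy runs in IPVS mode, which is an inventory choice along these lines (a sketch; the default proxy mode is not changed by this diff):

```yaml
# group_vars/k8s-cluster.yml (sketch)
kube_proxy_mode: ipvs   # triggers the modules-load.d entry above; iptables mode skips it
```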
 
diff --git a/roles/kubernetes/preinstall/tasks/openstack-credential-check.yml b/roles/kubernetes/node/tasks/openstack-credential-check.yml
similarity index 100%
rename from roles/kubernetes/preinstall/tasks/openstack-credential-check.yml
rename to roles/kubernetes/node/tasks/openstack-credential-check.yml
diff --git a/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml b/roles/kubernetes/node/tasks/vsphere-credential-check.yml
similarity index 100%
rename from roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml
rename to roles/kubernetes/node/tasks/vsphere-credential-check.yml
diff --git a/roles/kubernetes/preinstall/templates/azure-cloud-config.j2 b/roles/kubernetes/node/templates/azure-cloud-config.j2
similarity index 88%
rename from roles/kubernetes/preinstall/templates/azure-cloud-config.j2
rename to roles/kubernetes/node/templates/azure-cloud-config.j2
index 139a06cc1f47b83c11845a475a11bb1c97be1e49..d33c044b23595258c101154b180da63bc549ff8a 100644
--- a/roles/kubernetes/preinstall/templates/azure-cloud-config.j2
+++ b/roles/kubernetes/node/templates/azure-cloud-config.j2
@@ -8,5 +8,6 @@
   "subnetName": "{{ azure_subnet_name }}",
   "securityGroupName": "{{ azure_security_group_name }}",
   "vnetName": "{{ azure_vnet_name }}",
+  "vnetResourceGroup": "{{ azure_vnet_resource_group }}",
   "routeTableName": "{{ azure_route_table_name }}"
-}
\ No newline at end of file
+}
diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2
index 6549a7044b90c7fba7e4a811c948f9aa6d9fd4da..22671b2c340c2433c7e0423569db65a5ecc29d4d 100644
--- a/roles/kubernetes/node/templates/kubelet-container.j2
+++ b/roles/kubernetes/node/templates/kubelet-container.j2
@@ -5,8 +5,8 @@
   --privileged \
   --name=kubelet \
   --restart=on-failure:5 \
-  --memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \
-  --cpu-shares={{ kubelet_cpu_limit|regex_replace('m', '')  }} \
+  --memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }} \
+  --cpu-shares={{ kube_cpu_reserved|regex_replace('m', '')  }} \
   -v /dev:/dev:rw \
   -v /etc/cni:/etc/cni:ro \
   -v /opt/cni:/opt/cni:ro \
@@ -26,9 +26,6 @@
   -v /var/run:/var/run:rw \
   -v {{kube_config_dir}}:{{kube_config_dir}}:ro \
   -v /etc/os-release:/etc/os-release:ro \
-{% if local_volume_provisioner_enabled == true %}
-  -v {{ local_volume_base_dir }}:{{ local_volume_base_dir }}:shared \
-{% endif %}
   {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
   ./hyperkube kubelet \
   "$@"
diff --git a/roles/kubernetes/node/templates/kubelet.docker.service.j2 b/roles/kubernetes/node/templates/kubelet.docker.service.j2
index fdbdb89692c9de944a9cb51e33fd95f96473c334..bba1a5fc4b5824c042dabd32ec84ccab447423d5 100644
--- a/roles/kubernetes/node/templates/kubelet.docker.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.docker.service.j2
@@ -5,6 +5,7 @@ After=docker.service
 Wants=docker.socket
 
 [Service]
+User=root
 EnvironmentFile={{kube_config_dir}}/kubelet.env
 ExecStart={{ bin_dir }}/kubelet \
 		$KUBE_LOGTOSTDERR \
diff --git a/roles/kubernetes/node/templates/kubelet.host.service.j2 b/roles/kubernetes/node/templates/kubelet.host.service.j2
index 78ba51f70f7db350f9162cc72a59fda8540415a3..c7dad4e290c99fa6fd55d301099168c3ce6a0329 100644
--- a/roles/kubernetes/node/templates/kubelet.host.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.host.service.j2
@@ -5,6 +5,7 @@ After=docker.service
 Wants=docker.socket
 
 [Service]
+User=root
 EnvironmentFile=-{{kube_config_dir}}/kubelet.env
 {% if kubelet_flexvolumes_plugins_dir is defined %}
 ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
index e49b15f489a5cc13308f8df6793a5b3d321ce98f..acc7411e30650274f9a43d2772b1758434ddd174 100644
--- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
@@ -1,4 +1,4 @@
-### Upstream source https://github.com/kubernetes/release/blob/master/debian/xenial/kubeadm/channel/stable/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+### Upstream source https://github.com/kubernetes/release/blob/master/debian/xenial/kubeadm/channel/stable/etc/systemd/system/kubelet.service.d/
 ### All upstream values should be present in this file
 
 # logging to stderr means we get it in the systemd journal
@@ -20,13 +20,17 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% if kube_version | version_compare('v1.8', '<') %}
 --require-kubeconfig \
 {% endif %}
+{% if kubelet_authentication_token_webhook %}
+--authentication-token-webhook \
+{% endif %}
+{% if kubelet_authorization_mode_webhook %}
 --authorization-mode=Webhook \
+{% endif %}
 --client-ca-file={{ kube_cert_dir }}/ca.crt \
 --pod-manifest-path={{ kube_manifest_dir }} \
 --cadvisor-port={{ kube_cadvisor_port }} \
 {# end kubeadm specific settings #}
 --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
---kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \
 --node-status-update-frequency={{ kubelet_status_update_frequency }} \
 --cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
 --docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
@@ -41,14 +45,16 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 
 {# Node reserved CPU/memory #}
 {% if is_kube_master|bool %}
-{% set kubelet_reserve %}--kube-reserved cpu={{ kubelet_master_cpu_limit }},memory={{ kubelet_master_memory_limit|regex_replace('Mi', 'M') }}{% endset %}
+{% set kube_reserved %}--kube-reserved cpu={{ kube_master_cpu_reserved }},memory={{ kube_master_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
 {% else %}
-{% set kubelet_reserve %}--kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }}{% endset %}
+{% set kube_reserved %}--kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
 {% endif %}
 
 {# DNS settings for kubelet #}
-{% if dns_mode == 'kubedns' %}
+{% if dns_mode in ['kubedns', 'coredns'] %}
 {% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
+{% elif dns_mode == 'coredns_dual' %}
+{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %}
 {% elif dns_mode == 'dnsmasq_kubedns' %}
 {% set kubelet_args_cluster_dns %}--cluster-dns={{ dnsmasq_dns_server }}{% endset %}
 {% elif dns_mode == 'manual' %}
@@ -59,7 +65,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
 
 
-KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_reserve }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
+KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kube_reserved }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
 {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %}
 KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
 {% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
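The kubelet env templates now honor two webhook toggles and the coredns/coredns_dual DNS modes. A sketch of the corresponding inventory variables (names are taken from the templates; whether CoreDNS itself is deployed is configured elsewhere):

```yaml
# group_vars/k8s-cluster.yml (sketch)
kubelet_authentication_token_webhook: true
kubelet_authorization_mode_webhook: true
dns_mode: coredns   # coredns_dual additionally passes skydns_server_secondary to --cluster-dns
```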
diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
index e1406e7e1b27dd0ca1a3d673176af7d0fb8976b0..4286d94708a246f5b917e0d425b6fb2083e060bc 100644
--- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
@@ -4,6 +4,7 @@ Documentation=https://github.com/GoogleCloudPlatform/kubernetes
 Wants=network.target
 
 [Service]
+User=root
 Restart=on-failure
 RestartSec=10s
 TimeoutStartSec=0
@@ -36,9 +37,6 @@ ExecStart=/usr/bin/rkt run \
         --volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \
         --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \
         --volume var-log,kind=host,source=/var/log \
-{% if local_volume_provisioner_enabled == true %}
-        --volume local-volume-base-dir,kind=host,source={{ local_volume_base_dir }},readOnly=false,recursive=true \
-{% endif %}
 {% if kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium"] %}
         --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
         --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
@@ -67,9 +65,6 @@ ExecStart=/usr/bin/rkt run \
         --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
         --mount volume=var-log,target=/var/log \
         --mount volume=hosts,target=/etc/hosts \
-{% if local_volume_provisioner_enabled == true %}
-        --mount volume=local-volume-base-dir,target={{ local_volume_base_dir }} \
-{% endif %}
 {% if kubelet_flexvolumes_plugins_dir is defined %}
         --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \
 {% endif %}
diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2
index cb7d83d35da8f6ab8d8acf06427b89f8ca1c5848..5fef2476e0e33525b403574cc67716d98ad89ec7 100644
--- a/roles/kubernetes/node/templates/kubelet.standard.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2
@@ -33,11 +33,19 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% else %}
 --fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
 {% endif %}
+{% if kubelet_authentication_token_webhook %}
+--authentication-token-webhook \
+{% endif %}
+{% if kubelet_authorization_mode_webhook %}
+--authorization-mode=Webhook \
+{% endif %}
 --enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} {% endif %}{% endset %}
 
 {# DNS settings for kubelet #}
-{% if dns_mode == 'kubedns' %}
+{% if dns_mode in ['kubedns', 'coredns'] %}
 {% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
+{% elif dns_mode == 'coredns_dual' %}
+{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %}
 {% elif dns_mode == 'dnsmasq_kubedns' %}
 {% set kubelet_args_cluster_dns %}--cluster-dns={{ dnsmasq_dns_server }}{% endset %}
 {% elif dns_mode == 'manual' %}
@@ -67,22 +75,33 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 
 {# Node reserved CPU/memory #}
 {% if is_kube_master|bool %}
-{% set kubelet_reserve %}--kube-reserved cpu={{ kubelet_master_cpu_limit }},memory={{ kubelet_master_memory_limit|regex_replace('Mi', 'M') }}{% endset %}
+{% set kube_reserved %}--kube-reserved cpu={{ kube_master_cpu_reserved }},memory={{ kube_master_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
 {% else %}
-{% set kubelet_reserve %}--kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }}{% endset %}
+{% set kube_reserved %}--kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
 {% endif %}
 
 {# Kubelet node labels #}
+{% set role_node_labels = [] %}
 {% if inventory_hostname in groups['kube-master'] %}
-{%   set node_labels %}--node-labels=node-role.kubernetes.io/master=true{% endset %}
+{%   do role_node_labels.append('node-role.kubernetes.io/master=true') %}
 {%   if not standalone_kubelet|bool %}
-{%     set node_labels %}{{ node_labels }},node-role.kubernetes.io/node=true{% endset %}
+{%     do role_node_labels.append('node-role.kubernetes.io/node=true') %}
 {%   endif %}
 {% else %}
-{%   set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %}
+{%   do role_node_labels.append('node-role.kubernetes.io/node=true') %}
+{% endif %}
+{% if inventory_hostname in groups['kube-ingress']|default([]) %}
+{%   do role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
+{% endif %}
+{% set inventory_node_labels = [] %}
+{% if node_labels is defined %}
+{% for labelname, labelvalue in node_labels.iteritems() %}
+{% do inventory_node_labels.append(labelname + '=' + labelvalue) %}
+{% endfor %}
 {% endif %}
+{% set all_node_labels = role_node_labels + inventory_node_labels %}
 
-KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kubelet_reserve }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
+KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
 {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %}
 KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
 {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
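Kubelet node labels are now assembled from the role-derived labels plus an optional `node_labels` dict from the inventory. A sketch of a per-host override (the label keys are purely illustrative):

```yaml
# host_vars/node1.yml (sketch)
node_labels:
  disktype: ssd
  zone: rack-a2
# rendered as --node-labels=node-role.kubernetes.io/node=true,disktype=ssd,zone=rack-a2
```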
diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
index 7c8e0062d220dc85f654b1e5390920e64203aa82..18e51069f104725167cb14d3ab70779a749b3229 100644
--- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-proxy
-  namespace: {{system_namespace}}
+  namespace: kube-system
   labels:
     k8s-app: kube-proxy
   annotations:
@@ -48,7 +48,6 @@ spec:
 {% elif kube_proxy_mode == 'ipvs' %}
     - --masquerade-all
     - --feature-gates=SupportIPVSProxyMode=true
-    - --proxy-mode=ipvs
     - --ipvs-min-sync-period=5s
     - --ipvs-sync-period=5s
     - --ipvs-scheduler=rr
diff --git a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
index 2d566cad10ad144eb06d08333260c02d6ef544b6..a1e9a78156ade8abafc085be0d1ab9967d6aaedb 100644
--- a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: nginx-proxy
-  namespace: {{system_namespace}}
+  namespace: kube-system
   labels:
     k8s-app: kube-nginx
 spec:
diff --git a/roles/kubernetes/preinstall/templates/openstack-cloud-config.j2 b/roles/kubernetes/node/templates/openstack-cloud-config.j2
similarity index 100%
rename from roles/kubernetes/preinstall/templates/openstack-cloud-config.j2
rename to roles/kubernetes/node/templates/openstack-cloud-config.j2
diff --git a/roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2 b/roles/kubernetes/node/templates/vsphere-cloud-config.j2
similarity index 100%
rename from roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2
rename to roles/kubernetes/node/templates/vsphere-cloud-config.j2
diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml
index 295f101789d728fc1ae3ee96963acc13ee7838e4..149cbb42a1d9c6636f492572ce8543ca766ab25a 100644
--- a/roles/kubernetes/preinstall/defaults/main.yml
+++ b/roles/kubernetes/preinstall/defaults/main.yml
@@ -23,35 +23,6 @@ disable_ipv6_dns: false
 kube_cert_group: kube-cert
 kube_config_dir: /etc/kubernetes
 
-# For the openstack integration kubelet will need credentials to access
-# openstack apis like nova and cinder. Per default this values will be
-# read from the environment.
-openstack_auth_url: "{{ lookup('env','OS_AUTH_URL')  }}"
-openstack_username: "{{ lookup('env','OS_USERNAME')  }}"
-openstack_password: "{{ lookup('env','OS_PASSWORD')  }}"
-openstack_region: "{{ lookup('env','OS_REGION_NAME')  }}"
-openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true)  }}"
-openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
-openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
-
-# For the vsphere integration, kubelet will need credentials to access
-# vsphere apis
-# Documentation regarding these values can be found
-# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
-vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
-vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
-vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
-vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
-vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
-vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
-vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
-vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
-vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
-
-vsphere_scsi_controller_type: pvscsi
-# vsphere_public_network is name of the network the VMs are joined to
-vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
-
 # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
 # for hostnet pods and infra needs
 resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
diff --git a/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml
index 8c0a5f5991a07a1883100b4278dcd1350748bb97..0ab2c9b07dcd6f74156482664de1ff3d78a07a95 100644
--- a/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml
+++ b/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml
@@ -15,7 +15,7 @@
   notify: Preinstall | restart network
   when: dhclientconffile is defined
 
-- name: Configue dhclient hooks for resolv.conf (non-RH)
+- name: Configure dhclient hooks for resolv.conf (non-RH)
   template:
     src: dhclient_dnsupdate.sh.j2
     dest: "{{ dhclienthookfile }}"
@@ -24,7 +24,7 @@
   notify: Preinstall | restart network
   when: ansible_os_family != "RedHat"
 
-- name: Configue dhclient hooks for resolv.conf (RH-only)
+- name: Configure dhclient hooks for resolv.conf (RH-only)
   template:
     src: dhclient_dnsupdate_rh.sh.j2
     dest: "{{ dhclienthookfile }}"
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index 289065c71a7fe9b66c4a2c6a0ddc1819f13d087f..652e35682fc809579947fd86c7e8689de8022f06 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -3,6 +3,11 @@
   tags:
     - asserts
 
+# This is run before bin_dir is pinned because these tasks are run on localhost
+- import_tasks: pre_upgrade.yml
+  tags:
+    - upgrade
+
 - name: Force binaries directory for Container Linux by CoreOS
   set_fact:
     bin_dir: "/opt/bin"
@@ -60,7 +65,6 @@
     - "{{ kube_config_dir }}/ssl"
     - "{{ kube_manifest_dir }}"
     - "{{ kube_script_dir }}"
-    - "{{ local_volume_base_dir }}"
 
 - name: check cloud_provider value
   fail:
@@ -72,14 +76,6 @@
     - cloud-provider
     - facts
 
-- include_tasks: "{{ cloud_provider }}-credential-check.yml"
-  when:
-    - cloud_provider is defined
-    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
-  tags:
-    - cloud-provider
-    - facts
-
 - name: Create cni directories
   file:
     path: "{{ item }}"
@@ -257,19 +253,6 @@
   tags:
     - bootstrap-os
 
-- name: Write cloud-config
-  template:
-    src: "{{ cloud_provider }}-cloud-config.j2"
-    dest: "{{ kube_config_dir }}/cloud_config"
-    group: "{{ kube_cert_group }}"
-    mode: 0640
-  when:
-    - inventory_hostname in groups['k8s-cluster']
-    - cloud_provider is defined
-    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
-  tags:
-    - cloud-provider
-
 - import_tasks: etchosts.yml
   tags:
     - bootstrap-os
diff --git a/roles/kubernetes/preinstall/tasks/pre_upgrade.yml b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml
new file mode 100644
index 0000000000000000000000000000000000000000..63cbc9be1112745236f412a8fa14f88f3a5d6b4b
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml
@@ -0,0 +1,28 @@
+---
+- name: "Pre-upgrade | check if old credential dir exists"
+  local_action:
+    module: stat
+    path: "{{ inventory_dir }}/../credentials"
+  vars:
+    ansible_python_interpreter: "/usr/bin/env python"
+  register: old_credential_dir
+  become: no
+
+- name: "Pre-upgrade | check if new credential dir exists"
+  local_action:
+    module: stat
+    path: "{{ inventory_dir }}/credentials"
+  vars:
+    ansible_python_interpreter: "/usr/bin/env python"
+  register: new_credential_dir
+  become: no
+  when: old_credential_dir.stat.exists
+
+- name: "Pre-upgrade | move data from old credential dir to new"
+  local_action: command mv {{ inventory_dir }}/../credentials {{ inventory_dir }}/credentials
+  args:
+    creates: "{{ inventory_dir }}/credentials"
+  vars:
+    ansible_python_interpreter: "/usr/bin/env python"
+  become: no
+  when: old_credential_dir.stat.exists and not new_credential_dir.stat.exists
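
Note: the new pre_upgrade.yml above migrates credentials that earlier layouts kept one level above the inventory into the inventory directory itself, so per-inventory artifacts stay self-contained. Illustrative effect of the move task (paths as used in the tasks above):

    # before:  {{ inventory_dir }}/../credentials/...
    # after:   {{ inventory_dir }}/credentials/...
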
diff --git a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml b/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
index fdc46125e97ac7e9fd5f8d09a84f04330bdf39ff..eb8f3f43f5bd49526a4f2eb251f87da9bc0b13cf 100644
--- a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
@@ -93,8 +93,10 @@
 - name: pick dnsmasq cluster IP or default resolver
   set_fact:
     dnsmasq_server: |-
-      {%- if dns_mode == 'kubedns' and not dns_early|bool -%}
+      {%- if dns_mode in ['kubedns', 'coredns'] and not dns_early|bool -%}
         {{ [ skydns_server ] + upstream_dns_servers|default([]) }}
+      {%- elif dns_mode == 'coredns_dual' and not dns_early|bool -%}
+        {{ [ skydns_server ] + [ skydns_server_secondary ] + upstream_dns_servers|default([]) }}
       {%- elif dns_mode == 'manual' and not dns_early|bool -%}
         {{ [ manual_dns_server ] + upstream_dns_servers|default([]) }}
       {%- elif dns_early|bool -%}
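
Note: the added coredns_dual branch above puts both cluster DNS service IPs ahead of any upstream servers. A worked example, assuming kube_service_addresses is 10.233.0.0/18 (so skydns_server resolves to 10.233.0.3 and skydns_server_secondary to 10.233.0.4, per the defaults added later in this diff) and no upstream_dns_servers:

    # dns_mode: coredns       ->  dnsmasq_server: ['10.233.0.3']
    # dns_mode: coredns_dual  ->  dnsmasq_server: ['10.233.0.3', '10.233.0.4']
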
diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh
index 750e9c4fe79680cf430b423bea57f16313240bb5..1c34fc69dba0b67e46c1686acb6a3e9a52653e99 100755
--- a/roles/kubernetes/secrets/files/make-ssl.sh
+++ b/roles/kubernetes/secrets/files/make-ssl.sh
@@ -69,7 +69,7 @@ if [ -e "$SSLDIR/ca-key.pem" ]; then
     cp $SSLDIR/{ca.pem,ca-key.pem} .
 else
     openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
-    openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
+    openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
 fi
 
 gen_key_and_cert() {
@@ -77,11 +77,22 @@ gen_key_and_cert() {
     local subject=$2
     openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1
     openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
-    openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
 }
 
 # Admins
 if [ -n "$MASTERS" ]; then
+
+    # service-account
+    # If --service-account-private-key-file was previously configured to use apiserver-key.pem, copy it to the new dedicated service-account signing key location to avoid disruptions
+    if [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then
+       cp $SSLDIR/apiserver-key.pem $SSLDIR/service-account-key.pem
+    fi
+    # Generate dedicated service account signing key if one doesn't exist
+    if ! [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then
+        openssl genrsa -out service-account-key.pem 2048 > /dev/null 2>&1
+    fi
+
     # kube-apiserver
     # Generate only if we don't have existing ca and apiserver certs
     if ! [ -e "$SSLDIR/ca-key.pem" ] || ! [ -e "$SSLDIR/apiserver-key.pem" ]; then
diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml
index 6278897710c8814cac0a30ef3d931c1ded673491..07820edf7bacfa8ce850a57c41b810b555ba6a9f 100644
--- a/roles/kubernetes/secrets/tasks/check-certs.yml
+++ b/roles/kubernetes/secrets/tasks/check-certs.yml
@@ -50,6 +50,7 @@
        '{{ kube_cert_dir }}/kube-controller-manager-key.pem',
        '{{ kube_cert_dir }}/front-proxy-client.pem',
        '{{ kube_cert_dir }}/front-proxy-client-key.pem',
+       '{{ kube_cert_dir }}/service-account-key.pem',
        {% for host in groups['kube-master'] %}
        '{{ kube_cert_dir }}/admin-{{ host }}.pem'
        '{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
@@ -71,7 +72,8 @@
       {% for cert in ['apiserver.pem', 'apiserver-key.pem',
                       'kube-scheduler.pem','kube-scheduler-key.pem',
                       'kube-controller-manager.pem','kube-controller-manager-key.pem',
-                      'front-proxy-client.pem','front-proxy-client-key.pem'] -%}
+                      'front-proxy-client.pem','front-proxy-client-key.pem',
+                      'service-account-key.pem'] -%}
         {% set cert_file = "%s/%s.pem"|format(kube_cert_dir, cert) %}
         {% if not cert_file in existing_certs -%}
         {%- set gen = True -%}
@@ -105,9 +107,9 @@
       {%- set certs = {'sync': False} -%}
       {% if gen_node_certs[inventory_hostname] or
         (not kubecert_node.results[0].stat.exists|default(False)) or
-          (not kubecert_node.results[10].stat.exists|default(False)) or
-            (not kubecert_node.results[7].stat.exists|default(False)) or
-              (kubecert_node.results[10].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[10].stat.path)|map(attribute="checksum")|first|default('')) -%}
+          (not kubecert_node.results[12].stat.exists|default(False)) or
+            (not kubecert_node.results[8].stat.exists|default(False)) or
+              (kubecert_node.results[12].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[12].stat.path)|map(attribute="checksum")|first|default('')) -%}
                 {%- set _ = certs.update({'sync': True}) -%}
       {% endif %}
       {{ certs.sync }}
diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
index c1dfeb394a1fe1980fc3d539498c86e1face0c51..c39f606ad43f85ea29d7c4c0d7d016891412d378 100644
--- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml
+++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
@@ -75,6 +75,7 @@
                        'kube-controller-manager-key.pem',
                        'front-proxy-client.pem',
                        'front-proxy-client-key.pem',
+                       'service-account-key.pem',
                        {% for node in groups['kube-master'] %}
                        'admin-{{ node }}.pem',
                        'admin-{{ node }}-key.pem',
@@ -86,6 +87,7 @@
                       'apiserver-key.pem',
                       'front-proxy-client.pem',
                       'front-proxy-client-key.pem',
+                      'service-account-key.pem',
                       'kube-scheduler.pem',
                       'kube-scheduler-key.pem',
                       'kube-controller-manager.pem',
@@ -179,6 +181,7 @@
   file:
     path: "{{ kube_cert_dir }}"
     group: "{{ kube_cert_group }}"
+    state: directory
     owner: kube
     mode: "u=rwX,g-rwx,o-rwx"
     recurse: yes
diff --git a/roles/kubernetes/secrets/tasks/gen_tokens.yml b/roles/kubernetes/secrets/tasks/gen_tokens.yml
index a4cc0f69bfdd9660711415032d351e9d5f9a11dd..df47d157dae885c5c0bc3e88d05ba897ab29e2ce 100644
--- a/roles/kubernetes/secrets/tasks/gen_tokens.yml
+++ b/roles/kubernetes/secrets/tasks/gen_tokens.yml
@@ -55,4 +55,4 @@
 - name: Gen_tokens | Copy tokens on masters
   shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /"
   when: inventory_hostname in groups['kube-master'] and sync_tokens|default(false) and
-        inventory_hostname != groups['kube-master'][0]
+        inventory_hostname != groups['kube-master'][0] and tokens_data.stdout != ''
diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2
index a214739c9a69affdffe5ec68abaa52bf21123764..38902aeef20f25c13dd4d23b3cc7cca5e78c38c8 100644
--- a/roles/kubernetes/secrets/templates/openssl.conf.j2
+++ b/roles/kubernetes/secrets/templates/openssl.conf.j2
@@ -1,4 +1,4 @@
-[req]
+{% set counter = {'dns': 6,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
 req_extensions = v3_req
 distinguished_name = req_distinguished_name
 [req_distinguished_name]
@@ -13,30 +13,30 @@ DNS.3 = kubernetes.default.svc
 DNS.4 = kubernetes.default.svc.{{ dns_domain }}
 DNS.5 = localhost
 {% for host in groups['kube-master'] %}
-DNS.{{ 5 + loop.index }} = {{ host }}
+DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
 {% endfor %}
-{% if loadbalancer_apiserver is defined  %}
-{% set idx =  groups['kube-master'] | length | int + 5 + 1 %}
-DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
+{% if apiserver_loadbalancer_domain_name is defined  %}
+DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
 {% endif %}
 {% for host in groups['kube-master'] %}
-IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
-IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+{% if hostvars[host]['access_ip'] is defined  %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
+{% endif %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
 {% endfor %}
-{% set idx =  groups['kube-master'] | length | int * 2 + 1 %}
-IP.{{ idx }} = {{ kube_apiserver_ip }}
-{% if loadbalancer_apiserver is defined  %}
-IP.{{ idx + 1 }} = {{ loadbalancer_apiserver.address }}
-{% set idx = idx + 1 %}
+{% if kube_apiserver_ip is defined  %}
+IP.{{ counter["ip"] }} = {{ kube_apiserver_ip }}{{ increment(counter, 'ip') }}
+{% endif %}
+{% if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined  %}
+IP.{{ counter["ip"] }} = {{ loadbalancer_apiserver.address }}{{ increment(counter, 'ip') }}
 {% endif %}
-IP.{{ idx + 1 }} = 127.0.0.1
 {% if supplementary_addresses_in_ssl_keys is defined %}
-{% set is = idx + 1 %}
 {% for addr in supplementary_addresses_in_ssl_keys %}
 {% if addr | ipaddr %}
-IP.{{ is + loop.index }} = {{ addr }}
+IP.{{ counter["ip"] }} = {{ addr }}{{ increment(counter, 'ip') }}
 {% else %}
-DNS.{{ is + loop.index }} = {{ addr }}
+DNS.{{ counter["dns"] }} = {{ addr }}{{ increment(counter, 'dns') }}
 {% endif %}
 {% endfor %}
 {% endif %}
+IP.{{ counter["ip"] }} = 127.0.0.1
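
Note: the openssl.conf.j2 rewrite above replaces loop.index arithmetic with a counter dict and an increment macro, so optional SAN entries (a host's access_ip, the API server load-balancer name or address, supplementary addresses) no longer leave gaps or collide. An illustrative rendering of the [alt_names] tail for a single master named node1 with access_ip 192.0.2.10, ip 10.0.0.10, an apiserver_loadbalancer_domain_name of lb.example.com and kube_apiserver_ip 10.233.0.1 (all values hypothetical):

    DNS.6 = node1
    DNS.7 = lb.example.com
    IP.1 = 192.0.2.10
    IP.2 = 10.0.0.10
    IP.3 = 10.233.0.1
    IP.4 = 127.0.0.1
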
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 6883370ee908f8bfbf15cbb56c4eaf358f08b50d..d6217d654e6011d458fc5b7c29547397672a461a 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -5,15 +5,19 @@ bootstrap_os: none
 
 # Use ProxyCommand if a bastion host is in group all
 # This change obsoletes editing the ansible.cfg file depending on bastion existence
-ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ ansible_user }}@{{hostvars['bastion']['ansible_host']}} ' {% endif %}"
+ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ ansible_user }}@{{hostvars['bastion']['ansible_host']}} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
 
 kube_api_anonymous_auth: false
 
 # Default value, but will be set to true automatically if detected
 is_atomic: false
 
+
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.9.2
+kube_version: v1.9.5
+
+## Kube-proxy mode: one of ['iptables', 'ipvs']
+kube_proxy_mode: iptables
 
 # Set to true to allow pre-checks to fail and continue deployment
 ignore_assert_errors: false
@@ -45,6 +49,7 @@ resolvconf_mode: docker_dns
 deploy_netchecker: false
 # Ip address of the kubernetes skydns service
 skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
 dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
 dns_domain: "{{ cluster_name }}"
 
@@ -56,7 +61,6 @@ dns_domain: "{{ cluster_name }}"
 kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
 
 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"
@@ -123,7 +127,7 @@ kube_apiserver_insecure_bind_address: 127.0.0.1
 kube_apiserver_insecure_port: 8080
 
 # Aggregator
-kube_api_aggregator_routing: true
+kube_api_aggregator_routing: false
 
 # Path used to store Docker data
 docker_daemon_graph: "/var/lib/docker"
@@ -148,9 +152,9 @@ helm_deployment_type: host
 # Enable kubeadm deployment (experimental)
 kubeadm_enabled: false
 
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
 kubeconfig_localhost: false
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
 kubectl_localhost: false
 
 # K8s image pull policy (imagePullPolicy)
@@ -166,14 +170,11 @@ helm_enabled: false
 istio_enabled: false
 registry_enabled: false
 enable_network_policy: false
-local_volume_provisioner_enabled: false
+local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') }}"
 persistent_volumes_enabled: false
 cephfs_provisioner_enabled: false
-
-# Base path for local volume provisioner addon
-local_volume_base_dir: /mnt/disks
-local_volume_mount_dir: /local-disks
-local_volume_storage_class: local-storage
+ingress_nginx_enabled: false
+cert_manager_enabled: false
 
 ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
 # openstack_blockstorage_version: "v1/v2/auto (default)"
@@ -197,9 +198,19 @@ openstack_lbaas_monitor_max_retries: "3"
 authorization_modes: ['Node', 'RBAC']
 rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
 
+# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet’s HTTPS endpoint
+kubelet_authentication_token_webhook: false
+
+# When enabled, access to the kubelet API requires authorization by delegation to the API server
+kubelet_authorization_mode_webhook: false
+
 ## List of key=value pairs that describe feature gates for
 ## the k8s cluster.
-kube_feature_gates: ['Initializers={{ istio_enabled|string }}', 'PersistentLocalVolumes={{ local_volume_provisioner_enabled|string }}']
+kube_feature_gates:
+  - "Initializers={{ istio_enabled | string }}"
+  - "PersistentLocalVolumes={{ local_volume_provisioner_enabled | string }}"
+  - "VolumeScheduling={{ local_volume_provisioner_enabled | string }}"
+  - "MountPropagation={{ local_volume_provisioner_enabled | string }}"
 
 # Vault data dirs.
 vault_base_dir: /etc/vault
@@ -230,6 +241,7 @@ weave_peers: uninitialized
 
 ## Set no_proxy to all assigned cluster IPs and hostnames
 no_proxy: >-
+  {%- if http_proxy is defined or https_proxy is defined %}
   {%- if loadbalancer_apiserver is defined -%}
   {{ apiserver_loadbalancer_domain_name| default('') }},
   {{ loadbalancer_apiserver.address | default('') }},
@@ -243,11 +255,12 @@ no_proxy: >-
   {{ item }},{{ item }}.{{ dns_domain }},
   {%- endfor -%}
   127.0.0.1,localhost
+  {%- endif %}
 
 proxy_env:
   http_proxy: "{{ http_proxy| default ('') }}"
   https_proxy: "{{ https_proxy| default ('') }}"
-  no_proxy: "{{ no_proxy }}"
+  no_proxy: "{{ no_proxy| default ('') }}"
 
 # Vars for pointing to kubernetes api endpoints
 is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
@@ -285,16 +298,25 @@ kube_apiserver_client_key: |-
   {{ kube_cert_dir }}/apiserver-key.pem
   {%- endif %}
 
+# Set to true to deploy etcd-events cluster
+etcd_events_cluster_setup: false
+
 # Vars for pointing to etcd endpoints
 is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
 etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
 etcd_access_address: "{{ access_ip | default(etcd_address) }}"
 etcd_peer_url: "https://{{ etcd_access_address }}:2380"
 etcd_client_url: "https://{{ etcd_access_address }}:2379"
+etcd_events_peer_url: "https://{{ etcd_access_address }}:2382"
+etcd_events_client_url: "https://{{ etcd_access_address }}:2381"
 etcd_access_addresses: |-
   {% for item in groups['etcd'] -%}
     https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
   {%- endfor %}
+etcd_events_access_addresses: |-
+  {% for item in groups['etcd'] -%}
+    https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2381{% if not loop.last %},{% endif %}
+  {%- endfor %}
 etcd_member_name: |-
   {% for host in groups['etcd'] %}
   {%   if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
@@ -303,3 +325,7 @@ etcd_peer_addresses: |-
   {% for item in groups['etcd'] -%}
     {{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
   {%- endfor %}
+etcd_events_peer_addresses: |-
+  {% for item in groups['etcd'] -%}
+    {{ "etcd"+loop.index|string }}-events=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2382{% if not loop.last %},{% endif %}
+  {%- endfor %}
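
Note: among the defaults added above, the optional etcd-events cluster reuses the etcd hosts but listens on 2381 (clients) and 2382 (peers) instead of 2379/2380, and kube-proxy gains a mode switch. A hedged group_vars sketch for opting in:

    etcd_events_cluster_setup: true
    kube_proxy_mode: ipvs
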
diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml
index 902d0170780b8e821b1abd90d6dab265bc63124d..857ebd11aac4400a67ecc5110d067933969ff915 100644
--- a/roles/network_plugin/calico/defaults/main.yml
+++ b/roles/network_plugin/calico/defaults/main.yml
@@ -16,9 +16,6 @@ etcd_cert_dir: /etc/ssl/etcd/ssl
 # Global as_num (/calico/bgp/v1/global/as_num)
 global_as_num: "64512"
 
-# Set to true if you need to configure multiple pools (this is not common)
-calico_ignore_extra_pools: false
-
 # You can set MTU value here. If left undefined or empty, it will
 # not be specified in calico CNI config, so Calico will use built-in
 # defaults. The value should be a number, not a string.
@@ -48,3 +45,9 @@ rbac_resources:
   - sa
   - clusterrole
   - clusterrolebinding
+
+# If you want to use a non-default IP_AUTODETECTION_METHOD for the calico node, set this option to one of:
+# * can-reach=DESTINATION
+# * interface=INTERFACE-REGEX
+# see https://docs.projectcalico.org/v3.0/reference/node/configuration#ip-autodetection-methods
+# calico_ip_auto_method: "interface=eth.*"
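
Note: besides the interface form shown above, the same variable accepts the can-reach form listed in the comment, e.g. in group_vars (destination address is illustrative):

    calico_ip_auto_method: "can-reach=8.8.8.8"
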
diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml
index 5b893f38e378e5bf3e9bf924c8ab7b30c035cf27..02cfce152956258272a6ddb0f743d006417f8c5f 100644
--- a/roles/network_plugin/calico/rr/tasks/main.yml
+++ b/roles/network_plugin/calico/rr/tasks/main.yml
@@ -48,7 +48,10 @@
 
 - name: Calico-rr | Configure route reflector
   command: |-
-    {{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} \
+    {{ bin_dir }}/etcdctl \
+    --peers={{ etcd_access_addresses }} \
+    --cert-file {{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}.pem \
+    --key-file {{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem \
     set /calico/bgp/v1/rr_v4/{{ rr_ip }} \
     '{
        "ip": "{{ rr_ip }}",
@@ -57,9 +60,6 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   delegate_to: "{{groups['etcd'][0]}}"
-  environment:
-    ETCDCTL_CERT: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
 
 - meta: flush_handlers
 
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml
index f3072d3880e8359458e8b1d795b4b1cedddca5c3..05e7b96111f9d1a0c43c2d2b5e98723bd86f33f8 100644
--- a/roles/network_plugin/calico/tasks/main.yml
+++ b/roles/network_plugin/calico/tasks/main.yml
@@ -81,28 +81,26 @@
 
 - name: Calico | wait for etcd
   uri:
-    url: https://localhost:2379/health
+    url: "{{ etcd_access_addresses.split(',') | first }}/health"
     validate_certs: no
-    client_cert: "{{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}.pem"
-    client_key: "{{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}-key.pem"
+    client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
+    client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
   register: result
   until: result.status == 200 or result.status == 401
   retries: 10
   delay: 5
-  delegate_to: "{{groups['etcd'][0]}}"
   run_once: true
 
 - name: Calico | Check if calico network pool has already been configured
   command: |-
     curl \
       --cacert {{ etcd_cert_dir }}/ca.pem \
-      --cert {{ etcd_cert_dir}}/admin-{{ groups['etcd'][0] }}.pem \
-      --key {{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem \
-      https://localhost:2379/v2/keys/calico/v1/ipam/v4/pool
+      --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \
+      --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
+      {{ etcd_access_addresses.split(',') | first }}/v2/keys/calico/v1/ipam/v4/pool
   register: calico_conf
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  delegate_to: "{{groups['etcd'][0]}}"
   run_once: true
   changed_when: false
 
@@ -125,27 +123,18 @@
   command: |-
     curl \
       --cacert {{ etcd_cert_dir }}/ca.pem \
-      --cert {{ etcd_cert_dir}}/admin-{{ groups['etcd'][0] }}.pem \
-      --key {{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem \
-      https://localhost:2379/v2/keys/calico/v1/ipam/v4/pool
+      --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \
+      --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
+      {{ etcd_access_addresses.split(',') | first }}/v2/keys/calico/v1/ipam/v4/pool
   register: calico_pools_raw
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  delegate_to: "{{groups['etcd'][0]}}"
   run_once: true
 
 - set_fact:
     calico_pools: "{{ calico_pools_raw.stdout | from_json }}"
   run_once: true
 
-- name: Calico | Check if calico pool is properly configured
-  fail:
-    msg: 'Only one network pool must be configured and it must be the subnet {{ kube_pods_subnet }}.
-    Please erase calico configuration and run the playbook again ("etcdctl rm --recursive /calico/v1/ipam/v4/pool")'
-  when: ( calico_pools['node']['nodes'] | length > 1 and not calico_ignore_extra_pools ) or
-        ( not calico_pools['node']['nodes'][0]['key'] | search(".*{{ kube_pods_subnet | ipaddr('network') }}.*") )
-  run_once: true
-
 - name: Calico | Set global as_num
   command: "{{ bin_dir}}/calicoctl config set asNumber {{ global_as_num }}"
   run_once: true
diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2
index 92d2f1f0a8192c060048a6506f4b2a79753ca62e..3be65deaa49d35d36f7f7d1c1db396b9bc0642a9 100644
--- a/roles/network_plugin/calico/templates/calico-config.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-config.yml.j2
@@ -2,7 +2,7 @@ kind: ConfigMap
 apiVersion: v1
 metadata:
   name: calico-config
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 data:
   etcd_endpoints: "{{ etcd_access_addresses }}"
   etcd_ca: "/calico-secrets/ca_cert.crt"
diff --git a/roles/network_plugin/calico/templates/calico-cr.yml.j2 b/roles/network_plugin/calico/templates/calico-cr.yml.j2
index 47d6266593b90798a77ce87c7278845a150e83da..cef8331f39dbf6fd2e2d4a5794a63f64bc0d0108 100644
--- a/roles/network_plugin/calico/templates/calico-cr.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-cr.yml.j2
@@ -3,7 +3,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: calico-node
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups: [""]
     resources:
diff --git a/roles/network_plugin/calico/templates/calico-crb.yml.j2 b/roles/network_plugin/calico/templates/calico-crb.yml.j2
index 2e132a0dc527da745256b41fbb8a715890336451..1b4e8fe00972f721daf44cf4c699ae373095ec00 100644
--- a/roles/network_plugin/calico/templates/calico-crb.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-crb.yml.j2
@@ -10,4 +10,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: calico-node
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2
index 5cce29793786552350911e8550f160ae0fad302e..68b1c286f9ba7ce445418db05a390251d69ff022 100644
--- a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: calico-node
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index 3a01648f76318de005ae45f0df0c4017ca7e3d40..849ea0afb94094b2ae9d0574d7875ca8c882e81d 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -6,7 +6,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: calico-node
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: calico-node
 spec:
@@ -28,6 +28,9 @@ spec:
       tolerations:
         - effect: NoSchedule
           operator: Exists
+      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+      terminationGracePeriodSeconds: 0
       containers:
         # Runs calico/node container on each Kubernetes node.  This
         # container programs network policy and routes on each
@@ -53,6 +56,11 @@ spec:
                 configMapKeyRef:
                   name: calico-config
                   key: cluster_type
+            # Set noderef for node controller.
+            - name: CALICO_K8S_NODE_REF
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
             # Disable file logging so `kubectl logs` works.
             - name: CALICO_DISABLE_FILE_LOGGING
               value: "true"
@@ -108,10 +116,15 @@ spec:
                 configMapKeyRef:
                   name: calico-config
                   key: etcd_cert
+{% if calico_ip_auto_method is defined %}
+            - name: IP_AUTODETECTION_METHOD
+              value: "{{ calico_ip_auto_method }}"
+{% else %}
             - name: IP
               valueFrom:
                 fieldRef:
                   fieldPath: status.hostIP
+{% endif %}
             - name: NODENAME
               valueFrom:
                 fieldRef:
diff --git a/roles/network_plugin/calico/templates/cni-calico.conflist.j2 b/roles/network_plugin/calico/templates/cni-calico.conflist.j2
index 32f2bfff6394167406ad2c0d1a6dfcb2c5aa8f00..6dd51e91298449e879222d04a12d99cfbbd02836 100644
--- a/roles/network_plugin/calico/templates/cni-calico.conflist.j2
+++ b/roles/network_plugin/calico/templates/cni-calico.conflist.j2
@@ -15,16 +15,18 @@
       "etcd_ca_cert_file": "{{ etcd_cert_dir }}/ca.pem",
       "log_level": "info",
       "ipam": {
-        "type": "calico-ipam"
+        "type": "calico-ipam",
+        "assign_ipv4": "true",
+        "ipv4_pools": ["{{ kube_pods_subnet }}"]
       },
     {% if enable_network_policy %}
       "policy": {
         "type": "k8s"
       },
-    {% endif %}
+    {%- endif %}
     {% if calico_mtu is defined and calico_mtu is number %}
       "mtu": {{ calico_mtu }},
-    {% endif %}
+    {%- endif %}
       "kubernetes": {
         "kubeconfig": "{{ kube_config_dir }}/node-kubeconfig.yaml"
       }
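
Note: the conflist template above now pins Calico IPAM to the cluster pod network. An illustrative rendering of the ipam block, assuming kube_pods_subnet is 10.233.64.0/18:

    "ipam": {
      "type": "calico-ipam",
      "assign_ipv4": "true",
      "ipv4_pools": ["10.233.64.0/18"]
    },
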
diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml
index d42f4ec5688cfeedbe497bfce67e2c416af1998b..a42c2cfa7159fd1b5e15e8d00688e8c953507b2a 100644
--- a/roles/network_plugin/canal/tasks/main.yml
+++ b/roles/network_plugin/canal/tasks/main.yml
@@ -35,8 +35,8 @@
   changed_when: false
   run_once: true
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem"
 
 - name: Canal | Create canal node manifests
   template:
diff --git a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2
index e3b048c640fe682e9d549db22ef82a2d5669fc0c..2e92b7b2b9cb2db3e5137acea8fa7f0e0ae2b100 100644
--- a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2
@@ -3,7 +3,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: calico
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups: [""]
     resources:
diff --git a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2
index e1c1f5050a5d871ee43de2aa36b51615c325c906..016e5193e2398e1405e48318e1b73964ff1b893e 100644
--- a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2
@@ -11,4 +11,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: canal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2
index 3b00017b13e02b48f6b89c42f38311d4c46b2383..097b1538e4c5c75ff4d7b297b412ee1dc0d1774b 100644
--- a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2
@@ -11,4 +11,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: canal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2
index d5b9a6e971ae3b055d8289f19cef25e5c828acb8..aa168d15c14d43d493493fe23109c5f859cd45d3 100644
--- a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: canal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
 
diff --git a/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2
index 07754c089cbff0f43ecdfab91630056be718d83a..8535360a101b68010658b72b2beba9e42181763a 100644
--- a/roles/network_plugin/canal/templates/canal-node.yaml.j2
+++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: canal-node
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: canal-node
 spec:
@@ -148,14 +148,21 @@ spec:
                   name: canal-config
                   key: etcd_endpoints
             # Disable Calico BGP.  Calico is simply enforcing policy.
-            - name: CALICO_NETWORKING
-              value: "false"
+            - name: CALICO_NETWORKING_BACKEND
+              value: "none"
             # Cluster type to identify the deployment type
             - name: CLUSTER_TYPE
               value: "kubespray,canal"
             # Disable file logging so `kubectl logs` works.
             - name: CALICO_DISABLE_FILE_LOGGING
               value: "true"
+            # Set noderef for node controller.
+            - name: CALICO_K8S_NODE_REF
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: FELIX_HEALTHENABLED
+              value: "true"
             # Etcd SSL vars
             - name: ETCD_CA_CERT_FILE
               valueFrom:
@@ -178,6 +185,18 @@ spec:
                   fieldPath: spec.nodeName
           securityContext:
             privileged: true
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9099
+            periodSeconds: 10
+            initialDelaySeconds: 10
+            failureThreshold: 6
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9099
+            periodSeconds: 10
           volumeMounts:
             - mountPath: /lib/modules
               name: lib-modules
diff --git a/roles/network_plugin/cilium/templates/cilium-config.yml.j2 b/roles/network_plugin/cilium/templates/cilium-config.yml.j2
index a96bb8531388c0c1fa6a25a3b5e6320dcbd101b1..c5051e2cae12fada3b9d602a6764f5f77d934a04 100755
--- a/roles/network_plugin/cilium/templates/cilium-config.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-config.yml.j2
@@ -2,7 +2,7 @@ kind: ConfigMap
 apiVersion: v1
 metadata:
   name: cilium-config
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 data:
   # This etcd-config contains the etcd endpoints of your cluster. If you use
   # TLS please make sure you uncomment the ca-file line and add the respective
diff --git a/roles/network_plugin/cilium/templates/cilium-cr.yml.j2 b/roles/network_plugin/cilium/templates/cilium-cr.yml.j2
index 8eae0e8edb61a9cc3f3941e8cb0faf9512e26d55..11fd0108752328d011b3d65a393381ba19fc59c5 100755
--- a/roles/network_plugin/cilium/templates/cilium-cr.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-cr.yml.j2
@@ -54,9 +54,11 @@ rules:
   - get
   - list
   - watch
+  - update
 - apiGroups:
   - cilium.io
   resources:
   - ciliumnetworkpolicies
+  - ciliumendpoints
   verbs:
   - "*"
diff --git a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
index dcfe4d47122dbbbe69b41969607da4d3a8acdedf..04d603d57a7289ed874ff7fbf127ff68035b1bb2 100755
--- a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
@@ -10,6 +10,6 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: cilium
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 - kind: Group
   name: system:nodes
diff --git a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
index 9f48a62db20e3e02b398e751cc9b786a26708a3d..8eaa24f3212bf6044e43cb263767d75de4df4631 100755
--- a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: cilium
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   template:
     metadata:
@@ -79,6 +79,13 @@ spec:
                 optional: true
                 key: prometheus-serve-addr
 {% endif %}
+        resources:
+          limits:
+            cpu: {{ cilium_cpu_limit }}
+            memory: {{ cilium_memory_limit }}
+          requests:
+            cpu: {{ cilium_cpu_requests }}
+            memory: {{ cilium_memory_requests }}
         livenessProbe:
           exec:
             command:
diff --git a/roles/network_plugin/cilium/templates/cilium-sa.yml.j2 b/roles/network_plugin/cilium/templates/cilium-sa.yml.j2
index d6ef2a4314d57101153b246160cb1b5bdea63f12..c03ac59b49b43b99a9ed99ae8fbd4953404d3c3f 100755
--- a/roles/network_plugin/cilium/templates/cilium-sa.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-sa.yml.j2
@@ -3,4 +3,4 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: cilium
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/contiv/files/generate-certificate.sh b/roles/network_plugin/contiv/files/generate-certificate.sh
index e794dbb696655bbd2be878b275449a96964ece84..0235b2664bca87f2e68d2857f1053383531b5824 100644
--- a/roles/network_plugin/contiv/files/generate-certificate.sh
+++ b/roles/network_plugin/contiv/files/generate-certificate.sh
@@ -17,7 +17,7 @@ rm -f $KEY_PATH
 rm -f $CERT_PATH
 
 openssl genrsa -out $KEY_PATH 2048 >/dev/null 2>&1
-openssl req -new -x509 -sha256 -days 3650 \
+openssl req -new -x509 -sha256 -days 36500 \
 	-key $KEY_PATH \
 	-out $CERT_PATH \
 	-subj "/C=US/ST=CA/L=San Jose/O=CPSG/OU=IT Department/CN=auth-local.cisco.com"
diff --git a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
index 140379b13f4aa2647151703cc64dbae18848eb42..3ccaffaf89538a84a3ec5e5e8e26103f07c91333 100644
--- a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: contiv-api-proxy
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: contiv-api-proxy
 spec:
@@ -12,7 +12,7 @@ spec:
   template:
     metadata:
       name: contiv-api-proxy
-      namespace: {{ system_namespace }}
+      namespace: kube-system
       labels:
         k8s-app: contiv-api-proxy
       annotations:
diff --git a/roles/network_plugin/contiv/templates/contiv-config.yml.j2 b/roles/network_plugin/contiv/templates/contiv-config.yml.j2
index 0505cd1f1e9533bd9205148e6d2c27cc35bd0711..249d9d88ebbcbfb9478f9b0739fa80c8246ebd27 100644
--- a/roles/network_plugin/contiv/templates/contiv-config.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-config.yml.j2
@@ -5,7 +5,7 @@ kind: ConfigMap
 apiVersion: v1
 metadata:
   name: contiv-config
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 data:
   # The location of your cluster store. This is set to the
   # advertise-client value below from the contiv-etcd service.
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
index a9690cc2fa29cd83a66d5a13a10b679a6e3209a2..75946d82191729ac0a27d14c2407eb1fbe8b1e16 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: contiv-etcd-proxy
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: contiv-etcd-proxy
 spec:
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
index 8060f4c01e678aaa6734301546fd8612b87f6888..a6e9121d4ff11f122a7fb37917feaba1aed78e25 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: contiv-etcd
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: contiv-etcd
 spec:
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
index 82ca00437532f6f2565ff5048724e55ee54568b1..6ccd4f9b49479985c31796d731aff975a8ee7ab9 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
@@ -2,7 +2,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: contiv-netmaster
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups:
     - ""
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2
index 74c5e3145beddbd94af988ba4c8b8d5583a58cf5..73d636775a10811e1ba0a1a0ceb04d9cb4cfab36 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2
@@ -9,4 +9,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: contiv-netmaster
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2
index 0c1bfb3e58f206f904122530a5cf1afd0e91424b..758ea449336307555a5cf3cb7e7bdca4f62e2a46 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2
@@ -2,6 +2,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: contiv-netmaster
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
index 56be2d93d6a6fcb58a7bf59c2c07c29eb8086a24..d41259ec16b94fdb5bd3f592137b50bfe56cbf08 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: contiv-netmaster
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: contiv-netmaster
 spec:
@@ -12,7 +12,7 @@ spec:
   template:
     metadata:
       name: contiv-netmaster
-      namespace: {{ system_namespace }}
+      namespace: kube-system
       labels:
         k8s-app: contiv-netmaster
       annotations:
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2
index c26e094edb36380cde23d1925ad884359d58f9c7..af4c6e584829459402a44f3e44b71855d5568380 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2
@@ -2,7 +2,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: contiv-netplugin
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups:
     - ""
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2
index 0c989008a6548a06ecb885b7c3a4bc44f95a7762..6cac217fc744e88a2f0958754118e1c6ad29577f 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2
@@ -9,4 +9,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: contiv-netplugin
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2
index edfac8bb34a716bbaab20f9e52868444c5e2cc66..8d00ec8cb43f4d2cbba1a6eefbf7658cc04ff283 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2
@@ -2,6 +2,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: contiv-netplugin
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
index 9c2c0a036ad5fbcaf359f3419a7873dbcfe6a33e..2a7bf71cbb164786b56ebbb558e66c3c78154ea5 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
@@ -5,7 +5,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: contiv-netplugin
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: contiv-netplugin
 spec:
diff --git a/roles/network_plugin/flannel/defaults/main.yml b/roles/network_plugin/flannel/defaults/main.yml
index 08f4ac145f1fc5f119c1647a68992161f62f591b..e48a9475a7311e690ee0c054877552b041c0c8d2 100644
--- a/roles/network_plugin/flannel/defaults/main.yml
+++ b/roles/network_plugin/flannel/defaults/main.yml
@@ -5,9 +5,15 @@
 # flannel_public_ip: "{{ access_ip|default(ip|default(ansible_default_ipv4.address)) }}"
 
 ## interface that should be used for flannel operations
-## This is actually an inventory node-level item
+## This is actually an inventory cluster-level item
 # flannel_interface:
 
+## Select the interface that should be used for flannel operations by a regexp on its name or IP
+## This is actually an inventory cluster-level item
+## Example: select the interface with an IP from the 10.0.0.0/23 network
+## (single-quote the value and escape backslashes)
+# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
+
 # You can choose what type of flannel backend to use
 # please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
 flannel_backend_type: "vxlan"
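
Note: flannel_interface_regexp is passed to flanneld as --iface-regex (see the daemonset command further down in this diff), and per the comment above it may match interface names as well as IPs, for example:

    # flannel_interface_regexp: 'eth.*'
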
diff --git a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
index aafe2a0f525a28ff8a94755444efe7830cde9813..6f5c9a2114c76c116ad074ac7718ef02241973b9 100644
--- a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
+++ b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: flannel
-  namespace: "{{system_namespace}}"
+  namespace: "kube-system"
 ---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -41,4 +41,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: flannel
-  namespace: "{{system_namespace}}"
\ No newline at end of file
+  namespace: "kube-system"
\ No newline at end of file
diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
index 6c69dcaa84a3fe2e68621609a9c7e73187c8af2e..7ecb21ad06848de6e06949c20547845fbd505768 100644
--- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
+++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
@@ -3,7 +3,7 @@ kind: ConfigMap
 apiVersion: v1
 metadata:
   name: kube-flannel-cfg
-  namespace: "{{system_namespace}}"
+  namespace: "kube-system"
   labels:
     tier: node
     app: flannel
@@ -41,7 +41,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: kube-flannel
-  namespace: "{{system_namespace}}"
+  namespace: "kube-system"
   labels:
     tier: node
     k8s-app: flannel
@@ -66,7 +66,7 @@ spec:
           requests:
             cpu: {{ flannel_cpu_requests }}
             memory: {{ flannel_memory_requests }}
-        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %} ]
+        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %}{% if flannel_interface_regexp is defined %}, "--iface-regex={{ flannel_interface_regexp }}"{% endif %} ]
         securityContext:
           privileged: true
         env:
diff --git a/roles/network_plugin/weave/defaults/main.yml b/roles/network_plugin/weave/defaults/main.yml
index b59f0ab6341041c9660a23ae5e4014b78cd0d9aa..eecb06171be55c0ad95791c3ab0d72f52112aba1 100644
--- a/roles/network_plugin/weave/defaults/main.yml
+++ b/roles/network_plugin/weave/defaults/main.yml
@@ -1,7 +1,7 @@
 ---
 # Limits
-weave_memory_limit: 400M
-weave_cpu_limit: 30m
+weave_memory_limits: 400M
+weave_cpu_limits: 30m
 weave_memory_requests: 64M
 weave_cpu_requests: 10m
 
diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml
index f3f1da6ac3a83c3f403fee8e40b31109d4f4a0c4..c2c5d82c00e42b8286b8dd7ab1e2c60cb9d731f1 100644
--- a/roles/network_plugin/weave/tasks/main.yml
+++ b/roles/network_plugin/weave/tasks/main.yml
@@ -2,6 +2,12 @@
 - import_tasks: seed.yml
   when: weave_mode_seed
 
+- name: template weavenet conflist
+  template:
+    src: 00-weave.conflist.j2
+    dest: /etc/cni/net.d/00-weave.conflist
+    owner: kube
+
 - name: Weave | Copy cni plugins from hyperkube
   command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
   register: cni_task_result
diff --git a/roles/network_plugin/weave/templates/00-weave.conflist.j2 b/roles/network_plugin/weave/templates/00-weave.conflist.j2
new file mode 100644
index 0000000000000000000000000000000000000000..45ae0b9676d7fe2a2216402472b4d6185c888b19
--- /dev/null
+++ b/roles/network_plugin/weave/templates/00-weave.conflist.j2
@@ -0,0 +1,16 @@
+{
+    "cniVersion": "0.3.0",
+    "name": "mynet",
+      "plugins": [
+        {
+            "name": "weave",
+            "type": "weave-net",
+            "hairpinMode": true
+        },
+        {
+            "type": "portmap",
+            "capabilities": {"portMappings": true},
+            "snat": true
+        }
+    ]
+}
diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2
index b292339b5682468fab8b0b77da694a8201b134ce..9a7da7377e58cabfa9be3334b2d6e7bf12416224 100644
--- a/roles/network_plugin/weave/templates/weave-net.yml.j2
+++ b/roles/network_plugin/weave/templates/weave-net.yml.j2
@@ -8,13 +8,14 @@ items:
       name: weave-net
       labels:
         name: weave-net
-      namespace: {{ system_namespace }}
-  - apiVersion: rbac.authorization.k8s.io/v1
+      namespace: kube-system
+  - apiVersion: rbac.authorization.k8s.io/v1beta1
     kind: ClusterRole
     metadata:
       name: weave-net
       labels:
         name: weave-net
+      namespace: kube-system
     rules:
       - apiGroups:
           - ''
@@ -27,15 +28,7 @@ items:
           - list
           - watch
       - apiGroups:
-          - extensions
-        resources:
-          - networkpolicies
-        verbs:
-          - get
-          - list
-          - watch
-      - apiGroups:
-          - 'networking.k8s.io'
+          - networking.k8s.io
         resources:
           - networkpolicies
         verbs:
@@ -43,19 +36,34 @@ items:
           - list
           - watch
   - apiVersion: rbac.authorization.k8s.io/v1beta1
-    kind: Role
+    kind: ClusterRoleBinding
     metadata:
       name: weave-net
+      labels:
+        name: weave-net
       namespace: kube-system
+    roleRef:
+      kind: ClusterRole
+      name: weave-net
+      apiGroup: rbac.authorization.k8s.io
+    subjects:
+      - kind: ServiceAccount
+        name: weave-net
+        namespace: kube-system
+  - apiVersion: rbac.authorization.k8s.io/v1beta1
+    kind: Role
+    metadata:
+      name: weave-net
       labels:
         name: weave-net
+      namespace: kube-system
     rules:
       - apiGroups:
           - ''
-        resources:
-          - configmaps
         resourceNames:
           - weave-net
+        resources:
+          - configmaps
         verbs:
           - get
           - update
@@ -65,14 +73,15 @@ items:
           - configmaps
         verbs:
           - create
-  - apiVersion: rbac.authorization.k8s.io/v1
-    kind: ClusterRoleBinding
+  - apiVersion: rbac.authorization.k8s.io/v1beta1
+    kind: RoleBinding
     metadata:
       name: weave-net
       labels:
         name: weave-net
+      namespace: kube-system
     roleRef:
-      kind: ClusterRole
+      kind: Role
       name: weave-net
       apiGroup: rbac.authorization.k8s.io
     subjects:
@@ -85,9 +94,10 @@ items:
       name: weave-net
       labels:
         name: weave-net
-        version: {{ weave_version }}
-      namespace: {{ system_namespace }}
+        version: v{{ weave_version }}
+      namespace: kube-system
     spec:
+      minReadySeconds: 5
       template:
         metadata:
           labels:
@@ -122,7 +132,7 @@ items:
                 - name: WEAVE_PASSWORD
                   value: {{ weave_password }}
               image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }}
-              imagePullPolicy: Always
+              imagePullPolicy: {{ k8s_image_pull_policy }}
               livenessProbe:
                 httpGet:
                   host: 127.0.0.1
@@ -131,7 +141,11 @@ items:
                 initialDelaySeconds: 30
               resources:
                 requests:
-                  cpu: 10m
+                  cpu: {{ weave_cpu_requests }}
+                  memory: {{ weave_memory_requests }}
+                limits:
+                  cpu: {{ weave_cpu_limits }}
+                  memory: {{ weave_memory_limits }}
               securityContext:
                 privileged: true
               volumeMounts:
@@ -149,19 +163,28 @@ items:
                   mountPath: /lib/modules
                 - name: xtables-lock
                   mountPath: /run/xtables.lock
-                  readOnly: false
             - name: weave-npc
+              args: []
+              env:
+                - name: HOSTNAME
+                  valueFrom:
+                    fieldRef:
+                      apiVersion: v1
+                      fieldPath: spec.nodeName
               image: {{ weave_npc_image_repo }}:{{ weave_npc_image_tag }}
-              imagePullPolicy: Always
+              imagePullPolicy: {{ k8s_image_pull_policy }}
               resources:
                 requests:
                   cpu: {{ weave_cpu_requests }}
                   memory: {{ weave_memory_requests }}
                 limits:
-                  cpu: {{ weave_cpu_limit }}
-                  memory: {{ weave_memory_limit }}
+                  cpu: {{ weave_cpu_limits }}
+                  memory: {{ weave_memory_limits }}
               securityContext:
                 privileged: true
+              volumeMounts:
+                - name: xtables-lock
+                  mountPath: /run/xtables.lock
           hostNetwork: true
           hostPID: true
           restartPolicy: Always
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..395f9986baec2f3737bbe9f8dd244fc9980e0777
--- /dev/null
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete node
+  command: kubectl delete node {{ item }}
+  with_items:
+    - "{{ groups['kube-node'] }}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
+  ignore_errors: yes
diff --git a/roles/remove-node/pre-remove/defaults/main.yml b/roles/remove-node/pre-remove/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e9e1ba28d3128569355d2454baddebc67077d902
--- /dev/null
+++ b/roles/remove-node/pre-remove/defaults/main.yml
@@ -0,0 +1,5 @@
+---
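+# Grace period (seconds) given to each pod to terminate when a node is drained.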
+drain_grace_period: 300
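+# Overall time limit for the drain, passed to kubectl's --timeout flag.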
+drain_timeout: 360s
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..12091917ac7fd9d0122db537537455132866c150
--- /dev/null
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+
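+# Drain workloads off each node via the first master before the node is removed.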
+- name: remove-node | Drain node except daemonsets resource
+  command: >-
+    {{ bin_dir }}/kubectl drain
+      --force
+      --ignore-daemonsets
+      --grace-period {{ drain_grace_period }}
+      --timeout {{ drain_timeout }}
+      --delete-local-data {{ item }}
+  with_items:
+    - "{{ groups['kube-node'] }}"
+  failed_when: false
+  delegate_to: "{{ groups['kube-master'][0] }}"
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index f6714f68008bbd761c8496d0372a5a1dd253fd46..9ae683df3431bbaf9804e2a833d949ba87461781 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -8,6 +8,7 @@
     - kubelet
     - vault
     - etcd
+    - etcd-events
   failed_when: false
   tags:
     - services
@@ -19,6 +20,7 @@
   with_items:
     - kubelet
     - etcd
+    - etcd-events
     - vault
     - calico-node
   register: services_removed
@@ -32,6 +34,7 @@
   with_items:
     - docker-dns.conf
     - docker-options.conf
+    - http-proxy.conf
   register: docker_dropins_removed
   tags:
     - docker
@@ -67,6 +70,11 @@
 - name: reset | unmount kubelet dirs
   command: umount {{item}}
   with_items: '{{ mounted_dirs.stdout_lines }}'
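+  # Retry up to 4 times, 5 seconds apart, until the unmount succeeds.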
+  register: umount_dir
+  retries: 4
+  until: umount_dir.rc == 0
+  delay: 5
   tags:
     - mounts
 
@@ -91,6 +98,7 @@
     - /root/.kube
     - /root/.helm
     - "{{ etcd_data_dir }}"
+    - /var/lib/etcd-events
     - /etc/ssl/etcd
     - /var/log/calico
     - /etc/cni
@@ -121,6 +129,7 @@
     - "{{ bin_dir }}/kubelet"
     - "{{ bin_dir }}/etcd-scripts"
     - "{{ bin_dir }}/etcd"
+    - "{{ bin_dir }}/etcd-events"
     - "{{ bin_dir }}/etcdctl"
     - "{{ bin_dir }}/kubernetes-scripts"
     - "{{ bin_dir }}/kubectl"
diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml
index 4eb055f7e1362e8113c0e76dceeafa8b36911e00..8e5ad08a08371de29bd6c7b07d517e86dc628ce5 100644
--- a/roles/vault/defaults/main.yml
+++ b/roles/vault/defaults/main.yml
@@ -86,7 +86,7 @@ vault_ca_options:
     format: pem
     ttl: "{{ vault_max_lease_ttl }}"
     exclude_cn_from_sans: true
-    alt_names: "vault.{{ system_namespace }}.svc.{{ dns_domain }},vault.{{ system_namespace }}.svc,vault.{{ system_namespace }},vault"
+    alt_names: "vault.kube-system.svc.{{ dns_domain }},vault.kube-system.svc,vault.kube-system,vault"
   etcd:
     common_name: etcd
     format: pem
@@ -115,7 +115,7 @@ vault_pki_mounts:
     roles:
       - name: vault
         group: vault
-        password: "{{ lookup('password', inventory_dir + '/credentials/vault/vault length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/vault.creds length=15') }}"
         policy_rules: default
         role_options: default
   etcd:
@@ -127,7 +127,7 @@ vault_pki_mounts:
     roles:
       - name: etcd
         group: etcd
-        password: "{{ lookup('password', inventory_dir + '/credentials/vault/etcd length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/etcd.creds length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
@@ -142,7 +142,7 @@ vault_pki_mounts:
     roles:
       - name: kube-master
         group: kube-master
-        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-master length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-master.creds length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
@@ -150,7 +150,7 @@ vault_pki_mounts:
           organization: "system:masters"
       - name: kube-node
         group: k8s-cluster
-        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-node length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-node.creds length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
@@ -158,7 +158,7 @@ vault_pki_mounts:
           organization: "system:nodes"
       - name: kube-proxy
         group: k8s-cluster
-        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy.creds length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
@@ -166,7 +166,7 @@ vault_pki_mounts:
           organization: "system:node-proxier"
       - name: front-proxy-client
         group: k8s-cluster
-        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy.creds length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
diff --git a/roles/vault/tasks/cluster/systemd.yml b/roles/vault/tasks/cluster/systemd.yml
index 8df52f98255ed0863c1d226ff79eb7b8e463b904..f7139d336bc1159057e6e05b2e7168362f0c42ef 100644
--- a/roles/vault/tasks/cluster/systemd.yml
+++ b/roles/vault/tasks/cluster/systemd.yml
@@ -55,3 +55,4 @@
   register: vault_health_check
   until: vault_health_check|succeeded
   retries: 10
+  delay: "{{ retry_stagger | random + 3 }}"
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 1a0e2307b7ef575cc9174033779f3cb9d8f429f1..14daf9d19bd2c6dcd603e255266eeb1b6129bdf6 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -114,7 +114,13 @@
       with_items: "{{logs}}"
 
     - name: Pack results and logs
-      local_action: raw GZIP=-9 tar --remove-files -cvzf {{dir|default(".")}}/logs.tar.gz -C /tmp collect-info
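+      # Build the tarball with the archive module on the control host and remove the source files afterwards.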
+      archive:
+        path: "/tmp/collect-info"
+        dest: "{{ dir|default('.') }}/logs.tar.gz"
+        remove: true
+      delegate_to: localhost
+      become: false
       run_once: true
 
     - name: Clean up collected command outputs
diff --git a/tests/ansible.cfg b/tests/ansible.cfg
index 9e734403e518ae0b2ed894ea949e17448840cbe5..9c405752924b602cdce2ac921925948b4f7a68d2 100644
--- a/tests/ansible.cfg
+++ b/tests/ansible.cfg
@@ -10,3 +10,4 @@ fact_caching_connection = /tmp
 stdout_callback = skippy
 library = ./library:../library
 callback_whitelist = profile_tasks
+jinja2_extensions = jinja2.ext.do
diff --git a/tests/files/gce_centos-weave-kubeadm.yml b/tests/files/gce_centos-weave-kubeadm.yml
index b4cd8e17c159b3b44be834b7b844c272640b46a7..a1c88e97661642e9328c9a2a722b2a77a67465b2 100644
--- a/tests/files/gce_centos-weave-kubeadm.yml
+++ b/tests/files/gce_centos-weave-kubeadm.yml
@@ -7,7 +7,7 @@ startup_script: ""
 
 # Deployment settings
 kube_network_plugin: weave
-weave_cpu_limit: "100m"
+weave_cpu_limits: "100m"
 weave_cpu_requests: "100m"
 kubeadm_enabled: true
 deploy_netchecker: true
diff --git a/tests/files/gce_centos7-cilium.yml b/tests/files/gce_centos7-cilium.yml
index ca682f7ed91328d037a96c51e57f435e8c31a84a..ec46a213d65abcfbd0636138e05b6acd041533a9 100644
--- a/tests/files/gce_centos7-cilium.yml
+++ b/tests/files/gce_centos7-cilium.yml
@@ -7,5 +7,6 @@ mode: default
 # Deployment settings
 kube_network_plugin: cilium
 deploy_netchecker: true
+enable_network_policy: true
 kubedns_min_replicas: 1
 cloud_provider: gce
diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml
index 272c5e7ae88391b2c1aa29e77d3f2fdd4b1f9297..c120920116551570ca024e345b3aa010ae540f6e 100644
--- a/tests/files/gce_centos7-flannel-addons.yml
+++ b/tests/files/gce_centos7-flannel-addons.yml
@@ -9,8 +9,14 @@ kube_network_plugin: flannel
 helm_enabled: true
 istio_enabled: true
 efk_enabled: true
+etcd_events_cluster_setup: true
 local_volume_provisioner_enabled: true
 etcd_deployment_type: host
 deploy_netchecker: true
 kubedns_min_replicas: 1
 cloud_provider: gce
+kube_encrypt_secret_data: true
+prometheus_operator_enabled: true
+k8s_metrics_enabled: true
+ingress_nginx_enabled: true
+cert_manager_enabled: true
diff --git a/tests/files/gce_coreos-alpha-weave-ha.yml b/tests/files/gce_coreos-alpha-weave-ha.yml
index dd579c0322fdc90fb98790d125ce1cf22d1586cc..1666e0927faa4a29ae070a5869a991ee4bee13ce 100644
--- a/tests/files/gce_coreos-alpha-weave-ha.yml
+++ b/tests/files/gce_coreos-alpha-weave-ha.yml
@@ -7,7 +7,7 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
 # Deployment settings
 kube_network_plugin: weave
-weave_cpu_limit: "100m"
+weave_cpu_limits: "100m"
 weave_cpu_requests: "100m"
 bootstrap_os: coreos
 resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
diff --git a/tests/files/gce_coreos-cilium.yml b/tests/files/gce_coreos-cilium.yml
index a090039700411f85b16ea4dacbd4e3cb7fb55a6c..1778929f09bf3289bf6becd867d432e4a159f4c5 100644
--- a/tests/files/gce_coreos-cilium.yml
+++ b/tests/files/gce_coreos-cilium.yml
@@ -9,5 +9,6 @@ kube_network_plugin: cilium
 bootstrap_os: coreos
 resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
 deploy_netchecker: true
+enable_network_policy: true
 kubedns_min_replicas: 1
 cloud_provider: gce
diff --git a/tests/files/gce_rhel7-cilium.yml b/tests/files/gce_rhel7-cilium.yml
index d67658a6c34ead1069d7b7a85cc04c7ca665ec5b..0994d0099968e866d51b514e3c5c3e1bff5ce709 100644
--- a/tests/files/gce_rhel7-cilium.yml
+++ b/tests/files/gce_rhel7-cilium.yml
@@ -6,5 +6,6 @@ mode: default
 # Deployment settings
 kube_network_plugin: cilium
 deploy_netchecker: true
+enable_network_policy: true
 kubedns_min_replicas: 1
 cloud_provider: gce
diff --git a/tests/files/gce_rhel7-weave.yml b/tests/files/gce_rhel7-weave.yml
index df80a556f82f4dcc224d6ed947e6f0b5b31a6ee0..e6928b7a2508bd813a7f7331102113cb33e34c4e 100644
--- a/tests/files/gce_rhel7-weave.yml
+++ b/tests/files/gce_rhel7-weave.yml
@@ -5,7 +5,7 @@ mode: default
 
 # Deployment settings
 kube_network_plugin: weave
-weave_cpu_limit: "100m"
+weave_cpu_limits: "100m"
 weave_cpu_requests: "100m"
 deploy_netchecker: true
 kubedns_min_replicas: 1
diff --git a/tests/files/gce_ubuntu-cilium-sep.yml b/tests/files/gce_ubuntu-cilium-sep.yml
index e7150a27ec5f48004e37d69a9833a7d5290df1ff..0c0647743922dee61e1914754fb618068edc942f 100644
--- a/tests/files/gce_ubuntu-cilium-sep.yml
+++ b/tests/files/gce_ubuntu-cilium-sep.yml
@@ -6,6 +6,7 @@ mode: separate
 # Deployment settings
 kube_network_plugin: cilium
 deploy_netchecker: true
+enable_network_policy: true
 kubedns_min_replicas: 1
 cloud_provider: gce
 
diff --git a/tests/files/gce_ubuntu-weave-sep.yml b/tests/files/gce_ubuntu-weave-sep.yml
index 133bd907af526f88794b3d03ceec7d81071fee6c..6e701cb233334b78d511f852fa2223469653029d 100644
--- a/tests/files/gce_ubuntu-weave-sep.yml
+++ b/tests/files/gce_ubuntu-weave-sep.yml
@@ -6,7 +6,7 @@ mode: separate
 # Deployment settings
 bootstrap_os: ubuntu
 kube_network_plugin: weave
-weave_cpu_limit: "100m"
+weave_cpu_limits: "100m"
 weave_cpu_requests: "100m"
 deploy_netchecker: true
 kubedns_min_replicas: 1
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index de5e3a84a46f90349cea7a62dd83ff8d7b41f14c..68ea2e35d6e861bc32241886bba33f22688d7510 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -6,7 +6,7 @@
     uri:
       url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port }}/api/v1"
       user: kube
-      password: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+      password: "{{ lookup('password', inventory_dir + '/credentials/kube_user.creds length=15 chars=ascii_letters,digits') }}"
       validate_certs: no
       status_code: 200,401
     when: not kubeadm_enabled|default(false)
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index d279f563518393666fab800a2980447865905def..7acec3083385e82b96c6acbb4472cc3cf9d217ce 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -21,6 +21,13 @@
   vars:
     ansible_ssh_pipelining: true
   gather_facts: true
+  pre_tasks:
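+    # Gather facts from every cluster, etcd and calico-rr host and store them in that host's hostvars.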
+    - name: gather facts from all instances
+      setup:
+      delegate_to: "{{item}}"
+      delegate_facts: True
+      with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
 
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"