diff --git a/.gitignore b/.gitignore
index fcbcd1da1122cde77feb0a6bb2f2ede8996d2c60..e50e78e224aa6f1666efdb6bf54e0f9a6f87d9ab 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 .vagrant
 *.retry
-inventory/vagrant_ansible_inventory
+**/vagrant_ansible_inventory
+inventory/credentials/
 inventory/group_vars/fake_hosts.yml
 inventory/host_vars/
 temp
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 1014440abe046337fde60f049894933f0673b9bc..e03e640178bcdcd02f2430ebbe15b5ffa21c779a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -20,6 +20,7 @@ variables:
   GCE_PREEMPTIBLE: "false"
   ANSIBLE_KEEP_REMOTE_FILES: "1"
   ANSIBLE_CONFIG: ./tests/ansible.cfg
+  ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
   IDEMPOT_CHECK: "false"
   RESET_CHECK: "false"
   UPGRADE_TEST: "false"
@@ -90,9 +91,9 @@ before_script:
     - cd tests && make create-${CI_PLATFORM} -s ; cd -
 
     # Check out latest tag if testing upgrade
-    # Uncomment when gitlab kargo repo has tags
+    # Uncomment when gitlab kubespray repo has tags
     #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
-    - test "${UPGRADE_TEST}" != "false" && git checkout ba0a03a8ba2d97a73d06242ec4bb3c7e2012e58c
+    - test "${UPGRADE_TEST}" != "false" && git checkout f7d52564aad2ff8e337634951beb4a881c0e8aa6
     # Checkout the CI vars file so it is available
     - test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
     # Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
@@ -102,14 +103,13 @@ before_script:
     # Create cluster
     - >
       ansible-playbook
-      -i inventory/sample/hosts.ini
+      -i ${ANSIBLE_INVENTORY}
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
       ${SSH_ARGS}
       ${LOG_LEVEL}
       -e @${CI_TEST_VARS}
-      -e ansible_python_interpreter=${PYPATH}
       -e ansible_ssh_user=${SSH_USER}
       -e local_release_dir=${PWD}/downloads
       --limit "all:!fake_hosts"
@@ -122,14 +122,13 @@ before_script:
       test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
       git checkout "${CI_BUILD_REF}";
       ansible-playbook
-      -i inventory/sample/hosts.ini
+      -i ${ANSIBLE_INVENTORY}
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
       ${SSH_ARGS}
       ${LOG_LEVEL}
       -e @${CI_TEST_VARS}
-      -e ansible_python_interpreter=${PYPATH}
       -e ansible_ssh_user=${SSH_USER}
       -e local_release_dir=${PWD}/downloads
       --limit "all:!fake_hosts"
@@ -139,20 +138,20 @@ before_script:
     # Tests Cases
     ## Test Master API
     - >
-      ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+      ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
       -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
 
    ## Ping between 2 pods
-    - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
+    - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
 
     ## Advanced DNS checks
-    - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
+    - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
 
     ## Idempotency checks 1/5 (repeat deployment)
     - >
       if [ "${IDEMPOT_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/sample/hosts.ini
+      -i ${ANSIBLE_INVENTORY}
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -169,7 +168,7 @@ before_script:
     - >
       if [ "${IDEMPOT_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/sample/hosts.ini
+      -i ${ANSIBLE_INVENTORY}
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -184,7 +183,7 @@ before_script:
     - >
       if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/sample/hosts.ini
+      -i ${ANSIBLE_INVENTORY}
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -201,7 +200,7 @@ before_script:
     - >
       if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/sample/hosts.ini
+      -i ${ANSIBLE_INVENTORY}
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -217,7 +216,7 @@ before_script:
     ## Idempotency checks 5/5 (Advanced DNS checks)
     - >
       if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
-      ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH}
+      ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH}
       -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
       --limit "all:!fake_hosts"
       tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
@@ -309,6 +308,10 @@ before_script:
 # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
+.opensuse_canal_variables: &opensuse_canal_variables
+# stage: deploy-part2
+  MOVED_TO_GROUP_VARS: "true"
+
 
 # Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
 ### PR JOBS PART1
@@ -590,6 +593,17 @@ gce_centos7-calico-ha-triggers:
   when: on_success
   only: ['triggers']
 
+gce_opensuse-canal:
+  stage: deploy-part2
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *opensuse_canal_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
 # no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
 gce_coreos-alpha-weave-ha:
   stage: deploy-special
diff --git a/README.md b/README.md
index 56210a8f9218c5471b32a35941d97fc3e2866e7e..45a3515bfd4715886ca6a58c7ceda072f83ba566 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,11 @@
-![Kubernetes Logo](https://s28.postimg.org/lf3q4ocpp/k8s.png)
+![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-incubator/kubespray/master/docs/img/kubernetes-logo.png)
 
 Deploy a Production Ready Kubernetes Cluster
 ============================================
 
 If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
 
--   Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
+-   Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
 -   **High available** cluster
 -   **Composable** (Choice of the network plugin for instance)
 -   Support most popular **Linux distributions**
@@ -52,6 +52,7 @@ Documents
 -   [Vagrant install](docs/vagrant.md)
 -   [CoreOS bootstrap](docs/coreos.md)
 -   [Debian Jessie setup](docs/debian.md)
+-   [openSUSE setup](docs/opensuse.md)
 -   [Downloaded artifacts](docs/downloads.md)
 -   [Cloud providers](docs/cloud.md)
 -   [OpenStack](docs/openstack.md)
@@ -66,10 +67,11 @@ Supported Linux Distributions
 -----------------------------
 
 -   **Container Linux by CoreOS**
--   **Debian** Jessie
+-   **Debian** Jessie, Stretch, Wheezy
 -   **Ubuntu** 16.04
 -   **CentOS/RHEL** 7
 -   **Fedora/CentOS** Atomic
+-   **openSUSE** Leap 42.3/Tumbleweed
 
 Note: Upstart/SysV init based OS types are not supported.
 
@@ -83,7 +85,7 @@ Versions of supported components
 -   [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
 -   [cilium](https://github.com/cilium/cilium) v1.0.0-rc8
 -   [contiv](https://github.com/contiv/install/releases) v1.1.7
--   [weave](http://weave.works/) v2.2.1
+-   [weave](http://weave.works/) v2.3.0
 -   [docker](https://www.docker.com/) v17.03 (see note)
 -   [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
 
@@ -105,6 +107,9 @@ Requirements
 -   **Your ssh key must be copied** to all the servers part of your inventory.
 -   The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
     in order to avoid any issue during deployment you should disable your firewall.
+-   If kubespray is run from a non-root user account, a correct privilege escalation method
+    should be configured on the target servers. Then the `ansible_become` flag
+    or the command parameters `--become` or `-b` should be specified.
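+
+    For example, a minimal invocation might look like this (a sketch assuming the
+    sample inventory and a login user with sudo rights):
+
+    ```
+    ansible-playbook -i inventory/sample/hosts.ini cluster.yml -u <user> -b --become-user=root
+    ```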
 
 Network Plugins
 ---------------
diff --git a/Vagrantfile b/Vagrantfile
index 536bbff2bf123c606d4a632584b2e6203db172a7..d0b6b73d13c3c78af15f7d30d8ed88dd1e1a294f 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -18,6 +18,8 @@ SUPPORTED_OS = {
   "coreos-beta"   => {box: "coreos-beta",        bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
   "ubuntu"        => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
   "centos"        => {box: "centos/7",           bootstrap_os: "centos", user: "vagrant"},
+  "opensuse"      => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
+  "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
 }
 
 # Defaults for config options defined in CONFIG
@@ -52,7 +54,7 @@ end
 
 $box = SUPPORTED_OS[$os][:box]
 # if $inventory is not set, try to use example
-$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
+$inventory = File.join(File.dirname(__FILE__), "inventory", "sample") if ! $inventory
 
 # if $inventory has a hosts file use it, otherwise copy over vars etc
 # to where vagrant expects dynamic inventory to be.
@@ -84,7 +86,6 @@ Vagrant.configure("2") do |config|
   if Vagrant.has_plugin?("vagrant-vbguest") then
     config.vbguest.auto_update = false
   end
-
   (1..$num_instances).each do |i|
     config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
       config.vm.hostname = vm_name
@@ -110,8 +111,10 @@ Vagrant.configure("2") do |config|
         end
       end
 
+      config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
+
       $shared_folders.each do |src, dst|
-        config.vm.synced_folder src, dst
+        config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
       end
 
       config.vm.provider :virtualbox do |vb|
diff --git a/ansible.cfg b/ansible.cfg
index 6f381690e42d6dc890dc4d0ae1eeb075abba66c2..d3102a6f41ca0223d5b603ecf9350960493a66b4 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -13,4 +13,3 @@ callback_whitelist = profile_tasks
 roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
 deprecation_warnings=False
 inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
-jinja2_extensions = jinja2.ext.do
diff --git a/cluster.yml b/cluster.yml
index 2b2716637e692cd94d2f926223af9e77e7777076..70f7c17f411dcceed25ac117cbd9f30142aee731 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -33,7 +33,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: docker, tags: docker }
+    - { role: docker, tags: docker, when: manage_docker|default(true) }
     - role: rkt
       tags: rkt
       when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md
index d69811335f1a11bfb749d37f19bddc52ecc0dd10..f62ba44cd45fc3871c21072548c2b60ade55356a 100644
--- a/contrib/terraform/aws/README.md
+++ b/contrib/terraform/aws/README.md
@@ -46,7 +46,7 @@ ssh -F ./ssh-bastion.conf user@$ip
 
 Example (this one assumes you are using CoreOS)
 ```commandline
-ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
+ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
 ```
 ***Using a distribution other than CoreOS***
 If you want to use a distribution other than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
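+
+For example, a hypothetical filter block for Ubuntu 16.04 AMIs (the name pattern and
+owner ID are illustrative and must match your chosen distribution):
+```
+data "aws_ami" "distro" {
+  most_recent = true
+
+  filter {
+    name   = "name"
+    values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*"]
+  }
+
+  owners = ["099720109477"] # Canonical
+}
+```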
diff --git a/contrib/terraform/group_vars b/contrib/terraform/group_vars
index febd29cb3f61a7da6d6fd860b15fd179b8933ce6..4dd828e8e583a58a953599824833bd49b03f5f7c 120000
--- a/contrib/terraform/group_vars
+++ b/contrib/terraform/group_vars
@@ -1 +1 @@
-../../inventory/group_vars
\ No newline at end of file
+../../inventory/local/group_vars
\ No newline at end of file
diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md
index ed11bef1ef70fadd2b0b7c47bf18446bcad7b201..de717fb69f2ec16080aa7f5a638fa5fcbfce5242 100644
--- a/contrib/terraform/openstack/README.md
+++ b/contrib/terraform/openstack/README.md
@@ -135,7 +135,7 @@ the one you want to use with the environment variable `OS_CLOUD`:
 export OS_CLOUD=mycloud
 ```
 
-##### Openrc method (deprecated)
+##### Openrc method
 
 When using classic environment variables, Terraform uses default `OS_*`
 environment variables.  A script suitable for your environment may be available
@@ -218,6 +218,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
 |`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
 |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
 | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
+|`supplementary_master_groups` | Additional ansible groups to add the masters to, such as `kube-node` to make them schedulable as nodes; empty by default (see the example below). |
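+
+For example, to make all masters schedulable as nodes, an illustrative `cluster.tf` entry:
+
+```
+supplementary_master_groups = "kube-node"
+```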
 
 #### Terraform state files
 
@@ -299,11 +300,15 @@ If you have deployed and destroyed a previous iteration of your cluster, you wil
 
 #### Bastion host
 
-If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content.  Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that.
+Bastion access will be determined by:
 
-```
-ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"'
-```
+ - Your choice of the number of bastion hosts (set by the `number_of_bastions` terraform variable).
+ - The existence of nodes/masters with floating IPs (set by the `number_of_k8s_masters`, `number_of_k8s_nodes`, and `number_of_k8s_masters_no_etcd` terraform variables).
+
+If you have a bastion host, your ssh traffic will be routed directly through it, regardless of whether any masters/nodes have a floating IP assigned.
+If you don't have a bastion host but at least one of your masters/nodes has a floating IP, ssh traffic will be tunneled through one of these machines.
+
+So, either a bastion host or at least one master/node with a floating IP is required (see the example below).
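+
+For example, a minimal sketch of an `inventory/$CLUSTER/cluster.tf` entry enabling a single bastion (the value is illustrative):
+
+```
+number_of_bastions = 1
+```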
 
 #### Test access
 
diff --git a/contrib/terraform/openstack/kubespray.tf b/contrib/terraform/openstack/kubespray.tf
index e0dbfd02de7f274f5ff4f1c36a745fda88a08f9b..c501302de04b8cfb16c33c945a70e2ac31e9175c 100644
--- a/contrib/terraform/openstack/kubespray.tf
+++ b/contrib/terraform/openstack/kubespray.tf
@@ -48,6 +48,7 @@ module "compute" {
   k8s_master_fips                              = "${module.ips.k8s_master_fips}"
   k8s_node_fips                                = "${module.ips.k8s_node_fips}"
   bastion_fips                                 = "${module.ips.bastion_fips}"
+  supplementary_master_groups                  = "${var.supplementary_master_groups}"
 
   network_id = "${module.network.router_id}"
 }
diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf
index e0a8eab4a6bf8cb038f1968936c5ba1402442e54..940049aa96505641d594d60014b5a3bef8e56016 100644
--- a/contrib/terraform/openstack/modules/compute/main.tf
+++ b/contrib/terraform/openstack/modules/compute/main.tf
@@ -83,7 +83,7 @@ resource "openstack_compute_instance_v2" "bastion" {
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/group_vars/no-floating.yml"
   }
 
 }
@@ -107,10 +107,14 @@ resource "openstack_compute_instance_v2" "k8s_master" {
 
   metadata = {
     ssh_user         = "${var.ssh_user}"
-    kubespray_groups = "etcd,kube-master,k8s-cluster,vault"
+    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on       = "${var.network_id}"
   }
 
+  provisioner "local-exec" {
+    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+  }
+
 }
 
 resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
@@ -125,15 +129,20 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   }
 
   security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
+    "${openstack_compute_secgroup_v2.bastion.name}",
     "${openstack_compute_secgroup_v2.k8s.name}",
   ]
 
   metadata = {
     ssh_user         = "${var.ssh_user}"
-    kubespray_groups = "kube-master,k8s-cluster,vault"
+    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on       = "${var.network_id}"
   }
 
+  provisioner "local-exec" {
+    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+  }
+
 }
 
 resource "openstack_compute_instance_v2" "etcd" {
@@ -175,7 +184,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
 
   metadata = {
     ssh_user         = "${var.ssh_user}"
-    kubespray_groups = "etcd,kube-master,k8s-cluster,vault,no-floating"
+    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
     depends_on       = "${var.network_id}"
   }
 
@@ -198,7 +207,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
 
   metadata = {
     ssh_user         = "${var.ssh_user}"
-    kubespray_groups = "kube-master,k8s-cluster,vault,no-floating"
+    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
     depends_on       = "${var.network_id}"
   }
 
@@ -226,6 +235,10 @@ resource "openstack_compute_instance_v2" "k8s_node" {
     depends_on       = "${var.network_id}"
   }
 
+  provisioner "local-exec" {
+    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+  }
+
 }
 
 resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
diff --git a/contrib/terraform/openstack/modules/compute/variables.tf b/contrib/terraform/openstack/modules/compute/variables.tf
index 518e15069451589923126af7018026a16dc6ba80..58ab170672a601d83cefe29bd21c2554a842f3dd 100644
--- a/contrib/terraform/openstack/modules/compute/variables.tf
+++ b/contrib/terraform/openstack/modules/compute/variables.tf
@@ -55,3 +55,7 @@ variable "k8s_node_fips" {
 variable "bastion_fips" {
   type = "list"
 }
+
+variable "supplementary_master_groups" {
+  default = ""
+}
diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf
index 925750ab1b335781c165aa60bc72332d42138a82..d49746c9204728b837b53dbe30b6e2011613fc8d 100644
--- a/contrib/terraform/openstack/variables.tf
+++ b/contrib/terraform/openstack/variables.tf
@@ -111,3 +111,8 @@ variable "floatingip_pool" {
 variable "external_net" {
   description = "uuid of the external/public network"
 }
+
+variable "supplementary_master_groups" {
+  description = "supplementary kubespray ansible groups for masters, such kube-node"
+  default = ""
+}
diff --git a/docs/calico.md b/docs/calico.md
index 7992e57eb6cf2270114ef146b4681291048a4a1a..b8cdc90cbec616afbff9f18669aa5467a0e82de4 100644
--- a/docs/calico.md
+++ b/docs/calico.md
@@ -169,3 +169,12 @@ By default the felix agent(calico-node) will abort if the Kernel RPF setting is
 ```
 calico_node_ignorelooserpf: true
 ```
+
+Note that in OpenStack you must allow `ipip` traffic in your security groups,
+otherwise you will experience timeouts.
+To do this you must add a rule which allows it, for example:
+
+```
+neutron  security-group-rule-create  --protocol 4  --direction egress  k8s-a0tp4t
+neutron  security-group-rule-create  --protocol 4  --direction ingress  k8s-a0tp4t
+```
diff --git a/docs/img/kubernetes-logo.png b/docs/img/kubernetes-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..2838a1829ff3588df5e0d8110a62703031be03ee
Binary files /dev/null and b/docs/img/kubernetes-logo.png differ
diff --git a/docs/netcheck.md b/docs/netcheck.md
index 80679cd7310201eba86380168e57a89559dad600..638e5cfced160da72796ec9a5ad867e1efa19e58 100644
--- a/docs/netcheck.md
+++ b/docs/netcheck.md
@@ -25,8 +25,8 @@ There are related application specifc variables:
 netchecker_port: 31081
 agent_report_interval: 15
 netcheck_namespace: default
-agent_img: "quay.io/l23network/k8s-netchecker-agent:v1.0"
-server_img: "quay.io/l23network/k8s-netchecker-server:v1.0"
+agent_img: "mirantis/k8s-netchecker-agent:v1.2.2"
+server_img: "mirantis/k8s-netchecker-server:v1.2.2"
 ```
 
 Note that the application verifies DNS resolve for FQDNs comprising only the
diff --git a/docs/opensuse.md b/docs/opensuse.md
new file mode 100644
index 0000000000000000000000000000000000000000..88fac3790e36a3a0865344e34ab5dfcf29875081
--- /dev/null
+++ b/docs/opensuse.md
@@ -0,0 +1,19 @@
+openSUSE Leap 42.3 and Tumbleweed
+=================================
+
+openSUSE Leap installation notes:
+
+- Install Ansible
+
+  ```
+  sudo zypper ref
+  sudo zypper -n install ansible
+  ```
+
+- Install Jinja2 and python-netaddr
+
+  ```
+  sudo zypper -n install python-Jinja2 python-netaddr
+  ```
+
+Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment).
diff --git a/extra_playbooks/build-cephfs-provisioner.yml b/extra_playbooks/build-cephfs-provisioner.yml
index 6a72a076e91f3666c85b1b1d7eed3aebdfe650d1..267c724eeb18e32a97df4a1ee8a84c0b7781b9d9 100644
--- a/extra_playbooks/build-cephfs-provisioner.yml
+++ b/extra_playbooks/build-cephfs-provisioner.yml
@@ -8,8 +8,8 @@
         version: "{{ item.version }}"
         state: "{{ item.state }}"
       with_items:
-        - { state: "present", name: "docker", version: "2.7.0" }
-        - { state: "present", name: "docker-compose", version: "1.18.0" }
+        - { state: "present", name: "docker", version: "3.2.1" }
+        - { state: "present", name: "docker-compose", version: "1.21.0" }
 
     - name: CephFS Provisioner | Check Go version
       shell: |
@@ -36,18 +36,18 @@
           git:
             repo: https://github.com/kubernetes-incubator/external-storage.git
             dest: "~/go/src/github.com/kubernetes-incubator"
-            version: 92295a30
+            version: a71a49d4
             clone: no
             update: yes
             
         - name: CephFS Provisioner | Build image
           shell: |
             cd ~/go/src/github.com/kubernetes-incubator/external-storage
-            REGISTRY=quay.io/kubespray/ VERSION=92295a30 make ceph/cephfs
+            REGISTRY=quay.io/kubespray/ VERSION=a71a49d4 make ceph/cephfs
 
         - name: CephFS Provisioner | Push image
           docker_image:
-            name: quay.io/kubespray/cephfs-provisioner:92295a30
+            name: quay.io/kubespray/cephfs-provisioner:a71a49d4
             push: yes
           retries: 10
 
diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml
index 031108767faf692119ee1d9715f88091bc1d78a8..13a7ddff5cc46e093c751a88c797115201a8fff3 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster.yml
@@ -1,8 +1,8 @@
 # Kubernetes configuration dirs and system namespace.
 # Those are where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
+# the kubernetes normally puts in /srv/kubernetes.
 # This puts them in a sane location and namespace.
-# Editting those values will almost surely break something.
+# Editing those values will almost surely break something.
 kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
@@ -28,7 +28,7 @@ local_release_dir: "/tmp/releases"
 retry_stagger: 5
 
 # This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
+# cert files to. Not really changeable...
 kube_cert_group: kube-cert
 
 # Cluster Loglevel configuration
@@ -58,7 +58,9 @@ kube_users:
 ## Optional settings for OIDC
 # kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
 # kube_oidc_username_claim: sub
+# kube_oidc_username_prefix: oidc:
 # kube_oidc_groups_claim: groups
+# kube_oidc_groups_prefix: oidc:
 
 
 # Choose network plugin (cilium, calico, contiv, weave or flannel)
@@ -162,15 +164,9 @@ dashboard_enabled: true
 # Monitoring apps for k8s
 efk_enabled: false
 
-# Helm deployment. Needs for Prometheus Operator, k8s metrics.
+# Helm deployment
 helm_enabled: false
 
-# Prometheus Operator. Needs for k8s metrics. Installed Helm is required.
-prometheus_operator_enabled: false
-
-# K8s cluster metrics. Installed Helm and Prometheus Operator are required.
-k8s_metrics_enabled: false
-
 # Istio deployment
 istio_enabled: false
 
diff --git a/inventory/sample/hosts.ini b/inventory/sample/hosts.ini
index 24578333471f187456123c2228625e97cd87f9f9..bddfa2f80a2f28f2319745b8a17f0bf7a3ed4d33 100644
--- a/inventory/sample/hosts.ini
+++ b/inventory/sample/hosts.ini
@@ -1,14 +1,14 @@
 # ## Configure 'ip' variable to bind kubernetes services on a
 # ## different ip than the default iface
-# node1 ansible_ssh_host=95.54.0.12  # ip=10.3.0.1
-# node2 ansible_ssh_host=95.54.0.13  # ip=10.3.0.2
-# node3 ansible_ssh_host=95.54.0.14  # ip=10.3.0.3
-# node4 ansible_ssh_host=95.54.0.15  # ip=10.3.0.4
-# node5 ansible_ssh_host=95.54.0.16  # ip=10.3.0.5
-# node6 ansible_ssh_host=95.54.0.17  # ip=10.3.0.6
+# node1 ansible_host=95.54.0.12  # ip=10.3.0.1
+# node2 ansible_host=95.54.0.13  # ip=10.3.0.2
+# node3 ansible_host=95.54.0.14  # ip=10.3.0.3
+# node4 ansible_host=95.54.0.15  # ip=10.3.0.4
+# node5 ansible_host=95.54.0.16  # ip=10.3.0.5
+# node6 ansible_host=95.54.0.17  # ip=10.3.0.6
 
 # ## configure a bastion host if your nodes are not directly reachable
-# bastion ansible_ssh_host=x.x.x.x
+# bastion ansible_host=x.x.x.x ansible_user=some_user
 
 # [kube-master]
 # node1
diff --git a/roles/bootstrap-os/tasks/bootstrap-coreos.yml b/roles/bootstrap-os/tasks/bootstrap-coreos.yml
index 428065eba2074492012dfb1ac165672f7a40d5b7..be0030538de1df15a8d20509d93250f1543bc506 100644
--- a/roles/bootstrap-os/tasks/bootstrap-coreos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-coreos.yml
@@ -22,7 +22,6 @@
   failed_when: false
   changed_when: false
   check_mode: no
-  when: need_bootstrap.rc != 0
   tags:
     - facts
 
@@ -30,24 +29,24 @@
   copy:
     src: get-pip.py
     dest: ~/get-pip.py
-  when: need_pip != 0
+  when: need_pip.rc != 0
 
 - name: Bootstrap | Install pip
   shell: "{{ansible_python_interpreter}} ~/get-pip.py"
-  when: need_pip != 0
+  when: need_pip.rc != 0
 
 - name: Bootstrap | Remove get-pip.py
   file:
     path: ~/get-pip.py
     state: absent
-  when: need_pip != 0
+  when: need_pip.rc != 0
 
 - name: Bootstrap | Install pip launcher
   copy:
     src: runner
     dest: /opt/bin/pip
     mode: 0755
-  when: need_pip != 0
+  when: need_pip.rc != 0
 
 - name: Install required python modules
   pip:
diff --git a/roles/bootstrap-os/tasks/bootstrap-opensuse.yml b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml
new file mode 100644
index 0000000000000000000000000000000000000000..abedd2195f84e9dd192e5c3adc113c4af282c324
--- /dev/null
+++ b/roles/bootstrap-os/tasks/bootstrap-opensuse.yml
@@ -0,0 +1,7 @@
+---
+- name: Install required packages (SUSE)
+  package:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - python-cryptography
diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml
index 01031deeb807fae8c70146e00c4060720ad4c1ec..c921b643e0e43c259e073bcc337c25ab6ab83b60 100644
--- a/roles/bootstrap-os/tasks/main.yml
+++ b/roles/bootstrap-os/tasks/main.yml
@@ -11,6 +11,9 @@
 - import_tasks: bootstrap-centos.yml
   when: bootstrap_os == "centos"
 
+- import_tasks: bootstrap-opensuse.yml
+  when: bootstrap_os == "opensuse"
+
 - import_tasks: setup-pipelining.yml
 
 - name: check if atomic host
@@ -26,18 +29,25 @@
     gather_subset: '!all'
     filter: ansible_*
 
-- name: Assign inventory name to unconfigured hostnames (non-CoreOS)
+- name: Assign inventory name to unconfigured hostnames (non-CoreOS and Tumbleweed)
   hostname:
     name: "{{inventory_hostname}}"
-  when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname
+  when:
+    - override_system_hostname
+    - ansible_distribution not in ['openSUSE Tumbleweed']
+    - ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS']
 
-- name: Assign inventory name to unconfigured hostnames (CoreOS only)
+- name: Assign inventory name to unconfigured hostnames (CoreOS and Tumbleweed only)
   command: "hostnamectl set-hostname  {{inventory_hostname}}"
   register: hostname_changed
-  when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname
+  when:
+    - ansible_hostname == 'localhost'
+    - ansible_distribution in ['openSUSE Tumbleweed'] or ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
+    - override_system_hostname
 
-- name: Update hostname fact (CoreOS only)
+- name: Update hostname fact (CoreOS and Tumbleweed only)
   setup:
     gather_subset: '!all'
     filter: ansible_hostname
-  when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed
+  when:
+    - hostname_changed.changed
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 729397b449635a607c6faf318722c5f987da4fd1..3668f61b8dfb636755d8b86a5bbcea7875d0f861 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -15,6 +15,14 @@
   tags:
     - facts
 
+# https://yum.dockerproject.org/repo/main/opensuse/ contains packages for an EOL
+# openSUSE version so we can't use it. The only alternative is to use the docker
+# packages from the distribution repositories.
+- name: Warn about Docker version on SUSE
+  debug:
+    msg: "SUSE distributions always install Docker from the distro repos"
+  when: ansible_pkg_mgr == 'zypper'
+
 - include_tasks: set_facts_dns.yml
   when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
   tags:
@@ -43,7 +51,7 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ docker_repo_key_info.repo_keys }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic)
 
 - name: ensure docker-ce repository is enabled
   action: "{{ docker_repo_info.pkg_repo }}"
@@ -51,7 +59,7 @@
     repo: "{{item}}"
     state: present
   with_items: "{{ docker_repo_info.repos }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (docker_repo_info.repos|length > 0)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (docker_repo_info.repos|length > 0)
 
 - name: ensure docker-engine repository public key is installed
   action: "{{ dockerproject_repo_key_info.pkg_key }}"
@@ -64,7 +72,7 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ dockerproject_repo_key_info.repo_keys }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic)
 
 - name: ensure docker-engine repository is enabled
   action: "{{ dockerproject_repo_info.pkg_repo }}"
@@ -72,7 +80,7 @@
     repo: "{{item}}"
     state: present
   with_items: "{{ dockerproject_repo_info.repos }}"
-  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (dockerproject_repo_info.repos|length > 0)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (dockerproject_repo_info.repos|length > 0)
 
 - name: Configure docker repository on RedHat/CentOS
   template:
@@ -110,6 +118,12 @@
   notify: restart docker
   when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_package_info.pkgs|length > 0)
 
+- name: ensure service is started if docker packages are already present
+  service:
+    name: docker
+    state: started
+  when: docker_task_result is not changed
+
 - name: flush handlers so we can wait for docker to come up
   meta: flush_handlers
 
diff --git a/roles/docker/tasks/pre-upgrade.yml b/roles/docker/tasks/pre-upgrade.yml
index 9315da30535fb3277b8cbcf2251942fa9f861604..8b75cba0ddef97518e20b85ee3551a61c6f7548c 100644
--- a/roles/docker/tasks/pre-upgrade.yml
+++ b/roles/docker/tasks/pre-upgrade.yml
@@ -6,7 +6,9 @@
   with_items:
     - docker
     - docker-engine
-  when: ansible_os_family == 'Debian' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+  when:
+    - ansible_os_family == 'Debian'
+    - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
 
 - name: Ensure old versions of Docker are not installed. | RedHat
   package:
@@ -17,4 +19,7 @@
     - docker-common
     - docker-engine
     - docker-selinux
-  when: ansible_os_family == 'RedHat' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
\ No newline at end of file
+  when:
+    - ansible_os_family == 'RedHat'
+    - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+    - not is_atomic
diff --git a/roles/docker/templates/docker.service.j2 b/roles/docker/templates/docker.service.j2
index d8efe202546dac187a44d4d7ba575fef27d66a98..8dc82bbb205acfc3a80bbd132f71290dccb7dbf6 100644
--- a/roles/docker/templates/docker.service.j2
+++ b/roles/docker/templates/docker.service.j2
@@ -7,6 +7,9 @@ Wants=docker-storage-setup.service
 {% elif ansible_os_family == "Debian" %}
 After=network.target docker.socket
 Wants=docker.socket
+{% elif ansible_os_family == "Suse" %}
+After=network.target containerd.socket containerd.service
+Requires=containerd.socket containerd.service
 {% endif %}
 
 [Service]
@@ -19,6 +22,9 @@ ExecReload=/bin/kill -s HUP $MAINPID
 Delegate=yes
 KillMode=process
 ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout|version_compare('17.03', '<') %} daemon{% else %}d{% endif %} \
+{% if ansible_os_family == "Suse" %}
+          --containerd /run/containerd/containerd.sock --add-runtime oci=/usr/bin/docker-runc \
+{% endif %}
           $DOCKER_OPTS \
           $DOCKER_STORAGE_OPTIONS \
           $DOCKER_NETWORK_OPTIONS \
diff --git a/roles/docker/vars/suse.yml b/roles/docker/vars/suse.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d89a50a7f4bdfc98c7223a7b843fa6b61ef23cda
--- /dev/null
+++ b/roles/docker/vars/suse.yml
@@ -0,0 +1,15 @@
+---
+docker_kernel_min_version: '0'
+
+docker_package_info:
+  pkg_mgr: zypper
+  pkgs:
+    - name: docker
+
+docker_repo_key_info:
+  pkg_key: ''
+  repo_keys: []
+
+docker_repo_info:
+  pkg_repo: ''
+  repos: []
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 725fc0bbd7b0f2ac1e76a1524d6b95553c2b5af6..bcdd7295f60511723717a946306ea5a35101d4e7 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -38,7 +38,7 @@ flannel_version: "v0.10.0"
 flannel_cni_version: "v0.3.0"
 istio_version: "0.2.6"
 vault_version: 0.8.1
-weave_version: 2.2.1
+weave_version: 2.3.0
 pod_infra_version: 3.0
 contiv_version: 1.1.7
 cilium_version: "v1.0.0-rc8"
@@ -70,16 +70,32 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "{{ calico_rr_version }}"
+istio_proxy_image_repo: docker.io/istio/proxy
+istio_proxy_image_tag: "{{ istio_version }}"
+istio_proxy_init_image_repo: docker.io/istio/proxy_init
+istio_proxy_init_image_tag: "{{ istio_version }}"
+istio_ca_image_repo: docker.io/istio/istio-ca
+istio_ca_image_tag: "{{ istio_version }}"
+istio_mixer_image_repo: docker.io/istio/mixer
+istio_mixer_image_tag: "{{ istio_version }}"
+istio_pilot_image_repo: docker.io/istio/pilot
+istio_pilot_image_tag: "{{ istio_version }}"
+istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
+istio_proxy_debug_image_tag: "{{ istio_version }}"
+istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
+istio_sidecar_initializer_image_tag: "{{ istio_version }}"
+istio_statsd_image_repo: prom/statsd-exporter
+istio_statsd_image_tag: latest
 hyperkube_image_repo: "gcr.io/google-containers/hyperkube"
 hyperkube_image_tag: "{{ kube_version }}"
 pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
 pod_infra_image_tag: "{{ pod_infra_version }}"
 install_socat_image_repo: "xueshanf/install-socat"
 install_socat_image_tag: "latest"
-netcheck_version: "v1.0"
-netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent"
+netcheck_version: "v1.2.2"
+netcheck_agent_img_repo: "mirantis/k8s-netchecker-agent"
 netcheck_agent_tag: "{{ netcheck_version }}"
-netcheck_server_img_repo: "quay.io/l23network/k8s-netchecker-server"
+netcheck_server_img_repo: "mirantis/k8s-netchecker-server"
 netcheck_server_tag: "{{ netcheck_version }}"
 weave_kube_image_repo: "weaveworks/weave-kube"
 weave_kube_image_tag: "{{ weave_version }}"
@@ -134,13 +150,15 @@ registry_image_repo: "registry"
 registry_image_tag: "2.6"
 registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
 registry_proxy_image_tag: "0.4"
+local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
+local_volume_provisioner_image_tag: "v2.0.0"
 cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner"
-cephfs_provisioner_image_tag: "92295a30"
+cephfs_provisioner_image_tag: "a71a49d4"
 ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
-ingress_nginx_controller_image_tag: "0.11.0"
+ingress_nginx_controller_image_tag: "0.14.0"
 ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
 ingress_nginx_default_backend_image_tag: "1.4"
-cert_manager_version: "v0.2.3"
+cert_manager_version: "v0.2.4"
 cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
 cert_manager_controller_image_tag: "{{ cert_manager_version }}"
 cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
@@ -197,6 +215,70 @@ downloads:
     mode: "0755"
     groups:
       - kube-master
+  istio_proxy:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_image_repo }}"
+    tag: "{{ istio_proxy_image_tag }}"
+    sha256: "{{ istio_proxy_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_proxy_init:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_init_image_repo }}"
+    tag: "{{ istio_proxy_init_image_tag }}"
+    sha256: "{{ istio_proxy_init_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_ca:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_ca_image_repo }}"
+    tag: "{{ istio_ca_image_tag }}"
+    sha256: "{{ istio_ca_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_mixer:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_mixer_image_repo }}"
+    tag: "{{ istio_mixer_image_tag }}"
+    sha256: "{{ istio_mixer_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_pilot:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_pilot_image_repo }}"
+    tag: "{{ istio_pilot_image_tag }}"
+    sha256: "{{ istio_pilot_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_proxy_debug:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_debug_image_repo }}"
+    tag: "{{ istio_proxy_debug_image_tag }}"
+    sha256: "{{ istio_proxy_debug_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_sidecar_initializer:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_sidecar_initializer_image_repo }}"
+    tag: "{{ istio_sidecar_initializer_image_tag }}"
+    sha256: "{{ istio_sidecar_initializer_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_statsd:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_statsd_image_repo }}"
+    tag: "{{ istio_statsd_image_tag }}"
+    sha256: "{{ istio_statsd_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   hyperkube:
     enabled: true
     container: true
@@ -451,6 +533,14 @@ downloads:
     sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
     groups:
       - kube-node
+  local_volume_provisioner:
+    enabled: "{{ local_volume_provisioner_enabled }}"
+    container: true
+    repo: "{{ local_volume_provisioner_image_repo }}"
+    tag: "{{ local_volume_provisioner_image_tag }}"
+    sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   cephfs_provisioner:
     enabled: "{{ cephfs_provisioner_enabled }}"
     container: true
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index a5659619c11e384c41f04e7cc8306b770c042c3d..7e3923606fb44f153b28bc078d8d12de443d2f1d 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -2,12 +2,11 @@
 - name: container_download | Make download decision if pull is required by tag or sha256
   include_tasks: set_docker_image_facts.yml
   delegate_to: "{{ download_delegate if download_run_once or omit }}"
-  delegate_facts: no
+  delegate_facts: yes
   run_once: "{{ download_run_once }}"
   when:
     - download.enabled
     - download.container
-    - group_names | intersect(download.groups) | length
   tags:
     - facts
 
@@ -24,7 +23,6 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
-    - group_names | intersect(download.groups) | length
   delegate_to: "{{ download_delegate }}"
   delegate_facts: yes
   run_once: yes
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index c6e910e5df52a46fa520c30da0e910fbf7130381..2474b40290209fee09921add68fd67445300d19a 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -22,3 +22,4 @@
     - item.value.enabled
     - item.value.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml
index 1ca84ad671d7ef623f34476b616f074dc395d83f..c7e37d7f3d69e5142f629de66d67358f9be9da4a 100644
--- a/roles/download/tasks/sync_container.yml
+++ b/roles/download/tasks/sync_container.yml
@@ -7,7 +7,6 @@
   when:
     - download.enabled
     - download.container
-    - group_names | intersect(download.groups) | length
   tags:
     - facts
 
@@ -18,7 +17,7 @@
     - download.enabled
     - download.container
     - download_run_once
-    - group_names | intersect(download.groups) | length
+
   tags:
     - facts
 
@@ -29,7 +28,6 @@
     - download.enabled
     - download.container
     - download_run_once
-    - group_names | intersect(download.groups) | length
 
 - name: "container_download | Update the 'container_changed' fact"
   set_fact:
@@ -39,14 +37,13 @@
     - download.container
     - download_run_once
     - pull_required|default(download_always_pull)
-    - group_names | intersect(download.groups) | length
   run_once: "{{ download_run_once }}"
   tags:
     - facts
 
 - name: container_download | Stat saved container image
   stat:
-    path: "{{fname}}"
+    path: "{{ fname }}"
   register: img
   changed_when: false
   delegate_to: "{{ download_delegate }}"
@@ -57,7 +54,6 @@
     - download.enabled
     - download.container
     - download_run_once
-    - group_names | intersect(download.groups) | length
   tags:
     - facts
 
@@ -73,7 +69,6 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost")
     - (container_changed or not img.stat.exists)
-    - group_names | intersect(download.groups) | length
 
 - name: container_download | copy container images to ansible host
   synchronize:
@@ -93,7 +88,6 @@
     - inventory_hostname == download_delegate
     - download_delegate != "localhost"
     - saved.changed
-    - group_names | intersect(download.groups) | length
 
 - name: container_download | upload container images to nodes
   synchronize:
@@ -115,7 +109,6 @@
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
       inventory_hostname != download_delegate or
       download_delegate == "localhost")
-    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade
@@ -128,7 +121,6 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
       inventory_hostname != download_delegate or download_delegate == "localhost")
-    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 209b401fb21acadfd8874f926fd1928429239f6f..aff1c9b59860f31fcdcd0b0bb8c38d6ac8304518 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -32,6 +32,12 @@ etcd_election_timeout: "5000"
 
 etcd_metrics: "basic"
 
+## A dictionary of extra environment variables to add to etcd.env, formatted like:
+##  etcd_extra_vars:
+##    ETCD_VAR1: "value1"
+##    ETCD_VAR2: "value2"
+etcd_extra_vars: {}
+
 # Limits
 # Limit memory only if <4GB memory on host. 0=unlimited
 etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %}"
diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml
index dd36554fb95a3fcc643cf51823f96919b2373a2e..0ff3638601b73e6fb56b07c5985ff4a85efb0dc6 100644
--- a/roles/etcd/tasks/upd_ca_trust.yml
+++ b/roles/etcd/tasks/upd_ca_trust.yml
@@ -8,6 +8,8 @@
       /etc/pki/ca-trust/source/anchors/etcd-ca.crt
       {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
       /etc/ssl/certs/etcd-ca.pem
+      {%- elif ansible_os_family == "Suse" -%}
+      /etc/pki/trust/anchors/etcd-ca.pem
       {%- endif %}
   tags:
     - facts
@@ -19,9 +21,9 @@
     remote_src: true
   register: etcd_ca_cert
 
-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
   command: update-ca-certificates
-  when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
+  when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"]
 
 - name: Gen_certs | update ca-certificates (RedHat)
   command: update-ca-trust extract
diff --git a/roles/etcd/templates/etcd.env.j2 b/roles/etcd/templates/etcd.env.j2
index 178366d006f0ae05afe76baddbfc028e522fa313..c18fb413260682f3cd306ebe62dc8ef62fc4eea6 100644
--- a/roles/etcd/templates/etcd.env.j2
+++ b/roles/etcd/templates/etcd.env.j2
@@ -27,3 +27,7 @@ ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
 ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
 ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
 ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }}
+
+{% for key, value in etcd_extra_vars.items() %}
+{{ key }}={{ value }}
+{% endfor %}
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2
index 7a8c1d2731d6c4583745dd7ca5847e914930843e..19bdc8b1fa1aa61b1bbdcb4a3fcc58be6d4e5d25 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2
@@ -7,3 +7,6 @@ rules:
   - apiGroups: [""]
     resources: ["pods"]
     verbs: ["list"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ['*']
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index fefa7caeb078ff96c54d84ee4e18373dca693bb9..0511b7be52240987773c24131c5c03c691a7cccd 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -104,6 +104,7 @@
     - rbac_enabled
     - cloud_provider is defined
     - cloud_provider == 'vsphere'
+    - vsphere_cloud_provider.rc is defined
     - vsphere_cloud_provider.rc != 0
     - kube_version | version_compare('v1.9.0', '>=')
     - kube_version | version_compare('v1.9.3', '<=')
@@ -121,6 +122,7 @@
     - rbac_enabled
     - cloud_provider is defined
     - cloud_provider == 'vsphere'
+    - vsphere_cloud_provider.rc is defined
     - vsphere_cloud_provider.rc != 0
     - kube_version | version_compare('v1.9.0', '>=')
     - kube_version | version_compare('v1.9.3', '<=')
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
index ee2eb8b21484fc6d6885ab7967c6df0992fbe8e3..4cdcf33ad0dbe515cfba3eb085b08242ecea91ea 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
@@ -30,12 +30,12 @@ spec:
           limits:
             cpu: {{ elasticsearch_cpu_limit }}
 {% if elasticsearch_mem_limit is defined and elasticsearch_mem_limit != "0M" %}
-            mem: {{ elasticsearch_mem_limit }}
+            memory: "{{ elasticsearch_mem_limit }}"
 {% endif %}
           requests:
             cpu: {{ elasticsearch_cpu_requests }}
 {% if elasticsearch_mem_requests is defined and elasticsearch_mem_requests != "0M" %}
-            mem: {{ elasticsearch_mem_requests }}
+            memory: "{{ elasticsearch_mem_requests }}"
 {% endif %}
         ports:
         - containerPort: 9200
diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
index 4fdf54c042112941fdee778f9cb3ebe42bcc9674..c5603d389a86b86d3904b9bacd42889c6bc14a94 100644
--- a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
@@ -26,12 +26,12 @@ spec:
           limits:
             cpu: {{ kibana_cpu_limit }}
 {% if kibana_mem_limit is defined and kibana_mem_limit != "0M" %}
-            mem: {{ kibana_mem_limit }}
+            memory: "{{ kibana_mem_limit }}"
 {% endif %}
           requests:
             cpu: {{ kibana_cpu_requests }}
 {% if kibana_mem_requests is defined and kibana_mem_requests != "0M" %}
-            mem: {{ kibana_mem_requests }}
+            memory: "{{ kibana_mem_requests }}"
 {% endif %}
         env:
           - name: "ELASTICSEARCH_URL"
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
index ea5dcb079b486cb1d37f108e5b46e4b850b71594..4b18546d32d5ab624d0bcf2afefc32735dd9070e 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
@@ -1,7 +1,4 @@
 ---
-local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
-local_volume_provisioner_image_tag: v2.0.0
-
 local_volume_provisioner_namespace: "kube-system"
 local_volume_provisioner_base_dir: /mnt/disks
 local_volume_provisioner_mount_dir: /mnt/disks
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2
index 48d0c5b49ea947e6673da6261dcaad4be950a1b7..0d27800b3feeb027cf615c90f90c4b243b77dff4 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2
@@ -5,7 +5,7 @@ metadata:
   name: certificates.certmanager.k8s.io
   labels:
     app: cert-manager
-    chart: cert-manager-0.2.5
+    chart: cert-manager-0.2.8
     release: cert-manager
     heritage: Tiller
 spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2
index 86601e098d1f3f68f4d0559cf8dc5867738926c0..8ac64e35f18b1041c8ff87fe82f8e6b9606d7bd2 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2
@@ -5,7 +5,7 @@ metadata:
   name: clusterissuers.certmanager.k8s.io
   labels:
     app: cert-manager
-    chart: cert-manager-0.2.5
+    chart: cert-manager-0.2.8
     release: cert-manager
     heritage: Tiller
 spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2
index 9d36de5cb123f678c2956358ccabd138a2aacc25..ce6aa48bfe4753bdf835e892fcd40b17dee0e0b8 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2
@@ -5,7 +5,7 @@ metadata:
   name: cert-manager
   labels:
     app: cert-manager
-    chart: cert-manager-0.2.5
+    chart: cert-manager-0.2.8
     release: cert-manager
     heritage: Tiller
 rules:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2
index d0e481c6cc8aa4e2b386f11d20bbd2a8a76d0ea5..d1e26e46298eddc474d01cc2ec8c529312512778 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2
@@ -5,7 +5,7 @@ metadata:
   name: cert-manager
   labels:
     app: cert-manager
-    chart: cert-manager-0.2.5
+    chart: cert-manager-0.2.8
     release: cert-manager
     heritage: Tiller
 roleRef:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2
index ef66bef0523d395e81ef3811b8913f959a3f9ce3..7fe98407bfc9a04bc0e32d2f48a10a51dbbaf44b 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2
@@ -6,7 +6,7 @@ metadata:
   namespace: {{ cert_manager_namespace }}
   labels:
     app: cert-manager
-    chart: cert-manager-0.2.5
+    chart: cert-manager-0.2.8
     release: cert-manager
     heritage: Tiller
 spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2
index 7e344d9f9a666f043f6a6cb3f3e3a89f5670446c..a11386d10d9c7e788a2f19f834b67ea196be2489 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2
@@ -5,7 +5,7 @@ metadata:
   name: issuers.certmanager.k8s.io
   labels:
     app: cert-manager
-    chart: cert-manager-0.2.5
+    chart: cert-manager-0.2.8
     release: cert-manager
     heritage: Tiller
 spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2
index ccdd5f430c118fa7b37d3f923aa324bb77a92781..1a67bf6a41878b55be111f3f50df46865d763127 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2
@@ -6,6 +6,6 @@ metadata:
   namespace: {{ cert_manager_namespace }}
   labels:
     app: cert-manager
-    chart: cert-manager-0.2.5
+    chart: cert-manager-0.2.8
     release: cert-manager
     heritage: Tiller
diff --git a/roles/kubernetes-apps/istio/defaults/main.yml b/roles/kubernetes-apps/istio/defaults/main.yml
index dc51ea7d67f33cb13661f4b84cbbb002166f5dd3..6124ce42ed804337b7476ba0c759639091a7cd12 100644
--- a/roles/kubernetes-apps/istio/defaults/main.yml
+++ b/roles/kubernetes-apps/istio/defaults/main.yml
@@ -1,32 +1,2 @@
 ---
-istio_enabled: false
-
 istio_namespace: istio-system
-istio_version: "0.2.6"
-
-istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
-istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
-
-istio_proxy_image_repo: docker.io/istio/proxy
-istio_proxy_image_tag: "{{ istio_version }}"
-
-istio_proxy_init_image_repo: docker.io/istio/proxy_init
-istio_proxy_init_image_tag: "{{ istio_version }}"
-
-istio_ca_image_repo: docker.io/istio/istio-ca
-istio_ca_image_tag: "{{ istio_version }}"
-
-istio_mixer_image_repo: docker.io/istio/mixer
-istio_mixer_image_tag: "{{ istio_version }}"
-
-istio_pilot_image_repo: docker.io/istio/pilot
-istio_pilot_image_tag: "{{ istio_version }}"
-
-istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
-istio_proxy_debug_image_tag: "{{ istio_version }}"
-
-istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
-istio_sidecar_initializer_image_tag: "{{ istio_version }}"
-
-istio_statsd_image_repo: prom/statsd-exporter
-istio_statsd_image_tag: latest
diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml
index bc05e6f8c8b26ca3baa479e2d88a00f8f5ad2570..fca51a3b6b8b7ccc57f87119bc8617c4990dab30 100644
--- a/roles/kubernetes-apps/meta/main.yml
+++ b/roles/kubernetes-apps/meta/main.yml
@@ -27,12 +27,6 @@ dependencies:
       - apps
       - registry
 
-  - role: kubernetes-apps/metrics
-    when: prometheus_operator_enabled
-    tags:
-      - apps
-      - metrics
-
   # istio role should be last because it takes a long time to initialize and
   # will cause timeouts trying to start other addons.
   - role: kubernetes-apps/istio
diff --git a/roles/kubernetes-apps/metrics/defaults/main.yml b/roles/kubernetes-apps/metrics/defaults/main.yml
deleted file mode 100644
index 72018e6f5f58f6b04921e09c6000ae39d988a386..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/metrics/defaults/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# Prometheus Operator. Needs for k8s metrics. Installed Helm is required.
-prometheus_operator_enabled: false
-
-# K8s cluster metrics. Installed Helm and Prometheus Operators are required.
-k8s_metrics_enabled: false
-
-# Separate namespace for monitoring/metrics
-monitoring_namespace: "monitoring"
diff --git a/roles/kubernetes-apps/metrics/tasks/main.yml b/roles/kubernetes-apps/metrics/tasks/main.yml
deleted file mode 100644
index e2280e98b3ec59c330223592471a493b71a953e8..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/metrics/tasks/main.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- name: Metrics | Make sure Helm is installed
-  command: "{{ bin_dir }}/helm version"
-  register: helm_ready_result
-  until: helm_ready_result|succeeded
-  retries: 4
-  delay: 5
-  when:
-    - prometheus_operator_enabled
-    - inventory_hostname == groups['kube-master'][0]
-
-- name: Metrics | Add coreos repo
-  command: "{{ bin_dir }}/helm repo add coreos https://s3-eu-west-1.amazonaws.com/coreos-charts/stable/"
-  when:
-    - prometheus_operator_enabled
-    - inventory_hostname == groups['kube-master'][0]
-  run_once: true
-
-- name: Metrics | Install Prometheus Operator
-  command: "{{ bin_dir }}/helm upgrade --install prometheus-operator coreos/prometheus-operator --namespace {{ monitoring_namespace }}"
-  when:
-    - prometheus_operator_enabled
-    - inventory_hostname == groups['kube-master'][0]
-  run_once: true
-
-- name: Metrics | Install K8s cluster metrics
-  command: "{{ bin_dir }}/helm upgrade --install kube-prometheus     coreos/kube-prometheus     --namespace {{ monitoring_namespace }}"
-  when:
-    - prometheus_operator_enabled
-    - k8s_metrics_enabled
-    - inventory_hostname == groups['kube-master'][0]
-  run_once: true
diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
index 3884a3a65336c0f22cf33f73e8d5392ee14ca1d8..2589b3610f30f067286fd129b796b30690c48544 100644
--- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
+++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
@@ -34,7 +34,7 @@
     {{ bin_dir }}/kubectl get secrets --all-namespaces
     -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
     | grep kubernetes.io/service-account-token
-    | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller'
+    | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller|local-volume-provisioner'
   register: tokens_to_delete
   when: needs_rotation
 
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index cf70b49954a1d43c3cf7d2a149bc973ef953db5e..d34131a3a35643b155f1ca52c445be28b6aa2732 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -55,7 +55,7 @@
 - name: Copy kubectl binary to ansible host
   fetch:
     src: "{{ bin_dir }}/kubectl"
-    dest: "{{ bin_dir }}/kubectl"
+    dest: "{{ artifacts_dir }}/kubectl"
     flat: yes
     validate_checksum: no
   become: no
@@ -68,8 +68,6 @@
       #!/bin/bash
       kubectl --kubeconfig=admin.conf $@
     dest: "{{ artifacts_dir }}/kubectl.sh"
-    owner: root
-    group: root
     mode: 0755
   become: no
   run_once: yes
diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml
index 6325bb31cedcdc7b4ddb194d3f72893426c822fe..52b04be503704fdc5d9c26afc513b2a75d2d3024 100644
--- a/roles/kubernetes/master/defaults/main.yml
+++ b/roles/kubernetes/master/defaults/main.yml
@@ -52,7 +52,7 @@ kube_apiserver_admission_control:
       {%- if kube_version | version_compare('v1.9', '<') -%}
       GenericAdmissionWebhook
       {%- else -%}
-      ValidatingAdmissionWebhook
+      MutatingAdmissionWebhook,ValidatingAdmissionWebhook
       {%- endif -%}
   - ResourceQuota
 
@@ -73,7 +73,9 @@ kube_oidc_auth: false
 ## Optional settings for OIDC
 # kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
 # kube_oidc_username_claim: sub
+# kube_oidc_username_prefix: oidc:
 # kube_oidc_groups_claim: groups
+# kube_oidc_groups_prefix: oidc:
 
 ## Variables for custom flags
 apiserver_custom_flags: []
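The two new commented defaults let operators namespace OIDC identities, so a token subject like `alice` cannot collide with an existing Kubernetes user or group of the same name. An illustrative OIDC block with the prefixes uncommented:

```yaml
# group_vars/k8s-cluster.yml (illustrative values)
kube_oidc_auth: true
kube_oidc_username_claim: sub
kube_oidc_username_prefix: "oidc:"
kube_oidc_groups_claim: groups
kube_oidc_groups_prefix: "oidc:"
```

With these set, the apiserver manifest change below renders the matching `--oidc-username-prefix` and `--oidc-groups-prefix` flags.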
diff --git a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
index 58eaaa66f7bf469b52aa873d96a7e815ea1e8ce6..83bfbb22ad59dea8eef811c7f017132695e3e898 100644
--- a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
@@ -9,6 +9,10 @@
     - {src: apiserver-key.pem, dest: apiserver.key}
     - {src: ca.pem, dest: ca.crt}
     - {src: ca-key.pem, dest: ca.key}
+    - {src: front-proxy-ca.pem, dest: front-proxy-ca.crt}
+    - {src: front-proxy-ca-key.pem, dest: front-proxy-ca.key}
+    - {src: front-proxy-client.pem, dest: front-proxy-client.crt}
+    - {src: front-proxy-client-key.pem, dest: front-proxy-client.key}
     - {src: service-account-key.pem, dest: sa.pub}
     - {src: service-account-key.pem, dest: sa.key}
   register: kubeadm_copy_old_certs
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index 687ca415d3e21337e74d8e0007b0e0ef46777dec..b589a91766c6f1e6d0b815edb8a524730ad7cf3a 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -73,9 +73,15 @@ spec:
 {%   if kube_oidc_username_claim is defined %}
     - --oidc-username-claim={{ kube_oidc_username_claim }}
 {%   endif %}
+{%   if kube_oidc_username_prefix is defined %}
+    - "--oidc-username-prefix={{ kube_oidc_username_prefix }}"
+{%   endif %}
 {%   if kube_oidc_groups_claim is defined %}
     - --oidc-groups-claim={{ kube_oidc_groups_claim }}
 {%   endif %}
+{%   if kube_oidc_groups_prefix is defined %}
+    - "--oidc-groups-prefix={{ kube_oidc_groups_prefix }}"
+{%   endif %}
 {% endif %}
     - --secure-port={{ kube_apiserver_port }}
     - --insecure-port={{ kube_apiserver_insecure_port }}
@@ -111,7 +117,7 @@ spec:
     - --feature-gates={{ kube_feature_gates|join(',') }}
 {% endif %}
 {% if kube_version | version_compare('v1.9', '>=') %}
-    - --requestheader-client-ca-file={{ kube_cert_dir }}/ca.pem
+    - --requestheader-client-ca-file={{ kube_cert_dir }}/front-proxy-ca.pem
     - --requestheader-allowed-names=front-proxy-client
     - --requestheader-extra-headers-prefix=X-Remote-Extra-
     - --requestheader-group-headers=X-Remote-Group
diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
index a4023365e885342c16cb3a1855082192edf70511..fee223eecfcfa94f0bfcf60890d2ac0fb4dab39d 100644
--- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
@@ -29,6 +29,7 @@ spec:
     - --leader-elect=true
     - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml
 {% if volume_cross_zone_attachment %}
+    - --use-legacy-policy-config
     - --policy-config-file={{ kube_config_dir }}/kube-scheduler-policy.yaml
 {% endif %}
     - --profiling=false
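Without `--use-legacy-policy-config`, newer schedulers look for their policy in a ConfigMap and can ignore `--policy-config-file` entirely, so the flag appears to be required for the `volume_cross_zone_attachment` workaround to keep taking effect. The workaround itself remains a one-line override:

```yaml
## Avoids NoVolumeZoneConflict scheduling failures on top of OpenStack
## with cinder, see kubespray issue #2141
volume_cross_zone_attachment: true
```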
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 2cbf56e1d7f8a75e09049e813a0423f352da52f1..35a364d211cd0fdb7fc8748c28dfcecf998f7d3c 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -92,3 +92,48 @@ kube_cadvisor_port: 0
 
 # The read-only port for the Kubelet to serve on with no authentication/authorization.
 kube_read_only_port: 0
+
+# sysctl_file_path: the sysctl configuration file that settings are persisted to
+sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
+
+# For the OpenStack integration the kubelet needs credentials to access
+# OpenStack APIs such as Nova and Cinder. By default these values are
+# read from the environment.
+openstack_auth_url: "{{ lookup('env','OS_AUTH_URL')  }}"
+openstack_username: "{{ lookup('env','OS_USERNAME')  }}"
+openstack_password: "{{ lookup('env','OS_PASSWORD')  }}"
+openstack_region: "{{ lookup('env','OS_REGION_NAME')  }}"
+openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true)) }}"
+openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
+openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
+
+# For the vSphere integration, the kubelet needs credentials to access
+# vSphere APIs.
+# Documentation regarding these values can be found at
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
+vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
+vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
+vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
+vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
+vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
+vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
+vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
+vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
+vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
+
+vsphere_scsi_controller_type: pvscsi
+# vsphere_public_network is the name of the network the VMs are joined to
+vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
+
+## When azure is used, you need to also set the following variables.
+## see docs/azure.md for details on how to get these values
+#azure_tenant_id:
+#azure_subscription_id:
+#azure_aad_client_id:
+#azure_aad_client_secret:
+#azure_resource_group:
+#azure_location:
+#azure_subnet_name:
+#azure_security_group_name:
+#azure_vnet_name:
+#azure_route_table_name:
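These lookups run on the Ansible controller, so when the OS_* or VSPHERE_* variables are absent from its environment they simply resolve to empty strings. Deployments that prefer explicit configuration can pin the values in group_vars instead; a sketch with placeholder values:

```yaml
# group_vars/all.yml (placeholders, not real endpoints or credentials)
openstack_auth_url: "https://keystone.example.com:5000/v3"
openstack_username: "kubespray"
openstack_password: "{{ vault_openstack_password }}"  # e.g. an ansible-vault encrypted var
openstack_region: "RegionOne"
openstack_tenant_id: "0123456789abcdef0123456789abcdef"
```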
diff --git a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml b/roles/kubernetes/node/tasks/azure-credential-check.yml
similarity index 100%
rename from roles/kubernetes/preinstall/tasks/azure-credential-check.yml
rename to roles/kubernetes/node/tasks/azure-credential-check.yml
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 78e6d92d616b8f9c7b5895578c528bfcf9c6d849..f7520caf85c410ef0aad8410b9c3bbf211480148 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -61,6 +61,7 @@
     name: net.ipv4.ip_local_reserved_ports
     value: "{{ kube_apiserver_node_port_range }}"
     sysctl_set: yes
+    sysctl_file: "{{ sysctl_file_path }}"
     state: present
     reload: yes
   when: kube_apiserver_node_port_range is defined
@@ -96,6 +97,7 @@
   sysctl:
     name: "{{ item }}"
     state: present
+    sysctl_file: "{{ sysctl_file_path }}"
     value: 1
     reload: yes
   when: sysctl_bridge_nf_call_iptables.rc == 0
@@ -118,6 +120,19 @@
   tags:
     - kube-proxy
 
+- name: Persist ip_vs modules
+  copy:
+    dest: /etc/modules-load.d/kube_proxy-ipvs.conf
+    content: |
+      ip_vs
+      ip_vs_rr
+      ip_vs_wrr
+      ip_vs_sh
+      nf_conntrack_ipv4
+  when: kube_proxy_mode == 'ipvs'
+  tags:
+    - kube-proxy
+
 - name: Write proxy manifest
   template:
     src: manifests/kube-proxy.manifest.j2
@@ -134,6 +149,14 @@
   tags:
     - kube-proxy
 
+- include_tasks: "{{ cloud_provider }}-credential-check.yml"
+  when:
+    - cloud_provider is defined
+    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+  tags:
+    - cloud-provider
+    - facts
+
 - name: Write cloud-config
   template:
     src: "{{ cloud_provider }}-cloud-config.j2"
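The new modules-load.d drop-in only fires when kube-proxy actually runs in IPVS mode, keeping the persisted module list in sync with the proxier that needs those modules at boot. Opting in is the existing one-line toggle:

```yaml
# group_vars/k8s-cluster.yml
kube_proxy_mode: ipvs  # switches kube-proxy away from the iptables default
```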
diff --git a/roles/kubernetes/preinstall/tasks/openstack-credential-check.yml b/roles/kubernetes/node/tasks/openstack-credential-check.yml
similarity index 100%
rename from roles/kubernetes/preinstall/tasks/openstack-credential-check.yml
rename to roles/kubernetes/node/tasks/openstack-credential-check.yml
diff --git a/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml b/roles/kubernetes/node/tasks/vsphere-credential-check.yml
similarity index 100%
rename from roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml
rename to roles/kubernetes/node/tasks/vsphere-credential-check.yml
diff --git a/roles/kubernetes/preinstall/templates/azure-cloud-config.j2 b/roles/kubernetes/node/templates/azure-cloud-config.j2
similarity index 100%
rename from roles/kubernetes/preinstall/templates/azure-cloud-config.j2
rename to roles/kubernetes/node/templates/azure-cloud-config.j2
diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2
index 22671b2c340c2433c7e0423569db65a5ecc29d4d..dcf86c3275c8cd2f5d332212b0eefcc0ce1e7509 100644
--- a/roles/kubernetes/node/templates/kubelet-container.j2
+++ b/roles/kubernetes/node/templates/kubelet-container.j2
@@ -24,6 +24,15 @@
   -v /var/lib/kubelet:/var/lib/kubelet:shared \
   -v /var/lib/cni:/var/lib/cni:shared \
   -v /var/run:/var/run:rw \
+  {# Skip this mount when the flexvolume plugin dir lives under /var/lib/kubelet, #}
+  {# which is already mounted above, to avoid mounting it twice #}
+  {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
+  -v {{ kubelet_flexvolumes_plugins_dir }}:{{ kubelet_flexvolumes_plugins_dir }}:rw \
+  {% endif -%}
+  {% if local_volume_provisioner_enabled -%}
+  -v {{ local_volume_provisioner_base_dir }}:{{ local_volume_provisioner_base_dir }}:rw \
+  -v {{ local_volume_provisioner_mount_dir }}:{{ local_volume_provisioner_mount_dir }}:rw \
+  {% endif %}
   -v {{kube_config_dir}}:{{kube_config_dir}}:ro \
   -v /etc/os-release:/etc/os-release:ro \
   {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
diff --git a/roles/kubernetes/node/templates/kubelet.docker.service.j2 b/roles/kubernetes/node/templates/kubelet.docker.service.j2
index bba1a5fc4b5824c042dabd32ec84ccab447423d5..c20cf797fabf3370a7b8c72f22ddafe498ca5a13 100644
--- a/roles/kubernetes/node/templates/kubelet.docker.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.docker.service.j2
@@ -23,9 +23,7 @@ ExecStart={{ bin_dir }}/kubelet \
 Restart=always
 RestartSec=10s
 ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet
-{% if kubelet_flexvolumes_plugins_dir is defined %}
 ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
-{% endif %}
 ExecReload={{ docker_bin_dir }}/docker restart kubelet
 
 
diff --git a/roles/kubernetes/node/templates/kubelet.host.service.j2 b/roles/kubernetes/node/templates/kubelet.host.service.j2
index c7dad4e290c99fa6fd55d301099168c3ce6a0329..3584cfcf51e470b21b86ea11323c84ec759ce1f4 100644
--- a/roles/kubernetes/node/templates/kubelet.host.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.host.service.j2
@@ -7,9 +7,7 @@ Wants=docker.socket
 [Service]
 User=root
 EnvironmentFile=-{{kube_config_dir}}/kubelet.env
-{% if kubelet_flexvolumes_plugins_dir is defined %}
 ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
-{% endif %}
 ExecStart={{ bin_dir }}/kubelet \
 		$KUBE_LOGTOSTDERR \
 		$KUBE_LOG_LEVEL \
diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
index 4286d94708a246f5b917e0d425b6fb2083e060bc..b531025396835e6305a20171735a7f4ddb74f094 100644
--- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
@@ -12,10 +12,7 @@ LimitNOFILE=40000
 
 ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet.uuid
 ExecStartPre=-/bin/mkdir -p /var/lib/kubelet
-
-{% if kubelet_flexvolumes_plugins_dir is defined %}
 ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
-{% endif %}
 
 EnvironmentFile={{kube_config_dir}}/kubelet.env
 # stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
@@ -41,8 +38,17 @@ ExecStart=/usr/bin/rkt run \
         --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
         --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
         --volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \
-{% if kubelet_flexvolumes_plugins_dir is defined %}
+{# Skip this volume when the flexvolume plugin dir lives under /var/lib/kubelet, #}
+{# which is already mounted, to avoid mounting it twice #}
+{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
         --volume flexvolumes,kind=host,source={{ kubelet_flexvolumes_plugins_dir }},readOnly=false \
+{% endif -%}
+{% if local_volume_provisioner_enabled %}
+        --volume local-volume-provisioner-base-dir,kind=host,source={{ local_volume_provisioner_base_dir }},readOnly=false \
+{# Mount the mount dir separately only when it does not overlap the base dir #}
+{% if local_volume_provisioner_base_dir not in local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %}
+        --volume local-volume-provisioner-mount-dir,kind=host,source={{ local_volume_provisioner_mount_dir }},readOnly=false \
+{% endif %}
 {% endif %}
 {% if kubelet_load_modules == true %}
         --mount volume=modprobe,target=/usr/sbin/modprobe \
@@ -65,8 +71,17 @@ ExecStart=/usr/bin/rkt run \
         --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
         --mount volume=var-log,target=/var/log \
         --mount volume=hosts,target=/etc/hosts \
-{% if kubelet_flexvolumes_plugins_dir is defined %}
+{# Skip this mount when the flexvolume plugin dir lives under /var/lib/kubelet, #}
+{# which is already mounted above, to avoid mounting it twice #}
+{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
         --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \
+{% endif -%}
+{% if local_volume_provisioner_enabled %}
+        --mount volume=local-volume-provisioner-base-dir,target={{ local_volume_provisioner_base_dir }} \
+{# Mount the mount dir separately only when it does not overlap the base dir #}
+{% if local_volume_provisioner_base_dir not in local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %}
+        --mount volume=local-volume-provisioner-mount-dir,target={{ local_volume_provisioner_mount_dir }} \
+{% endif %}
 {% endif %}
         --stage1-from-dir=stage1-fly.aci \
 {% if kube_hyperkube_image_repo == "docker" %}
diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2
index 5fef2476e0e33525b403574cc67716d98ad89ec7..19100c1a7a04e920b9de5d6877f4af412edea27b 100644
--- a/roles/kubernetes/node/templates/kubelet.standard.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2
@@ -83,21 +83,21 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {# Kubelet node labels #}
 {% set role_node_labels = [] %}
 {% if inventory_hostname in groups['kube-master'] %}
-{%   do role_node_labels.append('node-role.kubernetes.io/master=true') %}
+{%   set dummy = role_node_labels.append('node-role.kubernetes.io/master=true') %}
 {%   if not standalone_kubelet|bool %}
-{%     do role_node_labels.append('node-role.kubernetes.io/node=true') %}
+{%     set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
 {%   endif %}
 {% else %}
-{%   do role_node_labels.append('node-role.kubernetes.io/node=true') %}
+{%   set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
 {% endif %}
 {% if inventory_hostname in groups['kube-ingress']|default([]) %}
-{%   do role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
+{%   set dummy = role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
 {% endif %}
 {% set inventory_node_labels = [] %}
 {% if node_labels is defined %}
-{% for labelname, labelvalue in node_labels.iteritems() %}
-{% do inventory_node_labels.append(labelname + '=' + labelvalue) %}
-{% endfor %}
+{%   for labelname, labelvalue in node_labels.items() %}
+{%     set dummy = inventory_node_labels.append('%s=%s'|format(labelname, labelvalue)) %}
+{%   endfor %}
 {% endif %}
 {% set all_node_labels = role_node_labels + inventory_node_labels %}
 
@@ -110,9 +110,7 @@ DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"
 KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
 {% endif %}
 
-{% if kubelet_flexvolumes_plugins_dir is defined %}
 KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
-{% endif %}
 
 # Should this cluster be allowed to run privileged docker containers
 KUBE_ALLOW_PRIV="--allow-privileged=true"
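Swapping `{% do %}` for `{% set dummy = ... %}` removes the dependency on the `jinja2.ext.do` extension, which is not enabled in every Ansible setup, and the reworked loop renders any `node_labels` mapping from the inventory. A hypothetical example:

```yaml
# host_vars/node1.yml (label names are illustrative)
node_labels:
  disktype: ssd
  failure-domain.beta.kubernetes.io/zone: us-central1-a
```

The template then merges these `key=value` pairs with the role labels for `--node-labels`.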
diff --git a/roles/kubernetes/preinstall/templates/openstack-cloud-config.j2 b/roles/kubernetes/node/templates/openstack-cloud-config.j2
similarity index 100%
rename from roles/kubernetes/preinstall/templates/openstack-cloud-config.j2
rename to roles/kubernetes/node/templates/openstack-cloud-config.j2
diff --git a/roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2 b/roles/kubernetes/node/templates/vsphere-cloud-config.j2
similarity index 100%
rename from roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2
rename to roles/kubernetes/node/templates/vsphere-cloud-config.j2
diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml
index 295f101789d728fc1ae3ee96963acc13ee7838e4..4e4b892b17b1f9f702ac5e6eda76e535989d131a 100644
--- a/roles/kubernetes/preinstall/defaults/main.yml
+++ b/roles/kubernetes/preinstall/defaults/main.yml
@@ -8,7 +8,7 @@ epel_enabled: false
 
 common_required_pkgs:
   - python-httplib2
-  - openssl
+  - "{{ (ansible_distribution == 'openSUSE Tumbleweed') | ternary('openssl-1_1_0', 'openssl') }}"
   - curl
   - rsync
   - bash-completion
@@ -23,35 +23,6 @@ disable_ipv6_dns: false
 kube_cert_group: kube-cert
 kube_config_dir: /etc/kubernetes
 
-# For the openstack integration kubelet will need credentials to access
-# openstack apis like nova and cinder. Per default this values will be
-# read from the environment.
-openstack_auth_url: "{{ lookup('env','OS_AUTH_URL')  }}"
-openstack_username: "{{ lookup('env','OS_USERNAME')  }}"
-openstack_password: "{{ lookup('env','OS_PASSWORD')  }}"
-openstack_region: "{{ lookup('env','OS_REGION_NAME')  }}"
-openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true)  }}"
-openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
-openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
-
-# For the vsphere integration, kubelet will need credentials to access
-# vsphere apis
-# Documentation regarding these values can be found
-# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
-vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
-vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
-vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
-vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
-vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
-vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
-vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
-vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
-vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
-
-vsphere_scsi_controller_type: pvscsi
-# vsphere_public_network is name of the network the VMs are joined to
-vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
-
 # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
 # for hostnet pods and infra needs
 resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
@@ -60,3 +31,5 @@ resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
 populate_inventory_to_hosts_file: true
 
 preinstall_selinux_state: permissive
+
+sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
diff --git a/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml
index 8c0a5f5991a07a1883100b4278dcd1350748bb97..0ab2c9b07dcd6f74156482664de1ff3d78a07a95 100644
--- a/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml
+++ b/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml
@@ -15,7 +15,7 @@
   notify: Preinstall | restart network
   when: dhclientconffile is defined
 
-- name: Configue dhclient hooks for resolv.conf (non-RH)
+- name: Configure dhclient hooks for resolv.conf (non-RH)
   template:
     src: dhclient_dnsupdate.sh.j2
     dest: "{{ dhclienthookfile }}"
@@ -24,7 +24,7 @@
   notify: Preinstall | restart network
   when: ansible_os_family != "RedHat"
 
-- name: Configue dhclient hooks for resolv.conf (RH-only)
+- name: Configure dhclient hooks for resolv.conf (RH-only)
   template:
     src: dhclient_dnsupdate_rh.sh.j2
     dest: "{{ dhclienthookfile }}"
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index aca0c960653dcc50186c8b85166e54d95a344cb3..0a994e8e2295e059b2f59f2b2269fbb24bf565d2 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -3,6 +3,12 @@
   tags:
     - asserts
 
+# This is run before bin_dir is pinned because these tasks are run on localhost
+- import_tasks: pre_upgrade.yml
+  run_once: true
+  tags:
+    - upgrade
+
 - name: Force binaries directory for Container Linux by CoreOS
   set_fact:
     bin_dir: "/opt/bin"
@@ -71,14 +77,6 @@
     - cloud-provider
     - facts
 
-- include_tasks: "{{ cloud_provider }}-credential-check.yml"
-  when:
-    - cloud_provider is defined
-    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
-  tags:
-    - cloud-provider
-    - facts
-
 - name: Create cni directories
   file:
     path: "{{ item }}"
@@ -99,6 +97,20 @@
     - contiv
     - bootstrap-os
 
+- name: Create local volume provisioner directories
+  file:
+    path: "{{ item }}"
+    state: directory
+    owner: kube
+  with_items:
+    - "{{ local_volume_provisioner_base_dir }}"
+    - "{{ local_volume_provisioner_mount_dir }}"
+  when:
+    - inventory_hostname in groups['k8s-cluster']
+    - local_volume_provisioner_enabled
+  tags:
+    - persistent_volumes
+
 - import_tasks: resolvconf.yml
   when:
     - dns_mode != 'none'
@@ -146,6 +158,15 @@
     - not is_atomic
   tags: bootstrap-os
 
+- name: Update package management cache (zypper) - SUSE
+  shell: zypper -n --gpg-auto-import-keys ref
+  register: make_cache_output
+  until: make_cache_output|succeeded
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when:
+    - ansible_pkg_mgr == 'zypper'
+  tags: bootstrap-os
 
 - name: Update package management cache (APT)
   apt:
@@ -224,12 +245,6 @@
   tags:
     - bootstrap-os
 
-- name: set default sysctl file path
-  set_fact:
-    sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
-  tags:
-    - bootstrap-os
-
 - name: Stat sysctl file configuration
   stat:
     path: "{{sysctl_file_path}}"
diff --git a/roles/kubernetes/preinstall/tasks/pre_upgrade.yml b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml
new file mode 100644
index 0000000000000000000000000000000000000000..63cbc9be1112745236f412a8fa14f88f3a5d6b4b
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml
@@ -0,0 +1,28 @@
+---
+- name: "Pre-upgrade | check if old credential dir exists"
+  local_action:
+    module: stat
+    path: "{{ inventory_dir }}/../credentials"
+  vars:
+    ansible_python_interpreter: "/usr/bin/env python"
+  register: old_credential_dir
+  become: no
+
+- name: "Pre-upgrade | check if new credential dir exists"
+  local_action:
+    module: stat
+    path: "{{ inventory_dir }}/credentials"
+  vars:
+    ansible_python_interpreter: "/usr/bin/env python"
+  register: new_credential_dir
+  become: no
+  when: old_credential_dir.stat.exists
+
+- name: "Pre-upgrade | move data from old credential dir to new"
+  local_action: command mv {{ inventory_dir }}/../credentials {{ inventory_dir }}/credentials
+  args:
+    creates: "{{ inventory_dir }}/credentials"
+  vars:
+    ansible_python_interpreter: "/usr/bin/env python"
+  become: no
+  when: old_credential_dir.stat.exists and not new_credential_dir.stat.exists
diff --git a/roles/kubernetes/preinstall/tasks/verify-settings.yml b/roles/kubernetes/preinstall/tasks/verify-settings.yml
index 8f0a2e854735fac7b8220e552e1f6532a74c4fc7..5f647101d8b70a15379c6b6008c5fe6615df14a3 100644
--- a/roles/kubernetes/preinstall/tasks/verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/verify-settings.yml
@@ -12,7 +12,7 @@
 
 - name: Stop if unknown OS
   assert:
-    that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS']
+    that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS', 'openSUSE Leap', 'openSUSE Tumbleweed']
   ignore_errors: "{{ ignore_assert_errors }}"
 
 - name: Stop if unknown network plugin
@@ -94,4 +94,4 @@
   assert:
     that: ansible_kernel.split('-')[0]|version_compare('4.8', '>=')
   when: kube_network_plugin == 'cilium'
-  ignore_errors: "{{ ignore_assert_errors }}"
\ No newline at end of file
+  ignore_errors: "{{ ignore_assert_errors }}"
diff --git a/roles/kubernetes/preinstall/vars/suse.yml b/roles/kubernetes/preinstall/vars/suse.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3f4f9aee9a1c5c55c3c372724785ea757844452d
--- /dev/null
+++ b/roles/kubernetes/preinstall/vars/suse.yml
@@ -0,0 +1,4 @@
+---
+required_pkgs:
+  - device-mapper
+  - ebtables
diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml
index f0d10711d306a7aa6cfbcc2a7a96245e0ef22803..cda85eeb27a7a3eb35b01c5aa2a8280261bf2892 100644
--- a/roles/kubernetes/secrets/defaults/main.yml
+++ b/roles/kubernetes/secrets/defaults/main.yml
@@ -1,3 +1,4 @@
 ---
 kube_cert_group: kube-cert
 kube_vault_mount_path: kube
+front_proxy_vault_mount_path: front-proxy
diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh
index 1c34fc69dba0b67e46c1686acb6a3e9a52653e99..2a4b930ea2b1f2b9b0e9ced51991b62ea1c3c816 100755
--- a/roles/kubernetes/secrets/files/make-ssl.sh
+++ b/roles/kubernetes/secrets/files/make-ssl.sh
@@ -72,6 +72,15 @@ else
     openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
 fi
 
+# Front proxy client CA
+if [ -e "$SSLDIR/front-proxy-ca-key.pem" ]; then
+    # Reuse existing front proxy CA
+    cp $SSLDIR/{front-proxy-ca.pem,front-proxy-ca-key.pem} .
+else
+    openssl genrsa -out front-proxy-ca-key.pem 2048 > /dev/null 2>&1
+    openssl req -x509 -new -nodes -key front-proxy-ca-key.pem -days 36500 -out front-proxy-ca.pem -subj "/CN=front-proxy-ca" > /dev/null 2>&1
+fi
+
 gen_key_and_cert() {
     local name=$1
     local subject=$2
@@ -80,6 +89,14 @@ gen_key_and_cert() {
     openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
 }
 
+gen_key_and_cert_front_proxy() {
+    local name=$1
+    local subject=$2
+    openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1
+    openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in ${name}.csr -CA front-proxy-ca.pem -CAkey front-proxy-ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
+}
+
 # Admins
 if [ -n "$MASTERS" ]; then
 
@@ -105,7 +122,7 @@ if [ -n "$MASTERS" ]; then
     # kube-controller-manager
     gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager"
     # metrics aggregator
-    gen_key_and_cert "front-proxy-client" "/CN=front-proxy-client"
+    gen_key_and_cert_front_proxy "front-proxy-client" "/CN=front-proxy-client"
 
     for host in $MASTERS; do
         cn="${host%%.*}"
diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml
index 4780b14d63e5f37540b5192453477b6430969395..110ffa8981c842a4643e51172b1ca2178a344a7a 100644
--- a/roles/kubernetes/secrets/tasks/check-certs.yml
+++ b/roles/kubernetes/secrets/tasks/check-certs.yml
@@ -48,8 +48,11 @@
        '{{ kube_cert_dir }}/kube-scheduler-key.pem',
        '{{ kube_cert_dir }}/kube-controller-manager.pem',
        '{{ kube_cert_dir }}/kube-controller-manager-key.pem',
+       '{{ kube_cert_dir }}/front-proxy-ca.pem',
+       '{{ kube_cert_dir }}/front-proxy-ca-key.pem',
        '{{ kube_cert_dir }}/front-proxy-client.pem',
        '{{ kube_cert_dir }}/front-proxy-client-key.pem',
+       '{{ kube_cert_dir }}/service-account-key.pem',
        {% for host in groups['kube-master'] %}
        '{{ kube_cert_dir }}/admin-{{ host }}.pem'
        '{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
@@ -71,7 +74,9 @@
       {% for cert in ['apiserver.pem', 'apiserver-key.pem',
                       'kube-scheduler.pem','kube-scheduler-key.pem',
                       'kube-controller-manager.pem','kube-controller-manager-key.pem',
-                      'front-proxy-client.pem','front-proxy-client-key.pem'] -%}
+                      'front-proxy-ca.pem','front-proxy-ca-key.pem',
+                      'front-proxy-client.pem','front-proxy-client-key.pem',
+                      'service-account-key.pem'] -%}
         {% set cert_file = "%s/%s.pem"|format(kube_cert_dir, cert) %}
         {% if not cert_file in existing_certs -%}
         {%- set gen = True -%}
diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
index c39f606ad43f85ea29d7c4c0d7d016891412d378..72ff6b46917d0c2505395b9b1f1640c8050ae0ed 100644
--- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml
+++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
@@ -73,6 +73,8 @@
                        'kube-scheduler-key.pem',
                        'kube-controller-manager.pem',
                        'kube-controller-manager-key.pem',
+                       'front-proxy-ca.pem',
+                       'front-proxy-ca-key.pem',
                        'front-proxy-client.pem',
                        'front-proxy-client-key.pem',
                        'service-account-key.pem',
@@ -85,6 +87,8 @@
                       'admin-{{ inventory_hostname }}-key.pem',
                       'apiserver.pem',
                       'apiserver-key.pem',
+                      'front-proxy-ca.pem',
+                      'front-proxy-ca-key.pem',
                       'front-proxy-client.pem',
                       'front-proxy-client-key.pem',
                       'service-account-key.pem',
diff --git a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml
index cc16b749bed5ff3ea060c48ac0faa86e4d81f10f..8c9d12384cc3c913be104bc7bbb3b2e040bdec96 100644
--- a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml
+++ b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml
@@ -52,6 +52,11 @@
         "{{ hostvars[host]['ip'] }}",
         {%- endif -%}
         {%- endfor -%}
+        {%- if supplementary_addresses_in_ssl_keys is defined -%}
+        {%- for ip_item in supplementary_addresses_in_ssl_keys -%}
+        "{{ ip_item }}",
+        {%- endfor -%}
+        {%- endif -%}
         "127.0.0.1","::1","{{ kube_apiserver_ip }}"
         ]
     issue_cert_path: "{{ item }}"
@@ -98,6 +103,8 @@
 - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
   vars:
     issue_cert_common_name: "front-proxy-client"
+    issue_cert_copy_ca: "{{ item == kube_front_proxy_clients_certs_needed|first }}"
+    issue_cert_ca_filename: front-proxy-ca.pem
     issue_cert_alt_names: "{{ kube_cert_alt_names }}"
     issue_cert_file_group: "{{ kube_cert_group }}"
     issue_cert_file_owner: kube
@@ -110,12 +117,17 @@
         "{{ hostvars[host]['ip'] }}",
         {%- endif -%}
         {%- endfor -%}
+        {%- if supplementary_addresses_in_ssl_keys is defined -%}
+        {%- for ip_item in supplementary_addresses_in_ssl_keys -%}
+        "{{ ip_item }}",
+        {%- endfor -%}
+        {%- endif -%}
         "127.0.0.1","::1","{{ kube_apiserver_ip }}"
         ]
     issue_cert_path: "{{ item }}"
     issue_cert_role: front-proxy-client
     issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
-    issue_cert_mount_path: "{{ kube_vault_mount_path }}"
+    issue_cert_mount_path: "{{ front_proxy_vault_mount_path }}"
   with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}"
   when: inventory_hostname in groups['kube-master']
   notify: set secret_changed
diff --git a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml
index d747044484b9d89ed9829031003a8aa41bcc612c..50e1a01e784dde373beff14e9be5277690c12bbb 100644
--- a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml
+++ b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml
@@ -32,7 +32,7 @@
     sync_file_hosts: "{{ groups['kube-master'] }}"
     sync_file_is_cert: true
     sync_file_owner: kube
-  with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem"]
+  with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"]
 
 - name: sync_kube_master_certs | Set facts for kube master components sync_file results
   set_fact:
@@ -44,6 +44,18 @@
   set_fact:
     sync_file_results: []
 
+- include_tasks: ../../../vault/tasks/shared/sync_file.yml
+  vars:
+    sync_file: front-proxy-ca.pem
+    sync_file_dir: "{{ kube_cert_dir }}"
+    sync_file_group: "{{ kube_cert_group }}"
+    sync_file_hosts: "{{ groups['kube-master'] }}"
+    sync_file_owner: kube
+
+- name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem
+  set_fact:
+    sync_file_results: []
+
 - include_tasks: ../../../vault/tasks/shared/sync_file.yml
   vars:
     sync_file: "{{ item }}"
diff --git a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml
index eec44987f5109ce6d7de88deb7f0748fe4298ffa..cdd5f48fa0785ad4c3142b85ee48e6a110bb6ca7 100644
--- a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml
+++ b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml
@@ -8,6 +8,8 @@
       /etc/pki/ca-trust/source/anchors/kube-ca.crt
       {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
       /etc/ssl/certs/kube-ca.pem
+      {%- elif ansible_os_family == "Suse" -%}
+      /etc/pki/trust/anchors/kube-ca.pem
       {%- endif %}
   tags:
     - facts
@@ -19,9 +21,9 @@
     remote_src: true
   register: kube_ca_cert
 
-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
   command: update-ca-certificates
-  when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
+  when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"]
 
 - name: Gen_certs | update ca-certificates (RedHat)
   command: update-ca-trust extract
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index d6217d654e6011d458fc5b7c29547397672a461a..f297c007e520331b8996ff4211cd95dd3d287034 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -5,7 +5,7 @@ bootstrap_os: none
 
 # Use proxycommand if bastion host is in group all
 # This change obsoletes editing the ansible.cfg file depending on bastion existence
-ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ ansible_user }}@{{hostvars['bastion']['ansible_host']}} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
+ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
 
 kube_api_anonymous_auth: false
 
@@ -129,6 +129,10 @@ kube_apiserver_insecure_port: 8080
 # Aggregator
 kube_api_aggregator_routing: false
 
+# Docker options
+# Optionally do not run docker role
+manage_docker: true
+
 # Path used to store Docker data
 docker_daemon_graph: "/var/lib/docker"
 
@@ -219,6 +223,10 @@ vault_config_dir: "{{ vault_base_dir }}/config"
 vault_roles_dir: "{{ vault_base_dir }}/roles"
 vault_secrets_dir: "{{ vault_base_dir }}/secrets"
 
+# Local volume provisioner dirs
+local_volume_provisioner_base_dir: /mnt/disks
+local_volume_provisioner_mount_dir: /mnt/disks
+
 ## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
 ## See https://github.com/kubernetes-incubator/kubespray/issues/2141
 ## Set this variable to true to get rid of this issue
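`manage_docker` gives clusters with a pre-provisioned container runtime a supported way to skip the docker role; `scale.yml` below gates the role on it, with the default of `true` preserving current behavior:

```yaml
# group_vars/all.yml
manage_docker: false  # skip the docker role; Docker is assumed to be installed already
```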
diff --git a/roles/network_plugin/weave/defaults/main.yml b/roles/network_plugin/weave/defaults/main.yml
index eecb06171be55c0ad95791c3ab0d72f52112aba1..ab955ebef60913f9d00449f03d03fe6a83c72ac9 100644
--- a/roles/network_plugin/weave/defaults/main.yml
+++ b/roles/network_plugin/weave/defaults/main.yml
@@ -1,7 +1,7 @@
 ---
 # Limits
 weave_memory_limits: 400M
-weave_cpu_limits: 30m
+weave_cpu_limits: 300m
 weave_memory_requests: 64M
 weave_cpu_requests: 10m
 
diff --git a/roles/rkt/tasks/install.yml b/roles/rkt/tasks/install.yml
index 599f9e50e2b1d5e3c3cd22c7b73b29560a2c924b..f881a81fe40c03e34a911c5b22e9cf9c5db419fa 100644
--- a/roles/rkt/tasks/install.yml
+++ b/roles/rkt/tasks/install.yml
@@ -34,3 +34,13 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   when: ansible_os_family == "RedHat"
+
+- name: install rkt pkg on openSUSE
+  zypper:
+    name: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
+    state: present
+  register: rkt_task_result
+  until: rkt_task_result|succeeded
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: ansible_os_family == "Suse"
diff --git a/roles/rkt/vars/suse.yml b/roles/rkt/vars/suse.yml
new file mode 100644
index 0000000000000000000000000000000000000000..13149e8fbfeac5d9e4f793588d0fcb0f56b7d72a
--- /dev/null
+++ b/roles/rkt/vars/suse.yml
@@ -0,0 +1,2 @@
+---
+rkt_pkg_name: "rkt-{{ rkt_pkg_version }}.x86_64.rpm"
diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml
index 8e5ad08a08371de29bd6c7b07d517e86dc628ce5..f19c734383614379c379e53ef85727c1c749c39e 100644
--- a/roles/vault/defaults/main.yml
+++ b/roles/vault/defaults/main.yml
@@ -97,6 +97,11 @@ vault_ca_options:
     format: pem
     ttl: "{{ vault_max_lease_ttl }}"
     exclude_cn_from_sans: true
+  front_proxy:
+    common_name: front-proxy
+    format: pem
+    ttl: "{{ vault_max_lease_ttl }}"
+    exclude_cn_from_sans: true
 
 vault_client_headers:
   Accept: "application/json"
@@ -164,11 +169,18 @@ vault_pki_mounts:
           allow_any_name: true
           enforce_hostnames: false
           organization: "system:node-proxier"
+  front_proxy:
+    name: front-proxy
+    default_lease_ttl: "{{ vault_default_lease_ttl }}"
+    max_lease_ttl: "{{ vault_max_lease_ttl }}"
+    description: "Kubernetes Front Proxy CA"
+    cert_dir: "{{ vault_kube_cert_dir }}"
+    roles:
       - name: front-proxy-client
         group: k8s-cluster
-        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy.creds length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/front-proxy-client.creds length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
           enforce_hostnames: false
-          organization: "system:front-proxy"
+          organization: "system:front-proxy"
\ No newline at end of file
diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml
index fdecbdd2afcfcc9b3e41f0f387136312897c101b..7ca82a9c40d76eea28bc5db0387e8e196ce573cd 100644
--- a/roles/vault/tasks/bootstrap/main.yml
+++ b/roles/vault/tasks/bootstrap/main.yml
@@ -57,6 +57,7 @@
     gen_ca_mount_path: "{{ vault_pki_mounts.etcd.name }}"
     gen_ca_vault_headers: "{{ vault_headers }}"
     gen_ca_vault_options: "{{ vault_ca_options.etcd }}"
+    gen_ca_copy_group: "etcd"
   when: inventory_hostname in groups.etcd and vault_etcd_ca_cert_needed
 
 - import_tasks: gen_vault_certs.yml
diff --git a/roles/vault/tasks/cluster/create_mounts.yml b/roles/vault/tasks/cluster/create_mounts.yml
index c6e075698db24908da2f091d25ddd02ce04af0fa..087430942cf4389c972ad84e925c3bfddd9fb283 100644
--- a/roles/vault/tasks/cluster/create_mounts.yml
+++ b/roles/vault/tasks/cluster/create_mounts.yml
@@ -6,8 +6,9 @@
     create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}"
     create_mount_description: "{{ item.description }}"
     create_mount_cert_dir: "{{ item.cert_dir }}"
-    create_mount_config_ca_needed: item.name != vault_pki_mounts.kube.name
+    create_mount_config_ca_needed: item.name != vault_pki_mounts.kube.name and item.name != vault_pki_mounts.front_proxy.name
   with_items:
     - "{{ vault_pki_mounts.vault }}"
     - "{{ vault_pki_mounts.etcd }}"
     - "{{ vault_pki_mounts.kube }}"
+    - "{{ vault_pki_mounts.front_proxy }}"
diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml
index d904c2398b42dabe5a0eb3b854e96f473a26c30d..7f535d0682593286869c7939ec18e728a6437a7d 100644
--- a/roles/vault/tasks/cluster/main.yml
+++ b/roles/vault/tasks/cluster/main.yml
@@ -32,6 +32,15 @@
     gen_ca_mount_path: "{{ vault_pki_mounts.kube.name }}"
     gen_ca_vault_headers: "{{ vault_headers }}"
     gen_ca_vault_options: "{{ vault_ca_options.kube }}"
+    gen_ca_copy_group: "kube-master"
+  when: inventory_hostname in groups.vault
+
+- include_tasks: ../shared/gen_ca.yml
+  vars:
+    gen_ca_cert_dir: "{{ vault_pki_mounts.front_proxy.cert_dir }}"
+    gen_ca_mount_path: "{{ vault_pki_mounts.front_proxy.name }}"
+    gen_ca_vault_headers: "{{ vault_headers }}"
+    gen_ca_vault_options: "{{ vault_ca_options.front_proxy }}"
   when: inventory_hostname in groups.vault
 
 - include_tasks: ../shared/auth_backend.yml
@@ -46,6 +55,7 @@
     - "{{ vault_pki_mounts.vault }}"
     - "{{ vault_pki_mounts.etcd }}"
     - "{{ vault_pki_mounts.kube }}"
+    - "{{ vault_pki_mounts.front_proxy }}"
   loop_control:
     loop_var: mount
   when: inventory_hostname in groups.vault
diff --git a/roles/vault/tasks/shared/gen_ca.yml b/roles/vault/tasks/shared/gen_ca.yml
index 654cc3ff3b3c4876ce5ce6431c1bee14cba40612..77f2f82b9839cfd41db9184a720cfa17df138599 100644
--- a/roles/vault/tasks/shared/gen_ca.yml
+++ b/roles/vault/tasks/shared/gen_ca.yml
@@ -24,9 +24,12 @@
     mode: 0644
   when: vault_ca_gen.status == 200
 
-- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key locally"
+
+- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key to necessary hosts"
   copy:
     content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['private_key'] }}"
     dest: "{{ gen_ca_cert_dir }}/ca-key.pem"
     mode: 0640
   when: vault_ca_gen.status == 200
+  delegate_to: "{{ item }}"
+  with_items: "{{ (groups[gen_ca_copy_group|default('vault')]) | union(groups['vault']) }}"
diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml
index 1ba90ea77b5d7af39284d8c8485ec30d548d82ef..36a42efaaa76c1eb6f45efae25069ac85f4c8be3 100644
--- a/roles/vault/tasks/shared/issue_cert.yml
+++ b/roles/vault/tasks/shared/issue_cert.yml
@@ -6,6 +6,7 @@
 #   issue_cert_alt_name:    Requested Subject Alternative Names, in a list.
 #   issue_cert_common_name: Common Name included in the cert
 #   issue_cert_copy_ca:     Copy issuing CA cert needed
+#   issue_cert_ca_filename: Filename for copied issuing CA cert (default ca.pem)
 #   issue_cert_dir_mode:    Mode of the placed cert directory
 #   issue_cert_file_group:  Group of the placed cert file and directory
 #   issue_cert_file_mode:   Mode of the placed cert file
@@ -100,7 +101,7 @@
 - name: issue_cert | Copy issuing CA cert
   copy:
     content: "{{ issue_cert_result['json']['data']['issuing_ca'] }}\n"
-    dest: "{{ issue_cert_path | dirname }}/ca.pem"
+    dest: "{{ issue_cert_path | dirname }}/{{ issue_cert_ca_filename | default('ca.pem') }}"
     group: "{{ issue_cert_file_group | d('root' )}}"
     mode: "{{ issue_cert_file_mode | d('0644') }}"
     owner: "{{ issue_cert_file_owner | d('root') }}"
diff --git a/scale.yml b/scale.yml
index bcf6c69b00e2a3c8ea71fa3ab8d3988e1b1b079f..3f8613011f5576300ec77ef89e543333342e970f 100644
--- a/scale.yml
+++ b/scale.yml
@@ -28,7 +28,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: docker, tags: docker }
+    - { role: docker, tags: docker, when: manage_docker|default(true) }
     - role: rkt
       tags: rkt
       when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
diff --git a/setup.cfg b/setup.cfg
index 2327160ad35455e4d9362deab56f0c3b1405ce33..ada55fcd94ea86e8884d794531b4348fbcf3a846 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -26,6 +26,7 @@ data_files =
         upgrade-cluster.yml
         scale.yml
         reset.yml
+        remove-node.yml
         extra_playbooks/upgrade-only-k8s.yml
     /usr/share/kubespray/roles = roles/*
     /usr/share/doc/kubespray/ =
diff --git a/tests/Makefile b/tests/Makefile
index 8d17e243c994934b382b34ee5b03317cdd247dc0..30442fb25f7d8d1c63671a6a40560830a23d5e3b 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -1,4 +1,4 @@
-INVENTORY=$(PWD)/../inventory/sample/hosts.ini
+INVENTORY=$(PWD)/../inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
 
 $(HOME)/.ssh/id_rsa:
 	mkdir -p $(HOME)/.ssh
diff --git a/tests/files/gce_centos-weave-kubeadm.yml b/tests/files/gce_centos-weave-kubeadm.yml
index a1c88e97661642e9328c9a2a722b2a77a67465b2..a410be3f2571d03240f318b78dfdfb02e108ed09 100644
--- a/tests/files/gce_centos-weave-kubeadm.yml
+++ b/tests/files/gce_centos-weave-kubeadm.yml
@@ -7,8 +7,6 @@ startup_script: ""
 
 # Deployment settings
 kube_network_plugin: weave
-weave_cpu_limits: "100m"
-weave_cpu_requests: "100m"
 kubeadm_enabled: true
 deploy_netchecker: true
 kubedns_min_replicas: 1
diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml
index c120920116551570ca024e345b3aa010ae540f6e..9e2e1083fe0339ed784d5477b3889f8368995ba7 100644
--- a/tests/files/gce_centos7-flannel-addons.yml
+++ b/tests/files/gce_centos7-flannel-addons.yml
@@ -16,7 +16,5 @@ deploy_netchecker: true
 kubedns_min_replicas: 1
 cloud_provider: gce
 kube_encrypt_secret_data: true
-prometheus_operator_enabled: true
-k8s_metrics_enabled: true
 ingress_nginx_enabled: true
 cert_manager_enabled: true
diff --git a/tests/files/gce_coreos-alpha-weave-ha.yml b/tests/files/gce_coreos-alpha-weave-ha.yml
index 1666e0927faa4a29ae070a5869a991ee4bee13ce..883a67e2ac0db3279fb33dcaa6dd99496c8ff8f0 100644
--- a/tests/files/gce_coreos-alpha-weave-ha.yml
+++ b/tests/files/gce_coreos-alpha-weave-ha.yml
@@ -7,8 +7,6 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
 # Deployment settings
 kube_network_plugin: weave
-weave_cpu_limits: "100m"
-weave_cpu_requests: "100m"
 bootstrap_os: coreos
 resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
 deploy_netchecker: true
diff --git a/tests/files/gce_opensuse-canal.yml b/tests/files/gce_opensuse-canal.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9eae57e2e463a028a692875ca0531ece7d6e6275
--- /dev/null
+++ b/tests/files/gce_opensuse-canal.yml
@@ -0,0 +1,12 @@
+# Instance settings
+cloud_image_family: opensuse-leap
+cloud_region: us-central1-c
+mode: default
+
+# Deployment settings
+bootstrap_os: opensuse
+kube_network_plugin: canal
+kubeadm_enabled: true
+deploy_netchecker: true
+kubedns_min_replicas: 1
+cloud_provider: gce
diff --git a/tests/files/gce_rhel7-weave.yml b/tests/files/gce_rhel7-weave.yml
index e6928b7a2508bd813a7f7331102113cb33e34c4e..bfff490daae32e04d99a054cfe1144c3bdbb7d98 100644
--- a/tests/files/gce_rhel7-weave.yml
+++ b/tests/files/gce_rhel7-weave.yml
@@ -5,8 +5,6 @@ mode: default
 
 # Deployment settings
 kube_network_plugin: weave
-weave_cpu_limits: "100m"
-weave_cpu_requests: "100m"
 deploy_netchecker: true
 kubedns_min_replicas: 1
 cloud_provider: gce
diff --git a/tests/files/gce_ubuntu-weave-sep.yml b/tests/files/gce_ubuntu-weave-sep.yml
index 6e701cb233334b78d511f852fa2223469653029d..4598672d10c3a0afeb2ada83d6f1611c2f7035b7 100644
--- a/tests/files/gce_ubuntu-weave-sep.yml
+++ b/tests/files/gce_ubuntu-weave-sep.yml
@@ -6,8 +6,6 @@ mode: separate
 # Deployment settings
 bootstrap_os: ubuntu
 kube_network_plugin: weave
-weave_cpu_limits: "100m"
-weave_cpu_requests: "100m"
 deploy_netchecker: true
 kubedns_min_replicas: 1
 cloud_provider: gce
diff --git a/tests/support/aws.groovy b/tests/support/aws.groovy
index a5ce89b8f0973440cefb4059b5f18ad509ffc4c7..bc13b513a9869b91690edec56860ed8c119ac494 100644
--- a/tests/support/aws.groovy
+++ b/tests/support/aws.groovy
@@ -1,9 +1,9 @@
 def run(username, credentialsId, ami, network_plugin, aws_access, aws_secret) {
-      def inventory_path = pwd() + "/inventory/sample/hosts.ini"
+      def inventory_path = pwd() + "/inventory/sample/${env.CI_JOB_NAME}-${env.BUILD_NUMBER}.ini"
       dir('tests') {
           wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) {
               try {
-                  create_vm("${env.JOB_NAME}-${env.BUILD_NUMBER}", inventory_path, ami, username, network_plugin, aws_access, aws_secret)
+                  create_vm("${env.CI_JOB_NAME}-${env.BUILD_NUMBER}", inventory_path, ami, username, network_plugin, aws_access, aws_secret)
                   install_cluster(inventory_path, credentialsId, network_plugin)
 
                   test_apiserver(inventory_path, credentialsId)
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index 2fa78545f146e0504fd245973892885d970666b6..531b84c06434786a07e73346f71e9a05d06fe023 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -14,7 +14,7 @@
 
 
   - name: Wait for pods to be ready
-    shell: "{{bin_dir}}/kubectl get pods"
+    shell: "{{bin_dir}}/kubectl get pods -n test"
     register: pods
     until:
       - '"ContainerCreating" not in pods.stdout'
@@ -25,18 +25,18 @@
     no_log: true
 
   - name: Get pod names
-    shell: "{{bin_dir}}/kubectl get pods -o json"
+    shell: "{{bin_dir}}/kubectl get pods -n test -o json"
     register: pods
     no_log: true
 
   - name: Get hostnet pods
-    command: "{{bin_dir}}/kubectl get pods -o
+    command: "{{bin_dir}}/kubectl get pods -n test -o
              jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     register: hostnet_pods
     no_log: true
 
   - name: Get running pods
-    command: "{{bin_dir}}/kubectl get pods -o
+    command: "{{bin_dir}}/kubectl get pods -n test -o
              jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     register: running_pods
     no_log: true
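Scoping these checks with -n test assumes the pods under test are created in the test namespace by earlier tasks of this playbook. A minimal sketch of such a setup step, for orientation only (the task, namespace creation, and image are illustrative, not part of this patch):

      - name: Create a test pod in the test namespace (illustrative)
        shell: |
          {{bin_dir}}/kubectl create namespace test
          {{bin_dir}}/kubectl run alpine --image=alpine:3.7 --restart=Never -n test -- sleep 3600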
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 7acec3083385e82b96c6acbb4472cc3cf9d217ce..9e858acd377b5284c2980910345116952f27666d 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -34,7 +34,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: docker, tags: docker }
+    - { role: docker, tags: docker, when: manage_docker|default(true) }
     - role: rkt
       tags: rkt
       when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"