diff --git a/contrib/terraform/group_vars b/contrib/terraform/group_vars
new file mode 120000
index 0000000000000000000000000000000000000000..febd29cb3f61a7da6d6fd860b15fd179b8933ce6
--- /dev/null
+++ b/contrib/terraform/group_vars
@@ -0,0 +1 @@
+../../inventory/group_vars
\ No newline at end of file
diff --git a/contrib/terraform/openstack/group_vars/all.yml b/contrib/terraform/openstack/group_vars/all.yml
deleted file mode 120000
index eb4e855dc37d42171a5243682a65f73f9d30917f..0000000000000000000000000000000000000000
--- a/contrib/terraform/openstack/group_vars/all.yml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../inventory/group_vars/all.yml
\ No newline at end of file
diff --git a/docs/ansible.md b/docs/ansible.md
index 875cf3766a64cec234c3db3cc4ba0a3a8c6c3dc6..b47f3d7053da85a77784ecdc88eb756eb4c133aa 100644
--- a/docs/ansible.md
+++ b/docs/ansible.md
@@ -67,7 +67,9 @@ Group vars and overriding variables precedence
 ----------------------------------------------
 
 The group variables to control main deployment options are located in the directory ``inventory/group_vars``.
-
+Optional variables are located in the ``inventory/group_vars/all.yml``.
+Mandatory variables that are common for at least one role (or a node group) can be found in the
+``inventory/group_vars/k8s-cluster.yml``.
 There are also role vars for docker, rkt, kubernetes preinstall and master roles.
 According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
 those cannot be overriden from the group vars. In order to override, one should use
diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml
index adb1833610fb8ed7697351beeee384a2bd414187..17652ef508aa293cbf64adf28ebfb2d91e1352d7 100644
--- a/inventory/group_vars/all.yml
+++ b/inventory/group_vars/all.yml
@@ -1,176 +1,60 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.5.3
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# Uncomment this line for CoreOS only.
-# Directory where python binary is installed
-# ansible_python_interpreter: "/opt/bin/python"
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Kubernetes 1.5 added a new flag to the apiserver to disable anonymous auth. In previos versions, anonymous auth was
-# not implemented. As the new flag defaults to true, we have to explicitly disable it. Change this line if you want the
-# 1.5 default behavior. The flag is actually only added if the used kubernetes version is >= 1.5
-kube_api_anonymous_auth: false
-
-#
-# For some things, kubelet needs to load kernel modules.  For example, dynamic kernel services are needed
-# for mounting persistent volumes into containers.  These may not be loaded by preinstall kubernetes
-# processes.  For example, ceph and rbd backed volumes.  Set to true to allow kubelet to load kernel
-# modules.
-#
-kubelet_load_modules: false
-
-# Users to create for basic auth in Kubernetes API via HTTP
-kube_api_pwd: "changeme"
-kube_users:
-  kube:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-  root:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-
-# For some environments, each node has a publicly accessible
-# address and an address it should bind services to.  These are
-# really inventory level variables, but described here for consistency.
-#
-# When advertising access, the access_ip will be used, but will defer to
-# ip and then the default ansible ip when unspecified.
-#
-# When binding to restrict access, the ip variable will be used, but will
-# defer to the default ansible ip when unspecified.
-#
-# The ip variable is used for specific address binding, e.g. listen address
-# for etcd.  This is use to help with environments like Vagrant or multi-nic
-# systems where one address should be preferred over another.
-# ip: 10.2.2.2
-#
-# The access_ip variable is used to define how other nodes should access
-# the node.  This is used in flannel to allow other flannel nodes to see
-# this node for example.  The access_ip is really useful AWS and Google
-# environments where the nodes are accessed remotely by the "public" ip,
-# but don't know about that address themselves.
-# access_ip: 1.1.1.1
-
-# Etcd access modes:
-# Enable multiaccess to configure clients to access all of the etcd members directly
-# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-# This may be the case if clients support and loadbalance multiple etcd servers  natively.
-etcd_multiaccess: true
-
-# Assume there are no internal loadbalancers for apiservers exist and listen on
-# kube_apiserver_port (default 443)
-loadbalancer_apiserver_localhost: true
-
-# Choose network plugin (calico, canal, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
-kube_network_plugin: calico
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network total size (optional). This is the prefix of the
-# entire network. Must be unused in your environment.
-# kube_network_prefix: 18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network.  With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# With calico it is possible to distributed routes with border routers of the datacenter.
-peer_with_router: false
-# Warning : enabling router peering will disable calico's default behavior ('node mesh').
-# The subnets of each nodes will be distributed by the datacenter router
-
-# API Server service IP address in Kubernetes internal network.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-# The port the API Server will be listening on.
-kube_apiserver_port: 443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-# local loadbalancer should use this port instead - default to kube_apiserver_port
-nginx_kube_apiserver_port: "{{ kube_apiserver_port }}"
-
-# Internal DNS configuration.
-# Kubernetes can create and mainatain its own DNS server to resolve service names
-# into appropriate IP addresses. It's highly advisable to run such DNS server,
-# as it greatly simplifies configuration of your applications - you can use
-# service names instead of magic environment variables.
-
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: dnsmasq_kubedns
-
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
+
+## The access_ip variable is used to define how other nodes should access
+## the node.  This is used in flannel to allow other flannel nodes to see
+## this node for example.  The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+### LOADBALANCING AND ACCESS MODES
+## Enable multiaccess to configure etcd clients to access all of the etcd members directly
+## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+## This may be the case if clients support and loadbalance multiple etcd servers  natively.
+#etcd_multiaccess: true
+
+## External LB example config
+## apiserver_loadbalancer_domain_name: "elb.some.domain"
+#loadbalancer_apiserver:
+#  address: 1.2.3.4
+#  port: 1234
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules.  For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers.  These may not be loaded by preinstall kubernetes
+## processes.  For example, ceph and rbd backed volumes.  Set to true to allow kubelet to load kernel
+## modules.
+# kubelet_load_modules: false
+
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
+
+## With calico it is possible to distribute routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each nodes will be distributed by the datacenter router
+#peer_with_router: false
 
 ## Upstream dns servers used by dnsmasq
 #upstream_dns_servers:
 #  - 8.8.8.8
 #  - 8.8.4.4
 
-dns_domain: "{{ cluster_name }}"
-
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
 
-# There are some changes specific to the cloud providers
-# for instance we need to encapsulate packets with some network plugins
-# If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
-# When openstack is used make sure to source in the openstack credentials
-# like you would do when using nova-client before starting the playbook.
-# When azure is used, you need to also set the following variables.
-# cloud_provider:
-
-# see docs/azure.md for details on how to get these values
+## When azure is used, you need to also set the following variables.
+## see docs/azure.md for details on how to get these values
 #azure_tenant_id:
 #azure_subscription_id:
 #azure_aad_client_id:
@@ -182,46 +66,25 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address')
 #azure_vnet_name:
 #azure_route_table_name:
 
-
 ## Set these proxy values in order to update docker daemon to use proxies
-# http_proxy: ""
-# https_proxy: ""
-# no_proxy: ""
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false"
-docker_bin_dir: "/usr/bin"
+#http_proxy: ""
+#https_proxy: ""
+#no_proxy: ""
 
 ## Uncomment this if you want to force overlay/overlay2 as docker storage driver
 ## Please note that overlay2 is only supported on newer kernels
 #docker_storage_options: -s overlay2
 
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# default packages to install within the cluster
-kpm_packages: []
+## Default packages to install within the cluster, f.e:
+#kpm_packages:
 #  - name: kube-system/grafana
 
-# Settings for containerized control plane (etcd/kubelet)
-rkt_version: 1.21.0
-etcd_deployment_type: docker
-kubelet_deployment_type: docker
-vault_deployment_type: docker
-
-efk_enabled: false
 
 ## Certificate Management
 ## This setting determines whether certs are generated via scripts or whether a
 ## cluster of Hashicorp's Vault is started to issue certificates (using etcd
 ## as a backend). Options are "script" or "vault"
-cert_management: script
+#cert_management: script
 
-# Please specify true if you want to perform a kernel upgrade
-kernel_upgrade: false
+## Please specify true if you want to perform a kernel upgrade
+#kernel_upgrade: false
diff --git a/inventory/group_vars/calico-rr.yml b/inventory/group_vars/calico-rr.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5de7d734720a9e44cee0d8f6d167185a91e3a502
--- /dev/null
+++ b/inventory/group_vars/calico-rr.yml
@@ -0,0 +1,33 @@
+## Required for bootstrap-os/preinstall/download roles and setting facts
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+docker_bin_dir: /usr/bin
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+kube_service_addresses: 10.233.0.0/18
+kube_apiserver_port: 443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: dnsmasq_kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# IP address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
diff --git a/inventory/group_vars/etcd.yml b/inventory/group_vars/etcd.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8769967fae923deca5a051d2f3aa4fa90009c3ca
--- /dev/null
+++ b/inventory/group_vars/etcd.yml
@@ -0,0 +1,38 @@
+## Required for bootstrap-os/preinstall/download roles and setting facts
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+docker_bin_dir: /usr/bin
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# Settings for containerized control plane (etcd/secrets)
+etcd_deployment_type: docker
+cert_management: script
+vault_deployment_type: docker
+
+kube_service_addresses: 10.233.0.0/18
+kube_apiserver_port: 443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: dnsmasq_kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# IP address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b7c1552023642a1aca0cc9a2f9dfdb3eadbb7065
--- /dev/null
+++ b/inventory/group_vars/k8s-cluster.yml
@@ -0,0 +1,113 @@
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# the kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.5.3
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+kube_api_pwd: "changeme"
+kube_users:
+  kube:
+    pass: "{{kube_api_pwd}}"
+    role: admin
+  root:
+    pass: "{{kube_api_pwd}}"
+    role: admin
+
+# Choose network plugin (calico, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: calico
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network.  With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: dnsmasq_kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# IP address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false"
+docker_bin_dir: "/usr/bin"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: docker
+cert_management: script
+vault_deployment_type: docker
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Monitoring apps for k8s
+efk_enabled: false
diff --git a/inventory/group_vars/kube-master.yml b/inventory/group_vars/kube-master.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7e75bf859ac350a0f5bb263f1f8339ea0c8d8041
--- /dev/null
+++ b/inventory/group_vars/kube-master.yml
@@ -0,0 +1,9 @@
+# Kubernetes 1.5 added a new flag to the apiserver to disable anonymous auth. In previous versions, anonymous auth was
+# not implemented. As the new flag defaults to true, we have to explicitly disable it. Change this line if you want the
+# 1.5 default behavior. The flag is actually only added if the used kubernetes version is >= 1.5
+kube_api_anonymous_auth: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+kube_version: v1.5.3
diff --git a/roles/adduser/defaults/main.yml b/roles/adduser/defaults/main.yml
index b3a69229c15aca990cd255d6c0f0ddf68a4f9daa..ab24b6cf48b4f6a5027950278b3b3304ff315f73 100644
--- a/roles/adduser/defaults/main.yml
+++ b/roles/adduser/defaults/main.yml
@@ -1,4 +1,6 @@
 ---
+kube_cert_group: kube-cert
+
 addusers:
   etcd:
     name: etcd
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index ea95048f657871729372c0b03267d8360c3fc29d..23e87bbbd3047442a377e4dbdebfc6f4530b24c3 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -18,6 +18,7 @@ download_localhost: False
 download_always_pull: False
 
 # Versions
+kube_version: v1.5.3
 etcd_version: v3.0.6
 #TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
diff --git a/roles/kubernetes-apps/kpm/tasks/main.yml b/roles/kubernetes-apps/kpm/tasks/main.yml
index 7e88cc30db2b363d6c6af8409dbaf8363908f63b..9aadc07eab709ac45e40a035cc21754252e8bd7c 100644
--- a/roles/kubernetes-apps/kpm/tasks/main.yml
+++ b/roles/kubernetes-apps/kpm/tasks/main.yml
@@ -4,7 +4,7 @@
     name: "kpm"
     state: "present"
     version: "0.16.1"
-  when: kpm_packages | length > 0
+  when: kpm_packages | default([]) | length > 0
 
 - name: manage kubernetes applications
   kpm:
@@ -14,7 +14,7 @@
     version: "{{item.version | default(omit)}}"
     variables: "{{item.variables | default(omit)}}"
     name: "{{item.name}}"
-  with_items: "{{kpm_packages}}"
+  with_items: "{{kpm_packages|default([])}}"
   register: kpmresults
   environment:
     PATH: "{{ ansible_env.PATH }}:{{ bin_dir }}"
diff --git a/roles/kubernetes/node/templates/nginx.conf.j2 b/roles/kubernetes/node/templates/nginx.conf.j2
index 6e8622ed4e52cb63d99358747df00c2e58c9fd78..360b024f80244567779f5b2f1bd0ee3edaa7ea50 100644
--- a/roles/kubernetes/node/templates/nginx.conf.j2
+++ b/roles/kubernetes/node/templates/nginx.conf.j2
@@ -16,7 +16,7 @@ stream {
         }
 
         server {
-            listen        127.0.0.1:{{ nginx_kube_apiserver_port }};
+            listen        127.0.0.1:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }};
             proxy_pass    kube_apiserver;
             proxy_timeout 10m;
             proxy_connect_timeout 1s;
diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml
index 6aa7d7e7cb8915c49c9b0abb8d1d0cec13d953d8..59076c2042b9de80074e3837fa51d51df7ba653f 100644
--- a/roles/kubernetes/preinstall/defaults/main.yml
+++ b/roles/kubernetes/preinstall/defaults/main.yml
@@ -17,6 +17,8 @@ common_required_pkgs:
 # GCE docker repository
 disable_ipv6_dns: false
 
+kube_cert_group: kube-cert
+kube_config_dir: /etc/kubernetes
 
 # For the openstack integration kubelet will need credentials to access
 # openstack apis like nova and cinder. Per default this values will be
@@ -27,9 +29,6 @@ openstack_password: "{{ lookup('env','OS_PASSWORD')  }}"
 openstack_region: "{{ lookup('env','OS_REGION_NAME')  }}"
 openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true)  }}"
 
-# All clients access each node individually, instead of using a load balancer.
-etcd_multiaccess: true
-
 # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
 # for hostnet pods and infra needs
 resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml
index 214aecceff0937b03f6520dc25e3e94923f131d1..2481fcd7fb0e2877a1a257cd75ea9be50826a9ab 100644
--- a/roles/kubernetes/preinstall/tasks/set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/set_facts.yml
@@ -20,8 +20,8 @@
 
 - set_fact:
     kube_apiserver_endpoint: |-
-      {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
-           https://localhost:{{ nginx_kube_apiserver_port }}
+      {% if not is_kube_master and loadbalancer_apiserver_localhost|default(false) -%}
+           https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
       {%- elif is_kube_master and loadbalancer_apiserver is not defined -%}
            http://127.0.0.1:{{ kube_apiserver_insecure_port }}
       {%- else -%}
@@ -57,7 +57,7 @@
       {%- endfor %}
 
 - set_fact:
-    etcd_access_endpoint: "{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
+    etcd_access_endpoint: "{% if etcd_multiaccess|default(true) %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
 
 - set_fact:
     etcd_member_name: |-
diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e6177857e33b46a30919ceffcd12fe5cec11011b
--- /dev/null
+++ b/roles/kubernetes/secrets/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+kube_cert_group: kube-cert
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml
index eefed471f2bae7c79e6030c27dbe82c7b888e0cd..549ece3b30dbcfe4379f324eb44401cbcca83b7f 100644
--- a/roles/network_plugin/calico/tasks/main.yml
+++ b/roles/network_plugin/calico/tasks/main.yml
@@ -223,7 +223,7 @@
    "apiVersion": "v1",
    "metadata": {"node": "{{ inventory_hostname }}",
      "scope": "node",
-     "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"]) }}"}
+     "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(hostvars[item]["ansible_default_ipv4"]["address"]) }}"}
    }'
    | {{ bin_dir }}/calicoctl create --skip-exists -f -
   with_items: "{{ groups['calico-rr'] | default([]) }}"
@@ -245,7 +245,7 @@
          peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster'])
 
 - name: Calico (old) | Configure peering with route reflectors
-  shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ hostvars[item]['calico_rr_ip']|default(hostvars[item]['ip']) }} as {{ local_as | default(global_as_num) }}"
+  shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ hostvars[item]['calico_rr_ip']|default(hostvars[item]['ip'])|default(hostvars[item]['ansible_default_ipv4']['address']) }} as {{ local_as | default(global_as_num) }}"
   with_items: "{{ groups['calico-rr'] | default([]) }}"
   when: (legacy_calicoctl and
          peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster']
diff --git a/roles/rkt/defaults/main.yml b/roles/rkt/defaults/main.yml
index 6794429a5e0ec639768813b8d12549cfd261b53f..c73a6fd77874e1cfaac671ab1ef70003c82659df 100644
--- a/roles/rkt/defaults/main.yml
+++ b/roles/rkt/defaults/main.yml
@@ -1,6 +1,6 @@
 ---
 
-rkt_version: 1.12.0
+rkt_version: 1.21.0
 rkt_pkg_version: "{{ rkt_version }}-1"
 rkt_download_src: https://github.com/coreos/rkt
 rkt_download_url: "{{ rkt_download_src }}/releases/download/v{{ rkt_version }}"