diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 259c45614f61ada2ffa5a2261f8600af94a5332e..b7f3f7f4ab1bd92a044a547f8ce668a2017a0586 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -62,6 +62,7 @@ before_script:
   KUBELET_DEPLOYMENT: "docker"
   VAULT_DEPLOYMENT: "docker"
   WEAVE_CPU_LIMIT: "100m"
+  AUTHORIZATION_MODES: "{ 'authorization_modes': [] }"
   MAGIC: "ci check this"
 
 .gce: &gce
@@ -132,6 +133,7 @@ before_script:
       -e local_release_dir=${PWD}/downloads
       -e resolvconf_mode=${RESOLVCONF_MODE}
       -e vault_deployment_type=${VAULT_DEPLOYMENT}
+      -e "${AUTHORIZATION_MODES}"
       --limit "all:!fake_hosts"
       cluster.yml
 
@@ -160,6 +162,7 @@ before_script:
       -e resolvconf_mode=${RESOLVCONF_MODE}
       -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
       -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
+      -e "${AUTHORIZATION_MODES}"
       --limit "all:!fake_hosts"
       $PLAYBOOK;
       fi
@@ -190,6 +193,7 @@ before_script:
       -e etcd_deployment_type=${ETCD_DEPLOYMENT}
       -e kubedns_min_replicas=1
       -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      -e "${AUTHORIZATION_MODES}"
       --limit "all:!fake_hosts"
       cluster.yml;
       fi
@@ -232,6 +236,7 @@ before_script:
       -e etcd_deployment_type=${ETCD_DEPLOYMENT}
       -e kubedns_min_replicas=1
       -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      -e "${AUTHORIZATION_MODES}"
       --limit "all:!fake_hosts"
       cluster.yml;
       fi
@@ -373,6 +378,15 @@ before_script:
   CLUSTER_MODE: separate
   STARTUP_SCRIPT: ""
 
+.ubuntu_flannel_rbac_variables: &ubuntu_flannel_rbac_variables
+# stage: deploy-gce-special
+  AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
+  KUBE_NETWORK_PLUGIN: flannel
+  CLOUD_IMAGE: ubuntu-1604-xenial
+  CLOUD_REGION: europe-west1-b
+  CLUSTER_MODE: separate
+  STARTUP_SCRIPT: ""
+
 # Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
 coreos-calico-sep:
   stage: deploy-gce-part1
@@ -598,6 +612,17 @@ ubuntu-vault-sep:
   except: ['triggers']
   only: ['master', /^pr-.*$/]
 
+ubuntu-flannel-rbac-sep:
+  stage: deploy-gce-special
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *ubuntu_flannel_rbac_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
 # Premoderated with manual actions
 ci-authorized:
   <<: *job
diff --git a/docs/vars.md b/docs/vars.md
index 4b9da186e0e3c3c39480da80bef17ea699f7ea55..537aa57532fd5fb06fe4b9f451ae716257844a82 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -67,6 +67,11 @@ following default cluster paramters:
   OpenStack (default is unset)
 * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
   Kubernetes
+* *authorization_modes* - A list of [authorization modes](
+https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
+  that the cluster should be configured for. Defaults to `[]` (i.e. no authorization).
+  Note: `RBAC` support is currently experimental and does not work with either Calico
+  or Vault deployments. Upgrading a cluster from non-RBAC to RBAC is not tested.
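+  For example, to enable RBAC authorization, set `authorization_modes: ['RBAC']`
+  in your group vars before deploying with `cluster.yml`.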
 
 Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
 private addresses, make sure to pick another values for ``kube_service_addresses``
diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml
index 2787472c878c40c931f777db50e51dab75e27455..d42b2ffed23e2785e6150ccf0ab2a2fc47ca00fd 100644
--- a/roles/kubernetes-apps/ansible/defaults/main.yml
+++ b/roles/kubernetes-apps/ansible/defaults/main.yml
@@ -41,3 +41,7 @@ netchecker_server_memory_requests: 64M
 etcd_cert_dir: "/etc/ssl/etcd/ssl"
 canal_cert_dir: "/etc/canal/certs"
 
+# Resource types that are laid down only when RBAC is enabled
+rbac_resources:
+  - sa
+  - clusterrole
+  - clusterrolebinding
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index 4e7236df62d32c55a5292431d171bd0b17db4ece..e7bd934de3e489afb0a20a843e584856de24c53b 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -13,11 +13,35 @@
     src: "{{item.file}}"
     dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
-    - {name: kube-dns, file: kubedns-deploy.yml, type: deployment}
-    - {name: kube-dns, file: kubedns-svc.yml, type: svc}
+    - {name: kubedns, file: kubedns-sa.yml, type: sa}
+    - {name: kubedns, file: kubedns-deploy.yml, type: deployment}
+    - {name: kubedns, file: kubedns-svc.yml, type: svc}
+    - {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa}
+    - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole}
+    - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding}
     - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
   register: manifests
-  when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
+  when:
+    - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled or item.type not in rbac_resources
+  tags: dnsmasq
+
+# See https://github.com/kubernetes/kubernetes/issues/45084; only needed for "old" kube-dns (versions < 1.11.0)
+- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
+  command: >
+    {{bin_dir}}/kubectl patch clusterrole system:kube-dns
+    --patch='{
+               "rules": [
+                 {
+                   "apiGroups" : [""],
+                   "resources" : ["endpoints", "services"],
+                   "verbs": ["list", "watch", "get"]
+                 }
+               ]
+             }'
+  when:
+    - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
   tags: dnsmasq
 
 - name: Kubernetes Apps | Start Resources
@@ -29,6 +53,7 @@
     filename: "{{kube_config_dir}}/{{item.item.file}}"
     state: "{{item.changed | ternary('latest','present') }}"
   with_items: "{{ manifests.results }}"
+  failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
   when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
   tags: dnsmasq
 
diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
index aae75d0914fff2dd2ab19fd00769a78cb07dafa7..d4bfb7a4f3cb201b2a6b1b77d2f4eff1139a537c 100644
--- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml
+++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
@@ -5,10 +5,15 @@
   with_items:
     - {file: netchecker-agent-ds.yml.j2, type: ds, name: netchecker-agent}
     - {file: netchecker-agent-hostnet-ds.yml.j2, type: ds, name: netchecker-agent-hostnet}
+    - {file: netchecker-server-sa.yml.j2, type: sa, name: netchecker-server}
+    - {file: netchecker-server-clusterrole.yml.j2, type: clusterrole, name: netchecker-server}
+    - {file: netchecker-server-clusterrolebinding.yml.j2, type: clusterrolebinding, name: netchecker-server}
     - {file: netchecker-server-deployment.yml.j2, type: po, name: netchecker-server}
     - {file: netchecker-server-svc.yml.j2, type: svc, name: netchecker-service}
   register: manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+    - rbac_enabled or item.type not in rbac_resources
 
 #FIXME: remove if kubernetes/features#124 is implemented
 - name: Kubernetes Apps | Purge old Netchecker daemonsets
@@ -31,4 +36,5 @@
     filename: "{{kube_config_dir}}/{{item.item.file}}"
     state: "{{item.changed | ternary('latest','present') }}"
   with_items: "{{ manifests.results }}"
+  failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
   when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a194426c666e811e0304c351bc1606045f402245
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml
@@ -0,0 +1,32 @@
+# Copyright 2016 The Kubernetes Authors. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: cluster-proportional-autoscaler
+  namespace: {{ system_namespace }}
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["list"]
+  - apiGroups: [""]
+    resources: ["replicationcontrollers/scale"]
+    verbs: ["get", "update"]
+  - apiGroups: ["extensions"]
+    resources: ["deployments/scale", "replicasets/scale"]
+    verbs: ["get", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "create"]
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a368ae333b8d8bd18ae9df5a3e81b4ba866fdb57
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml
@@ -0,0 +1,27 @@
+# Copyright 2016 The Kubernetes Authors. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: cluster-proportional-autoscaler
+  namespace: {{ system_namespace }}
+subjects:
+  - kind: ServiceAccount
+    name: cluster-proportional-autoscaler
+    namespace: {{ system_namespace }}
+roleRef:
+  kind: ClusterRole
+  name: cluster-proportional-autoscaler
+  apiGroup: rbac.authorization.k8s.io
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9544a7dd9760d27079bdacda9f42331c56950457
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml
@@ -0,0 +1,19 @@
+# Copyright 2016 The Kubernetes Authors. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: cluster-proportional-autoscaler
+  namespace: {{ system_namespace }}
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml
index a1d5455adc654b47e793bacc717ec47e6b096cbf..9e046229025c0544072ab90370cee91be2df7672 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml
@@ -16,7 +16,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kubedns-autoscaler
-  namespace: kube-system
+  namespace: {{ system_namespace }}
   labels:
     k8s-app: kubedns-autoscaler
     kubernetes.io/cluster-service: "true"
@@ -39,11 +39,13 @@ spec:
                 memory: "10Mi"
         command:
           - /cluster-proportional-autoscaler
-          - --namespace=kube-system
+          - --namespace={{ system_namespace }}
           - --configmap=kubedns-autoscaler
           # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
           - --target=Deployment/kube-dns
           - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
           - --logtostderr=true
           - --v=2
-
+{% if rbac_enabled %}
+      serviceAccountName: cluster-proportional-autoscaler
+{% endif %}
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml
index 3f07aa9058e7232ceb937bed7d295f8298e0d47e..7e4615676f918e10301b5d4d7985a81a67431571 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml
@@ -151,4 +151,6 @@ spec:
             memory: 20Mi
             cpu: 10m
       dnsPolicy: Default  # Don't use cluster DNS.
-
+{% if rbac_enabled %}
+      serviceAccountName: kube-dns
+{% endif %}
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e520ccbfcd9df09fd556ae4427808aa925967668
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-dns
+  namespace: {{ system_namespace }}
+  labels:
+    kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.j2
deleted file mode 100644
index 10a74da846c1dcae6b97c019c65e12f154f1eeb0..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: DaemonSet
-metadata:
-  labels:
-    app: netchecker-agent-hostnet
-  name: netchecker-agent-hostnet
-  namespace: {{ netcheck_namespace }}
-spec:
-  template:
-    metadata:
-      name: netchecker-agent-hostnet
-      labels:
-        app: netchecker-agent-hostnet
-    spec:
-      hostNetwork: True
-{% if kube_version | version_compare('v1.6', '>=') %}
-      dnsPolicy: ClusterFirstWithHostNet
-{% endif %}
-      containers:
-        - name: netchecker-agent
-          image: "{{ agent_img }}"
-          env:
-            - name: MY_POD_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: metadata.name
-            - name: MY_NODE_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: spec.nodeName
-          args:
-            - "-v=5"
-            - "-alsologtostderr=true"
-            - "-serverendpoint=netchecker-service:8081"
-            - "-reportinterval={{ agent_report_interval }}"
-          imagePullPolicy: {{ k8s_image_pull_policy }}
-          resources:
-            limits:
-              cpu: {{ netchecker_agent_cpu_limit }}
-              memory: {{ netchecker_agent_memory_limit }}
-            requests:
-              cpu: {{ netchecker_agent_cpu_requests }}
-              memory: {{ netchecker_agent_memory_requests }}
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..7a8c1d2731d6c4583745dd7ca5847e914930843e
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2
@@ -0,0 +1,9 @@
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: netchecker-server
+  namespace: {{ netcheck_namespace }}
+rules:
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs: ["list"]
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..54c1eaf942a5a6f4adc3af5bce8bdadbc189cccc
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2
@@ -0,0 +1,13 @@
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: netchecker-server
+  namespace: {{ netcheck_namespace }}
+subjects:
+  - kind: ServiceAccount
+    name: netchecker-server
+    namespace: {{ netcheck_namespace }}
+roleRef:
+  kind: ClusterRole
+  name: netchecker-server
+  apiGroup: rbac.authorization.k8s.io
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
index 6c52352fb5523199c0212cec447851a890967ced..c3dbf3cb589577405f592cd443479797c704d463 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
@@ -31,3 +31,6 @@ spec:
             - "-logtostderr"
             - "-kubeproxyinit"
             - "-endpoint=0.0.0.0:8081"
+{% if rbac_enabled %}
+      serviceAccountName: netchecker-server
+{% endif %}
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..26d15f3a860c3cebef3c6b321818dae073cbb8d3
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: netchecker-server
+  namespace: {{ netcheck_namespace }}
+  labels:
+    kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index f12875da2bcab9c950520d228bd1ff81966e6fec..2d26c5a0fd31c737cdbe4e6b4ec7966a9fc6d42c 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -10,10 +10,36 @@
     mode: 0755
   register: helm_container
 
+- name: Helm | Lay Down Helm Manifests (RBAC)
+  template:
+    src: "{{item.file}}"
+    dest: "{{kube_config_dir}}/{{item.file}}"
+  with_items:
+    - {name: tiller, file: tiller-sa.yml, type: sa}
+    - {name: tiller, file: tiller-clusterrolebinding.yml, type: clusterrolebinding}
+  register: manifests
+  when: inventory_hostname == groups['kube-master'][0] and rbac_enabled
+
+- name: Helm | Apply Helm Manifests (RBAC)
+  kube:
+    name: "{{item.item.name}}"
+    namespace: "{{ system_namespace }}"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "{{item.item.type}}"
+    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    state: "{{item.changed | ternary('latest','present') }}"
+  with_items: "{{ manifests.results }}"
+  failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
+  when: inventory_hostname == groups['kube-master'][0] and rbac_enabled
+
 - name: Helm | Install/upgrade helm
   command: "{{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}"
   when: helm_container.changed
 
+- name: Helm | Patch tiller deployment for RBAC
+  command: >
+    {{ bin_dir }}/kubectl patch deployment tiller-deploy
+    -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
+    -n {{ system_namespace }}
+  when: rbac_enabled
+
 - name: Helm | Set up bash completion
   shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh"
   when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] )
diff --git a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0ac9341eebb4a552cb21c758d8826005a2110db7
--- /dev/null
+++ b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml
@@ -0,0 +1,13 @@
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: tiller
+  namespace: {{ system_namespace }}
+subjects:
+  - kind: ServiceAccount
+    name: tiller
+    namespace: {{ system_namespace }}
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
diff --git a/roles/kubernetes-apps/helm/templates/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/tiller-sa.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c840f57f8c5ed8be940b99c46fee9763ca45f205
--- /dev/null
+++ b/roles/kubernetes-apps/helm/templates/tiller-sa.yml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: tiller
+  namespace: {{ system_namespace }}
+  labels:
+    kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml
index 785ef43afd62eee7020fdce6c46611e607e3586d..7cfe9cc9aa7b4fa67a73bd5bab215725b2c07e95 100644
--- a/roles/kubernetes/master/defaults/main.yml
+++ b/roles/kubernetes/master/defaults/main.yml
@@ -64,4 +64,4 @@ apiserver_custom_flags: []
 
 controller_mgr_custom_flags: []
 
-scheduler_custom_flags: []
\ No newline at end of file
+scheduler_custom_flags: []
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index dadef4bf5dc9c238f3f540d19f1ab34b7e3ec280..6922e6a518db416d1bd038aa89e6ebc62a5c72dd 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -60,12 +60,11 @@
   when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]
   tags: apps
 
-- name: Write kube-controller-manager manifest
+- name: Write kube-scheduler kubeconfig
   template:
-    src: manifests/kube-controller-manager.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
-  notify: Master | wait for kube-controller-manager
-  tags: kube-controller-manager
+    src: kube-scheduler-kubeconfig.yaml.j2
+    dest: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
+  tags: kube-scheduler
 
 - name: Write kube-scheduler manifest
   template:
@@ -74,6 +73,19 @@
   notify: Master | wait for kube-scheduler
   tags: kube-scheduler
 
+- name: Write kube-controller-manager kubeconfig
+  template:
+    src: kube-controller-manager-kubeconfig.yaml.j2
+    dest: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
+  tags: kube-controller-manager
+
+- name: Write kube-controller-manager manifest
+  template:
+    src: manifests/kube-controller-manager.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
+  notify: Master | wait for kube-controller-manager
+  tags: kube-controller-manager
+
 - include: post-upgrade.yml
   tags: k8s-post-upgrade
 
diff --git a/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 b/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..887d022c1f8a737047243b2801fb8b2b92977977
--- /dev/null
+++ b/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Config
+clusters:
+- name: local
+  cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.pem
+    server: {{ kube_apiserver_endpoint }}
+users:
+- name: kube-controller-manager
+  user:
+    client-certificate: {{ kube_cert_dir }}/kube-controller-manager.pem
+    client-key: {{ kube_cert_dir }}/kube-controller-manager-key.pem
+contexts:
+- context:
+    cluster: local
+    user: kube-controller-manager
+  name: kube-controller-manager-{{ cluster_name }}
+current-context: kube-controller-manager-{{ cluster_name }}
diff --git a/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 b/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..974b72427fd36edfe10a21b224ef67179d4dab2f
--- /dev/null
+++ b/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Config
+clusters:
+- name: local
+  cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.pem
+    server: {{ kube_apiserver_endpoint }}
+users:
+- name: kube-scheduler
+  user:
+    client-certificate: {{ kube_cert_dir }}/kube-scheduler.pem
+    client-key: {{ kube_cert_dir }}/kube-scheduler-key.pem
+contexts:
+- context:
+    cluster: local
+    user: kube-scheduler
+  name: kube-scheduler-{{ cluster_name }}
+current-context: kube-scheduler-{{ cluster_name }}
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index bf4979596c34a77646d3cda6781dc02d001eec15..24094fefb9d1591c09dc0b1456091449d0e8711c 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -81,6 +81,9 @@ spec:
 {% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=')  %}
     - --anonymous-auth={{ kube_api_anonymous_auth }}
 {% endif %}
+{% if authorization_modes %}
+    - --authorization-mode={{ authorization_modes|join(',') }}
+{% endif %}
 {% if apiserver_custom_flags is string %}
     - {{ apiserver_custom_flags }}
 {% else %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
index d3f8a23a5d0003a841839b96db221154a48dd5aa..a6b69fa149e7e5989c561cefd419f0f640cde898 100644
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -24,7 +24,7 @@ spec:
     command:
     - /hyperkube
     - controller-manager
-    - --master={{ kube_apiserver_endpoint }}
+    - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml
     - --leader-elect=true
     - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
     - --root-ca-file={{ kube_cert_dir }}/ca.pem
@@ -35,6 +35,9 @@ spec:
     - --node-monitor-period={{ kube_controller_node_monitor_period }}
     - --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }}
     - --v={{ kube_log_level }}
+{% if rbac_enabled %}
+    - --use-service-account-credentials
+{% endif %}
 {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
     - --cloud-provider={{cloud_provider}}
     - --cloud-config={{ kube_config_dir }}/cloud_config
@@ -61,20 +64,36 @@ spec:
       initialDelaySeconds: 30
       timeoutSeconds: 10
     volumeMounts:
-    - mountPath: {{ kube_cert_dir }}
-      name: ssl-certs-kubernetes
+    - mountPath: /etc/ssl/certs
+      name: ssl-certs-host
+      readOnly: true
+    - mountPath: "{{kube_config_dir}}/ssl"
+      name: etc-kube-ssl
+      readOnly: true
+    - mountPath: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
+      name: kubeconfig
       readOnly: true
 {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere" ] %}
-    - mountPath: {{ kube_config_dir }}/cloud_config
+    - mountPath: "{{ kube_config_dir }}/cloud_config"
       name: cloudconfig
       readOnly: true
 {% endif %}
   volumes:
-  - hostPath:
-      path: {{ kube_cert_dir }}
-    name: ssl-certs-kubernetes
+  - name: ssl-certs-host
+    hostPath:
+{% if ansible_os_family == 'RedHat' %}
+      path: /etc/pki/tls
+{% else %}
+      path: /usr/share/ca-certificates
+{% endif %}
+  - name: etc-kube-ssl
+    hostPath:
+      path: "{{ kube_config_dir }}/ssl"
+  - name: kubeconfig
+    hostPath:
+      path: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
 {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
   - hostPath:
-      path: {{ kube_config_dir }}/cloud_config
+      path: "{{ kube_config_dir }}/cloud_config"
     name: cloudconfig
 {% endif %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
index 441f991eb7f26bf59f9716677168cf24bb83e88f..fdc16bf7fe4d3dcb7a84c4f07f6746820d2d9633 100644
--- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-scheduler
-  namespace: kube-system
+  namespace: {{ system_namespace }}
   labels:
     k8s-app: kube-scheduler
 spec:
@@ -25,7 +25,7 @@ spec:
     - /hyperkube
     - scheduler
     - --leader-elect=true
-    - --master={{ kube_apiserver_endpoint }}
+    - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml
     - --v={{ kube_log_level }}
 {% if scheduler_custom_flags is string %}
     - {{ scheduler_custom_flags }}
@@ -41,3 +41,27 @@ spec:
         port: 10251
       initialDelaySeconds: 30
       timeoutSeconds: 10
+    volumeMounts:
+    - mountPath: /etc/ssl/certs
+      name: ssl-certs-host
+      readOnly: true
+    - mountPath: "{{ kube_config_dir }}/ssl"
+      name: etc-kube-ssl
+      readOnly: true
+    - mountPath: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
+      name: kubeconfig
+      readOnly: true
+  volumes:
+  - name: ssl-certs-host
+    hostPath:
+{% if ansible_os_family == 'RedHat' %}
+      path: /etc/pki/tls
+{% else %}
+      path: /usr/share/ca-certificates
+{% endif %}
+  - name: etc-kube-ssl
+    hostPath:
+      path: "{{ kube_config_dir }}/ssl"
+  - name: kubeconfig
+    hostPath:
+      path: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml
index cb7a10c6534a4d1718473b53324fbb936bea11b4..ad4cbacf1bc850ff775e81ad8cf8c90b9defe5ab 100644
--- a/roles/kubernetes/node/tasks/install.yml
+++ b/roles/kubernetes/node/tasks/install.yml
@@ -16,7 +16,7 @@
 - include: "install_{{ kubelet_deployment_type }}.yml"
 
 - name: install | Write kubelet systemd init file
-  template: 
+  template:
     src: "kubelet.{{ kubelet_deployment_type }}.service.j2"
     dest: "/etc/systemd/system/kubelet.service"
     backup: "yes"
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index f09845f762f0ba90c8b8aa943c88853733565bce..e0558f8cd0a56687e32d8fa753dfeb3bf2640441 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -30,9 +30,12 @@
 
 - name: write the kubecfg (auth) file for kubelet
   template:
-    src: node-kubeconfig.yaml.j2
-    dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
+    src: "{{ item }}-kubeconfig.yaml.j2"
+    dest: "{{ kube_config_dir }}/{{ item }}-kubeconfig.yaml"
     backup: yes
+  with_items:
+    - node
+    - kube-proxy
   notify: restart kubelet
   tags: kubelet
 
diff --git a/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..18c47cd3ed5cc6de37d33f1b8ee8d0b625444de9
--- /dev/null
+++ b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Config
+clusters:
+- name: local
+  cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.pem
+    server: {{ kube_apiserver_endpoint }}
+users:
+- name: kube-proxy
+  user:
+    client-certificate: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}.pem
+    client-key: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}-key.pem
+contexts:
+- context:
+    cluster: local
+    user: kube-proxy
+  name: kube-proxy-{{ cluster_name }}
+current-context: kube-proxy-{{ cluster_name }}
diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
index 9b7d5385752fc0e70db9577355c9c63f47078ba9..65feeee65c73fe8d7de7f384db70889872cccb51 100644
--- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
@@ -25,10 +25,7 @@ spec:
     - /hyperkube
     - proxy
     - --v={{ kube_log_level }}
-    - --master={{ kube_apiserver_endpoint }}
-{% if not is_kube_master %}
-    - --kubeconfig={{kube_config_dir}}/node-kubeconfig.yaml
-{% endif %}
+    - --kubeconfig={{kube_config_dir}}/kube-proxy-kubeconfig.yaml
     - --bind-address={{ ip | default(ansible_default_ipv4.address) }}
     - --cluster-cidr={{ kube_pods_subnet }}
     - --proxy-mode={{ kube_proxy_mode }}
@@ -41,14 +38,14 @@ spec:
     - mountPath: /etc/ssl/certs
       name: ssl-certs-host
       readOnly: true
-    - mountPath: {{kube_config_dir}}/node-kubeconfig.yaml
-      name: "kubeconfig"
+    - mountPath: "{{ kube_config_dir }}/ssl"
+      name: etc-kube-ssl
       readOnly: true
-    - mountPath: {{kube_config_dir}}/ssl
-      name: "etc-kube-ssl"
+    - mountPath: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml"
+      name: kubeconfig
       readOnly: true
     - mountPath: /var/run/dbus
-      name: "var-run-dbus"
+      name: var-run-dbus
       readOnly: false
   volumes:
   - name: ssl-certs-host
@@ -58,12 +55,12 @@ spec:
 {% else %}
       path: /usr/share/ca-certificates
 {% endif %}
-  - name: "kubeconfig"
+  - name: etc-kube-ssl
     hostPath:
-      path: "{{kube_config_dir}}/node-kubeconfig.yaml"
-  - name: "etc-kube-ssl"
+      path: "{{ kube_config_dir }}/ssl"
+  - name: kubeconfig
     hostPath:
-      path: "{{kube_config_dir}}/ssl"
-  - name: "var-run-dbus"
+      path: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml"
+  - name: var-run-dbus
     hostPath:
-      path: "/var/run/dbus"
+      path: /var/run/dbus
diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml
index b154c96f884c4d9815ff7289a189bb4d7aabd390..056f9edcf804f47c072d68bef63aae0a621f0e59 100644
--- a/roles/kubernetes/preinstall/tasks/set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/set_facts.yml
@@ -23,7 +23,7 @@
       {% if not is_kube_master and loadbalancer_apiserver_localhost|default(true) -%}
            https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
       {%- elif is_kube_master -%}
-           http://127.0.0.1:{{ kube_apiserver_insecure_port }}
+           https://127.0.0.1:{{ kube_apiserver_port }}
       {%- else -%}
       {%-   if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
            https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh
index 55ea13d1e0dee106e011ea6f55b4556448654555..e8574cc6b74cc322e5d2791129188061586cbd72 100755
--- a/roles/kubernetes/secrets/files/make-ssl.sh
+++ b/roles/kubernetes/secrets/files/make-ssl.sh
@@ -72,32 +72,47 @@ else
     openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
 fi
 
+# Generate a 2048-bit RSA key and a CA-signed certificate, valid for 3650 days,
+# for the given file basename ($1) and openssl subject string ($2).
+gen_key_and_cert() {
+    local name=$1
+    local subject=$2
+    openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1
+    openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
+}
+
 if [ ! -e "$SSLDIR/ca-key.pem" ]; then
-    # kube-apiserver key
-    openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
-    openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
-    openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
+    # kube-apiserver
+    gen_key_and_cert "apiserver" "/CN=kube-apiserver"
     cat ca.pem >> apiserver.pem
+    # kube-scheduler
+    gen_key_and_cert "kube-scheduler" "/CN=system:kube-scheduler"
+    # kube-controller-manager
+    gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager"
 fi
 
+# Admins
 if [ -n "$MASTERS" ]; then
     for host in $MASTERS; do
         cn="${host%%.*}"
-        # admin key
-        openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1
-        openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=kube-admin-${cn}/O=system:masters" > /dev/null 2>&1
-        openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 3650 > /dev/null 2>&1
+        # admin
+        gen_key_and_cert "admin-${host}" "/CN=kube-admin-${cn}/O=system:masters"
+    done
+fi
+
+# Nodes
+if [ -n "$HOSTS" ]; then
+    for host in $HOSTS; do
+        cn="${host%%.*}"
+        gen_key_and_cert "node-${host}" "/CN=system:node:${cn}/O=system:nodes"
     done
 fi
 
-# Nodes and Admin
+# system:kube-proxy
 if [ -n "$HOSTS" ]; then
     for host in $HOSTS; do
         cn="${host%%.*}"
-        # node key
-        openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1
-        openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}" > /dev/null 2>&1
-        openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 > /dev/null 2>&1
+        # kube-proxy
+        gen_key_and_cert "kube-proxy-${host}" "/CN=system:kube-proxy"
     done
 fi
 
diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
index 8df2195bf33ee9b48e297cb920316d866c4b9800..61d9c7826bb98b55d7b9d53f3ccebd4d4facb99e 100644
--- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml
+++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
@@ -56,24 +56,39 @@
 
 - set_fact:
     all_master_certs: "['ca-key.pem',
+                      'apiserver.pem',
+                      'apiserver-key.pem',
+                      'kube-scheduler.pem',
+                      'kube-scheduler-key.pem',
+                      'kube-controller-manager.pem',
+                      'kube-controller-manager-key.pem',
                       {% for node in groups['kube-master'] %}
                       'admin-{{ node }}.pem',
                       'admin-{{ node }}-key.pem',
-                      'apiserver.pem',
-                      'apiserver-key.pem',
                       {% endfor %}]"
     my_master_certs: ['ca-key.pem',
                      'admin-{{ inventory_hostname }}.pem',
                      'admin-{{ inventory_hostname }}-key.pem',
                      'apiserver.pem',
-                     'apiserver-key.pem'
+                     'apiserver-key.pem',
+                     'kube-scheduler.pem',
+                     'kube-scheduler-key.pem',
+                     'kube-controller-manager.pem',
+                     'kube-controller-manager-key.pem',
                      ]
     all_node_certs: "['ca.pem',
                     {% for node in groups['k8s-cluster'] %}
                     'node-{{ node }}.pem',
                     'node-{{ node }}-key.pem',
+                    'kube-proxy-{{ node }}.pem',
+                    'kube-proxy-{{ node }}-key.pem',
                     {% endfor %}]"
-    my_node_certs: ['ca.pem', 'node-{{ inventory_hostname }}.pem', 'node-{{ inventory_hostname }}-key.pem']
+    my_node_certs: ['ca.pem',
+                   'node-{{ inventory_hostname }}.pem',
+                   'node-{{ inventory_hostname }}-key.pem',
+                   'kube-proxy-{{ inventory_hostname }}.pem',
+                   'kube-proxy-{{ inventory_hostname }}-key.pem',
+                   ]
   tags: facts
 
 - name: Gen_certs | Gather master certs
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 6ecdaa9c99bcc54e4f5270b72694dd3aaaecdfa7..8d327856fd754d9c861fca7a78fb313d02543864 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -114,3 +114,9 @@ vault_deployment_type: docker
 k8s_image_pull_policy: IfNotPresent
 efk_enabled: false
 enable_network_policy: false
+
+## List of authorization modes to configure on the k8s cluster.
+## Only the 'AlwaysAllow', 'AlwaysDeny', and 'RBAC' modes are tested.
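+## Example: authorization_modes: ['RBAC']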
+authorization_modes: []
+rbac_enabled: "{{ 'RBAC' in authorization_modes }}"
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index af3e66601bf54a5936f223828618e8639b559964..77140ba6a6e54b8f67a45d4d15884cb8655dc2ff 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -79,11 +79,10 @@
     - /etc/dnsmasq.d-available
     - /etc/etcd.env
     - /etc/calico
+    - /etc/weave.env
     - /opt/cni
     - /etc/dhcp/dhclient.d/zdnsupdate.sh
     - /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
-    - "{{ bin_dir }}/kubelet"
-    - "{{ bin_dir }}/kubernetes-scripts"
     - /run/flannel
     - /etc/flannel
     - /run/kubernetes
@@ -92,6 +91,15 @@
     - /etc/ssl/certs/kube-ca.pem
     - /etc/ssl/certs/etcd-ca.pem
     - /var/log/pods/
+    - "{{ bin_dir }}/kubelet"
+    - "{{ bin_dir }}/etcd-scripts"
+    - "{{ bin_dir }}/etcd"
+    - "{{ bin_dir }}/etcdctl"
+    - "{{ bin_dir }}/kubernetes-scripts"
+    - "{{ bin_dir }}/kubectl"
+    - "{{ bin_dir }}/helm"
+    - "{{ bin_dir }}/calicoctl"
+    - "{{ bin_dir }}/weave"
   tags: ['files']