From ddffdb63bfcc65a1731a16d316ce10d4903e3261 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= <ak@patientsky.com>
Date: Thu, 6 Dec 2018 11:33:38 +0100
Subject: [PATCH] Remove non-kubeadm deployment (#3811)

* Remove non-kubeadm deployment

* More cleanup

* Fix GitLab CI

* Stop GCE instances first, before setting state absent, so the delete process works

* More cleanup

* Fix bug with checking if kubeadm has already run

* More fixes

* Fix test

* Fix

* Fix GitLab checkout until Kubespray 2.8 is on Quay

* Fixed

* Add upgrade path from non-kubeadm to kubeadm. Revert SSL path

* Re-add secret checking

* Make GitLab checks test the upgrade path from v2.7.0 to v2.8.0

* Fix typo

* Fix CI jobs to use kubeadm again. Fix broken hyperkube path

* Fix GitLab CI

* Fix rotate_tokens kubectl calls (see the example after this list)

* More fixes

* Fix tokens
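
All kubectl invocations in the playbooks are now pinned to the
kubeadm-generated admin kubeconfig instead of relying on an implicit
/root/.kube/config. A representative invocation of the new form (a sketch
only; bin_dir defaults to /usr/local/bin, and the exact tasks are in the
diff below):

    /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf \
        delete pods -n kube-system --all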
---
 .gitlab-ci.yml                                |   9 +-
 cluster.yml                                   |  18 +-
 contrib/dind/kubespray-dind.yaml              |   1 -
 .../test-some_distros-kube_router_combo.env   |   6 +-
 contrib/dind/test-some_distros-most_CNIs.env  |  10 +-
 docs/cri-o.md                                 |   3 -
 docs/kube-router.md                           |  30 +--
 docs/vars.md                                  |   3 -
 inventory/sample/group_vars/all/all.yml       |   8 +-
 roles/download/defaults/main.yml              |  15 +-
 .../ansible/tasks/cleanup_dns.yml             |   2 -
 .../rotate_tokens/tasks/main.yml              |  10 +-
 roles/kubernetes/client/tasks/main.yml        |  19 --
 roles/kubernetes/kubeadm/tasks/main.yml       |   2 +-
 roles/kubernetes/master/handlers/main.yml     |  13 +-
 .../tasks/kubeadm-cleanup-old-certs.yml       |   2 +-
 .../kubernetes/master/tasks/kubeadm-setup.yml |  12 +-
 roles/kubernetes/master/tasks/main.yml        |  15 +-
 .../master/tasks/static-pod-setup.yml         |  59 -----
 roles/kubernetes/master/tasks/users-file.yml  |   1 -
 ...kube-controller-manager-kubeconfig.yaml.j2 |  18 --
 .../kube-scheduler-kubeconfig.yaml.j2         |  18 --
 .../templates/kubeadm-config.v1alpha1.yaml.j2 |   2 +-
 .../templates/kubeadm-config.v1alpha2.yaml.j2 |   2 +-
 .../templates/kubeadm-config.v1alpha3.yaml.j2 |   2 +-
 .../templates/kubectl-kubeconfig.yaml.j2      |  18 --
 .../manifests/kube-apiserver.manifest.j2      | 237 ------------------
 .../kube-controller-manager.manifest.j2       | 132 ----------
 .../manifests/kube-scheduler.manifest.j2      |  82 ------
 roles/kubernetes/node/defaults/main.yml       |   9 -
 roles/kubernetes/node/meta/main.yml           |   6 -
 roles/kubernetes/node/tasks/install.yml       |  40 ++-
 .../kubernetes/node/tasks/install_docker.yml  |   9 -
 roles/kubernetes/node/tasks/install_host.yml  |  30 ---
 roles/kubernetes/node/tasks/install_rkt.yml   |  32 ---
 roles/kubernetes/node/tasks/main.yml          |  37 +--
 roles/kubernetes/node/tasks/pre_upgrade.yml   |   4 +-
 .../templates/kube-proxy-kubeconfig.yaml.j2   |  18 --
 .../node/templates/kubelet-container.j2       |  43 ----
 .../node/templates/kubelet.docker.service.j2  |  31 ---
 .../node/templates/kubelet.rkt.service.j2     | 120 ---------
 .../node/templates/kubelet.standard.env.j2    | 151 -----------
 .../manifests/kube-proxy.manifest.j2          | 110 --------
 .../preinstall/tasks/0020-verify-settings.yml |   4 +-
 .../preinstall/tasks/0040-set_facts.yml       |   1 -
 roles/kubernetes/secrets/defaults/main.yml    |   2 -
 roles/kubernetes/secrets/files/certs/.gitkeep |   0
 roles/kubernetes/secrets/handlers/main.yml    |  15 --
 roles/kubernetes/secrets/meta/main.yml        |   1 -
 .../kubernetes/secrets/tasks/check-certs.yml  |  82 ------
 .../secrets/tasks/gen_certs_script.yml        | 227 -----------------
 roles/kubernetes/secrets/tasks/main.yml       | 109 --------
 .../kubernetes/secrets/tasks/upd_ca_trust.yml |  30 ---
 .../secrets/templates/make-ssl.sh.j2          | 151 -----------
 .../secrets/templates/openssl-master.conf.j2  |  42 ----
 .../secrets/templates/openssl-node.conf.j2    |  20 --
 roles/kubespray-defaults/defaults/main.yaml   |  20 +-
 roles/remove-node/pre-remove/tasks/main.yml   |   2 +-
 roles/upgrade/post-upgrade/tasks/main.yml     |   2 +-
 .../win_nodes/kubernetes_patch/tasks/main.yml |   8 +-
 scale.yml                                     |  15 +-
 tests/cloud_playbooks/delete-gce.yml          |  15 ++
 tests/files/gce_ubuntu-flannel-ha.yml         |   2 +-
 tests/testcases/010_check-apiserver.yml       |   1 -
 upgrade-cluster.yml                           |  15 +-
 65 files changed, 111 insertions(+), 2042 deletions(-)
 delete mode 100644 roles/kubernetes/master/tasks/static-pod-setup.yml
 delete mode 100644 roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2
 delete mode 100644 roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2
 delete mode 100644 roles/kubernetes/master/templates/kubectl-kubeconfig.yaml.j2
 delete mode 100644 roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
 delete mode 100644 roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
 delete mode 100644 roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
 delete mode 100644 roles/kubernetes/node/meta/main.yml
 delete mode 100644 roles/kubernetes/node/tasks/install_docker.yml
 delete mode 100644 roles/kubernetes/node/tasks/install_host.yml
 delete mode 100644 roles/kubernetes/node/tasks/install_rkt.yml
 delete mode 100644 roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2
 delete mode 100644 roles/kubernetes/node/templates/kubelet-container.j2
 delete mode 100644 roles/kubernetes/node/templates/kubelet.docker.service.j2
 delete mode 100644 roles/kubernetes/node/templates/kubelet.rkt.service.j2
 delete mode 100644 roles/kubernetes/node/templates/kubelet.standard.env.j2
 delete mode 100644 roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
 delete mode 100644 roles/kubernetes/secrets/defaults/main.yml
 delete mode 100644 roles/kubernetes/secrets/files/certs/.gitkeep
 delete mode 100644 roles/kubernetes/secrets/handlers/main.yml
 delete mode 100644 roles/kubernetes/secrets/meta/main.yml
 delete mode 100644 roles/kubernetes/secrets/tasks/check-certs.yml
 delete mode 100644 roles/kubernetes/secrets/tasks/gen_certs_script.yml
 delete mode 100644 roles/kubernetes/secrets/tasks/main.yml
 delete mode 100644 roles/kubernetes/secrets/tasks/upd_ca_trust.yml
 delete mode 100755 roles/kubernetes/secrets/templates/make-ssl.sh.j2
 delete mode 100644 roles/kubernetes/secrets/templates/openssl-master.conf.j2
 delete mode 100644 roles/kubernetes/secrets/templates/openssl-node.conf.j2
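
Note for inventories carried over from pre-2.8 releases: the two removed
toggles are no longer read anywhere, so they can simply be deleted from
existing group_vars (a sketch; these are the exact keys this patch removes
from inventory/sample/group_vars/all/all.yml):

    # Remove these lines; both settings are obsolete after this patch:
    kubeadm_enabled: true
    skip_non_kubeadm_warning: false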

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 32ed71954..845f28ca1 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -24,7 +24,6 @@ variables:
   IDEMPOT_CHECK: "false"
   RESET_CHECK: "false"
   UPGRADE_TEST: "false"
-  KUBEADM_ENABLED: "false"
   LOG_LEVEL: "-vv"
 
 # asia-east1-a
@@ -89,11 +88,11 @@ before_script:
     - echo ${PWD}
     - echo "${STARTUP_SCRIPT}"
     - cd tests && make create-${CI_PLATFORM} -s ; cd -
+    #- git fetch --all && git checkout v2.7.0
 
     # Check out latest tag if testing upgrade
     # Uncomment when gitlab kubespray repo has tags
-    #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
-    - test "${UPGRADE_TEST}" != "false" && git checkout 53d87e53c5899d4ea2904ab7e3883708dd6363d3
+    - test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
     # Checkout the CI vars file so it is available
     - test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
     # Workaround https://github.com/kubernetes-sigs/kubespray/issues/2021
@@ -137,9 +136,7 @@ before_script:
 
     # Tests Cases
     ## Test Master API
-    - >
-      ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
-      -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
+    - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
 
     ## Ping the between 2 pod
     - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
diff --git a/cluster.yml b/cluster.yml
index 61efa4902..61e103963 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -13,20 +13,6 @@
   vars:
     ansible_connection: local
 
-- hosts: localhost
-  gather_facts: false
-  tasks:
-    - name: deploy warning for non kubeadm
-      debug:
-        msg: "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
-      when: not kubeadm_enabled and not skip_non_kubeadm_warning
-
-    - name: deploy cluster for non kubeadm
-      pause:
-        prompt: "Are you sure you want to deploy cluster using the deprecated non-kubeadm mode."
-        echo: no
-      when: not kubeadm_enabled and not skip_non_kubeadm_warning
-
 - hosts: bastion[0]
   gather_facts: False
   roles:
@@ -96,7 +82,7 @@
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
-    - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
+    - { role: kubernetes/kubeadm, tags: kubeadm}
     - { role: network_plugin, tags: network }
 
 - hosts: kube-master[0]
@@ -104,7 +90,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
-    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"], when: "kubeadm_enabled" }
+    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"]}
 
 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
diff --git a/contrib/dind/kubespray-dind.yaml b/contrib/dind/kubespray-dind.yaml
index 02cd16989..6386bcf31 100644
--- a/contrib/dind/kubespray-dind.yaml
+++ b/contrib/dind/kubespray-dind.yaml
@@ -1,7 +1,6 @@
 # kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
 # See contrib/dind/README.md
 kube_api_anonymous_auth: true
-kubeadm_enabled: true
 
 kubelet_fail_swap_on: false
 
diff --git a/contrib/dind/test-some_distros-kube_router_combo.env b/contrib/dind/test-some_distros-kube_router_combo.env
index 4b0767b96..f2677129b 100644
--- a/contrib/dind/test-some_distros-kube_router_combo.env
+++ b/contrib/dind/test-some_distros-kube_router_combo.env
@@ -1,8 +1,6 @@
 DISTROS=(debian centos)
 NETCHECKER_HOST=${NODES[0]}
 EXTRAS=(
-  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":false}'
-  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":true}'
-  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":false}'
-  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":true}'
+  'kube_network_plugin=kube-router {"kube_router_run_service_proxy":false}'
+  'kube_network_plugin=kube-router {"kube_router_run_service_proxy":true}'
 )
diff --git a/contrib/dind/test-some_distros-most_CNIs.env b/contrib/dind/test-some_distros-most_CNIs.env
index 830695e5f..2fb185c15 100644
--- a/contrib/dind/test-some_distros-most_CNIs.env
+++ b/contrib/dind/test-some_distros-most_CNIs.env
@@ -1,8 +1,8 @@
 DISTROS=(debian centos)
 EXTRAS=(
-  'kube_network_plugin=calico {"kubeadm_enabled":true}'
-  'kube_network_plugin=canal {"kubeadm_enabled":true}'
-  'kube_network_plugin=cilium {"kubeadm_enabled":true}'
-  'kube_network_plugin=flannel {"kubeadm_enabled":true}'
-  'kube_network_plugin=weave {"kubeadm_enabled":true}'
+  'kube_network_plugin=calico {}'
+  'kube_network_plugin=canal {}'
+  'kube_network_plugin=cilium {}'
+  'kube_network_plugin=flannel {}'
+  'kube_network_plugin=weave {}'
 )
diff --git a/docs/cri-o.md b/docs/cri-o.md
index 43391768a..8004f5cc2 100644
--- a/docs/cri-o.md
+++ b/docs/cri-o.md
@@ -15,8 +15,6 @@ Use cri-o instead of docker, set following variable:
 #### all.yml
 
 ```
-kubeadm_enabled: true
-...
 download_container: false
 skip_downloads: false
 ```
@@ -28,4 +26,3 @@ etcd_deployment_type: host
 kubelet_deployment_type: host
 container_manager: crio
 ```
-
diff --git a/docs/kube-router.md b/docs/kube-router.md
index 5d6598746..adc58cf9f 100644
--- a/docs/kube-router.md
+++ b/docs/kube-router.md
@@ -62,34 +62,6 @@ You can change the default configuration by overriding `kube_router_...` variabl
 these are named to follow `kube-router` command-line options as per
 <https://www.kube-router.io/docs/user-guide/#try-kube-router-with-cluster-installers>.
 
-## Caveats
-
-### kubeadm_enabled: true
-
-If you want to set `kube-router` to replace `kube-proxy`
-(`--run-service-proxy=true`) while using `kubeadm_enabled`,
-then 'kube-proxy` DaemonSet will be removed *after* kubeadm finishes
-running, as it's not possible to skip kube-proxy install in kubeadm flags
-and/or config, see https://github.com/kubernetes/kubeadm/issues/776.
-
-Given above, if `--run-service-proxy=true` is needed it would be
-better to void `kubeadm_enabled` i.e. set:
-
-```
-kubeadm_enabled: false
-kube_router_run_service_proxy: true
-
-```
-
-If for some reason you do want/need to set `kubeadm_enabled`, removing
-it afterwards behave better if kube-proxy is set to ipvs mode, i.e. set:
-
-```
-kubeadm_enabled: true
-kube_router_run_service_proxy: true
-kube_proxy_mode: ipvs
-```
-
 ## Advanced BGP Capabilities
 https://github.com/cloudnativelabs/kube-router#advanced-bgp-capabilities
 
@@ -105,4 +77,4 @@ Next options will set up annotations for kube-router, using `kubectl annotate` c
 kube_router_annotations_master: []
 kube_router_annotations_node: []
 kube_router_annotations_all: []
-```
\ No newline at end of file
+```
diff --git a/docs/vars.md b/docs/vars.md
index d37aa0ed0..5a76d58f0 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -20,9 +20,6 @@ Some variables of note include:
   string)
 * *etcd_version* - Specify version of ETCD to use
 * *ipip* - Enables Calico ipip encapsulation by default
-* *hyperkube_image_repo* - Specify the Docker repository where Hyperkube
-  resides
-* *hyperkube_image_tag* - Specify the Docker tag where Hyperkube resides
 * *kube_network_plugin* - Sets k8s network plugin (default Calico)
 * *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
 * *kube_version* - Specify a given Kubernetes hyperkube version
diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml
index 6e835ca90..b9b8a09aa 100644
--- a/inventory/sample/group_vars/all/all.yml
+++ b/inventory/sample/group_vars/all/all.yml
@@ -42,16 +42,10 @@ bin_dir: /usr/local/bin
 ## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
 ## When openstack is used make sure to source in the openstack credentials
 ## like you would do when using nova-client before starting the playbook.
-## Note: The 'external' cloud provider is not supported. 
+## Note: The 'external' cloud provider is not supported.
 ## TODO(riverzhang): https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
 #cloud_provider:
 
-## kubeadm deployment mode
-kubeadm_enabled: true
-
-# Skip alert information
-skip_non_kubeadm_warning: false
-
 ## Set these proxy values in order to update package manager and docker daemon to use proxies
 #http_proxy: ""
 #https_proxy: ""
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 80ebb3f12..3bc9e0134 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -136,8 +136,6 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "{{ calico_rr_version }}"
-hyperkube_image_repo: "{{ kube_image_repo }}/hyperkube-{{ image_arch }}"
-hyperkube_image_tag: "{{ kube_version }}"
 pod_infra_image_repo: "gcr.io/google_containers/pause-{{ image_arch }}"
 pod_infra_image_tag: "{{ pod_infra_version }}"
 install_socat_image_repo: "xueshanf/install-socat"
@@ -272,7 +270,7 @@ downloads:
       - k8s-cluster
 
   kubeadm:
-    enabled: "{{ kubeadm_enabled }}"
+    enabled: true
     file: true
     version: "{{ kubeadm_version }}"
     dest: "{{local_release_dir}}/kubeadm"
@@ -284,20 +282,11 @@ downloads:
     groups:
       - k8s-cluster
 
-  hyperkube:
-    enabled: "{{ kubeadm_enabled == false }}"
-    container: true
-    repo: "{{ hyperkube_image_repo }}"
-    tag: "{{ hyperkube_image_tag }}"
-    sha256: "{{ hyperkube_digest_checksum|default(None) }}"
-    groups:
-      - k8s-cluster
-
   hyperkube_file:
     enabled: true
     file: true
     version: "{{ kube_version }}"
-    dest: "{{local_release_dir}}/hyperkube"
+    dest: "{{ local_release_dir }}/hyperkube"
     sha256: "{{ hyperkube_binary_checksum }}"
     url: "{{ hyperkube_download_url }}"
     unarchive: false
diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
index fdb7bfca5..ee6ba3203 100644
--- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -21,7 +21,6 @@
     resource: "deploy"
     state: absent
   when:
-    - kubeadm_enabled|default(false)
     - kubeadm_init is defined
     - kubeadm_init.changed|default(false)
     - inventory_hostname == groups['kube-master'][0]
@@ -50,7 +49,6 @@
     - 'deploy'
     - 'svc'
   when:
-    - kubeadm_enabled|default(false)
     - kubeadm_init is defined
     - kubeadm_init.changed|default(false)
     - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
index 5d0abf0f1..9c51b4ca0 100644
--- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
+++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Rotate Tokens | Get default token name
-  shell: "{{ bin_dir }}/kubectl get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
+  shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
   register: default_token
   changed_when: false
   until: default_token.rc == 0
@@ -8,7 +8,7 @@
   retries: 5
 
 - name: Rotate Tokens | Get default token data
-  command: "{{ bin_dir }}/kubectl get secrets {{ default_token.stdout }} -ojson"
+  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets {{ default_token.stdout }} -ojson"
   register: default_token_data
   changed_when: false
 
@@ -31,7 +31,7 @@
 # instead of filtering manually
 - name: Rotate Tokens | Get all serviceaccount tokens to expire
   shell: >-
-    {{ bin_dir }}/kubectl get secrets --all-namespaces
+    {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets --all-namespaces
     -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
     | grep kubernetes.io/service-account-token
     | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|tiller|local-volume-provisioner'
@@ -39,10 +39,10 @@
   when: needs_rotation
 
 - name: Rotate Tokens | Delete expired tokens
-  command: "{{ bin_dir }}/kubectl delete secrets -n {{ item.split(' ')[0] }} {{ item.split(' ')[1] }}"
+  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete secrets -n {{ item.split(' ')[0] }} {{ item.split(' ')[1] }}"
   with_items: "{{ tokens_to_delete.stdout_lines }}"
   when: needs_rotation
 
 - name: Rotate Tokens | Delete pods in system namespace
-  command: "{{ bin_dir }}/kubectl delete pods -n kube-system --all"
+  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete pods -n kube-system --all"
   when: needs_rotation
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index c45ccb68f..5c5b3d251 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -10,25 +10,6 @@
   tags:
     - facts
 
-- name: Gather certs for admin kubeconfig
-  slurp:
-    src: "{{ item }}"
-  register: admin_certs
-  with_items:
-    - "{{ kube_cert_dir }}/ca.pem"
-    - "{{ kube_cert_dir }}/admin-{{ inventory_hostname }}.pem"
-    - "{{ kube_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
-  when: not kubeadm_enabled|d(false)|bool
-
-- name: Write admin kubeconfig
-  template:
-    src: admin.conf.j2
-    dest: "{{ kube_config_dir }}/admin.conf"
-    owner: root
-    group: "{{ kube_cert_group }}"
-    mode: 0640
-  when: not kubeadm_enabled|d(false)|bool
-
 - name: Create kube config dir
   file:
     path: "/root/.kube"
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 1a2470d46..29b866a97 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -124,7 +124,7 @@
 # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776
 # is fixed
 - name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
-  shell: "{{ bin_dir }}/kubectl delete daemonset -n kube-system kube-proxy"
+  shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
   delegate_to: "{{groups['kube-master']|first}}"
   run_once: true
   when:
diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml
index 5d9e37e35..a2df028c6 100644
--- a/roles/kubernetes/master/handlers/main.yml
+++ b/roles/kubernetes/master/handlers/main.yml
@@ -91,13 +91,16 @@
   command: /bin/true
   notify:
     - Master | set secret_changed to true
-    - Master | clear kubeconfig for root user
+    - Master | Copy new kubeconfig for root user
 
 - name: Master | set secret_changed to true
   set_fact:
     secret_changed: true
 
-- name: Master | clear kubeconfig for root user
-  file:
-    path: /root/.kube/config
-    state: absent
+- name: Master | Copy new kubeconfig for root user
+  copy:
+    src: "{{ kube_config_dir }}/admin.conf"
+    dest: "/root/.kube/config"
+    remote_src: yes
+    mode: "0600"
+    backup: yes
diff --git a/roles/kubernetes/master/tasks/kubeadm-cleanup-old-certs.yml b/roles/kubernetes/master/tasks/kubeadm-cleanup-old-certs.yml
index 2f3af53e0..2abe55ec8 100644
--- a/roles/kubernetes/master/tasks/kubeadm-cleanup-old-certs.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-cleanup-old-certs.yml
@@ -1,7 +1,7 @@
 ---
 - name: kubeadm | Retrieve files to purge
   find:
-    paths: "{{kube_cert_dir }}"
+    paths: "{{ kube_cert_dir }}"
     patterns: '*.pem'
   register: files_to_purge_for_kubeadm
 
diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml
index e0c13fefa..c826d9c71 100644
--- a/roles/kubernetes/master/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -26,19 +26,22 @@
   file:
     path: "{{ kube_config_dir }}/admin.conf"
     state: absent
-  when: not kubeadm_already_run.stat.exists
+  when:
+    - not kubeadm_already_run.stat.exists
 
 - name: kubeadm | Delete old static pods
   file:
     path: "{{ kube_config_dir }}/manifests/{{item}}.manifest"
     state: absent
   with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler", "kube-proxy"]
-  when: old_apiserver_cert.stat.exists
+  when:
+    - old_apiserver_cert.stat.exists
 
 - name: kubeadm | Forcefully delete old static pods
   shell: "docker ps -f name=k8s_{{item}} -q | xargs --no-run-if-empty docker rm -f"
   with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
-  when: old_apiserver_cert.stat.exists
+  when:
+    - old_apiserver_cert.stat.exists
 
 - name: kubeadm | aggregate all SANs
   set_fact:
@@ -220,7 +223,8 @@
 
 - name: kubeadm | cleanup old certs if necessary
   import_tasks: kubeadm-cleanup-old-certs.yml
-  when: old_apiserver_cert.stat.exists
+  when:
+    - old_apiserver_cert.stat.exists
 
 - name: kubeadm | Remove taint for master with node role
   command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index a249e4164..f7faf43f8 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -4,12 +4,14 @@
     - k8s-pre-upgrade
 
 - import_tasks: users-file.yml
-  when: kube_basic_auth|default(true)
+  when:
+    - kube_basic_auth|default(true)
 
 - import_tasks: encrypt-at-rest.yml
-  when: kube_encrypt_secret_data
+  when:
+    - kube_encrypt_secret_data
 
-- name: install | Copy kubectl binary from download dir
+- name: Install | Copy kubectl binary from download dir
   synchronize:
     src: "{{ local_release_dir }}/hyperkube"
     dest: "{{ bin_dir }}/kubectl"
@@ -57,10 +59,5 @@
     kube_apiserver_enable_admission_plugins: "{{ kube_apiserver_enable_admission_plugins | difference(['SecurityContextDeny']) | union(['PodSecurityPolicy']) | unique }}"
   when: podsecuritypolicy_enabled
 
-- name: Include kubeadm setup if enabled
+- name: Include kubeadm setup
   import_tasks: kubeadm-setup.yml
-  when: kubeadm_enabled|bool|default(false)
-
-- name: Include static pod setup if not using kubeadm
-  import_tasks: static-pod-setup.yml
-  when: not kubeadm_enabled|bool|default(false)
diff --git a/roles/kubernetes/master/tasks/static-pod-setup.yml b/roles/kubernetes/master/tasks/static-pod-setup.yml
deleted file mode 100644
index 33b28e637..000000000
--- a/roles/kubernetes/master/tasks/static-pod-setup.yml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-- name: Create audit-policy directory
-  file:
-    path: "{{ audit_policy_file | dirname }}"
-    state: directory
-  tags:
-    - kube-apiserver
-  when: kubernetes_audit|default(false)
-
-- name: Write api audit policy yaml
-  template:
-    src: apiserver-audit-policy.yaml.j2
-    dest: "{{ audit_policy_file }}"
-  notify: Master | Restart apiserver
-  tags:
-    - kube-apiserver
-  when: kubernetes_audit|default(false)
-
-- name: Write kube-apiserver manifest
-  template:
-    src: manifests/kube-apiserver.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
-  notify: Master | Restart apiserver
-  tags:
-    - kube-apiserver
-
-- meta: flush_handlers
-
-- name: Write kube-scheduler kubeconfig
-  template:
-    src: kube-scheduler-kubeconfig.yaml.j2
-    dest: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
-  tags:
-    - kube-scheduler
-
-- name: Write kube-scheduler manifest
-  template:
-    src: manifests/kube-scheduler.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
-  notify: Master | Restart kube-scheduler
-  tags:
-    - kube-scheduler
-
-- name: Write kube-controller-manager kubeconfig
-  template:
-    src: kube-controller-manager-kubeconfig.yaml.j2
-    dest: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
-  tags:
-    - kube-controller-manager
-
-- name: Write kube-controller-manager manifest
-  template:
-    src: manifests/kube-controller-manager.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
-  notify: Master | Restart kube-controller-manager
-  tags:
-    - kube-controller-manager
-
-- meta: flush_handlers
diff --git a/roles/kubernetes/master/tasks/users-file.yml b/roles/kubernetes/master/tasks/users-file.yml
index e8425d1bc..7c94f6e2e 100644
--- a/roles/kubernetes/master/tasks/users-file.yml
+++ b/roles/kubernetes/master/tasks/users-file.yml
@@ -12,4 +12,3 @@
     dest: "{{ kube_users_dir }}/known_users.csv"
     mode: 0640
     backup: yes
-  notify: Master | set secret_changed
diff --git a/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 b/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2
deleted file mode 100644
index 887d022c1..000000000
--- a/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-clusters:
-- name: local
-  cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.pem
-    server: {{ kube_apiserver_endpoint }}
-users:
-- name: kube-controller-manager
-  user:
-    client-certificate: {{ kube_cert_dir }}/kube-controller-manager.pem
-    client-key: {{ kube_cert_dir }}/kube-controller-manager-key.pem
-contexts:
-- context:
-    cluster: local
-    user: kube-controller-manager
-  name: kube-controller-manager-{{ cluster_name }}
-current-context: kube-controller-manager-{{ cluster_name }}
diff --git a/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 b/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2
deleted file mode 100644
index 974b72427..000000000
--- a/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-clusters:
-- name: local
-  cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.pem
-    server: {{ kube_apiserver_endpoint }}
-users:
-- name: kube-scheduler
-  user:
-    client-certificate: {{ kube_cert_dir }}/kube-scheduler.pem
-    client-key: {{ kube_cert_dir }}/kube-scheduler-key.pem
-contexts:
-- context:
-    cluster: local
-    user: kube-scheduler
-  name: kube-scheduler-{{ cluster_name }}
-current-context: kube-scheduler-{{ cluster_name }}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
index 8adc777fd..ee780cd5e 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
@@ -174,7 +174,7 @@ apiServerCertSANs:
 {% for san in  apiserver_sans.split(' ') | unique %}
   - {{ san }}
 {% endfor %}
-certificatesDir: {{ kube_config_dir }}/ssl
+certificatesDir: {{ kube_cert_dir }}
 imageRepository: {{ kube_image_repo }}
 unifiedControlPlaneImage: ""
 {% if kube_override_hostname|default('') %}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
index 53e1703ce..87e5a961e 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
@@ -192,7 +192,7 @@ apiServerCertSANs:
 {% for san in apiserver_sans.split(' ') | unique %}
   - {{ san }}
 {% endfor %}
-certificatesDir: {{ kube_config_dir }}/ssl
+certificatesDir: {{ kube_cert_dir }}
 imageRepository: {{ kube_image_repo }}
 unifiedControlPlaneImage: ""
 nodeRegistration:
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
index adedb850d..13053ae0b 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
@@ -47,7 +47,7 @@ apiServerCertSANs:
 {% for san in  apiserver_sans.split(' ') | unique %}
   - {{ san }}
 {% endfor %}
-certificatesDir: {{ kube_config_dir }}/ssl
+certificatesDir: {{ kube_cert_dir }}
 imageRepository: {{ kube_image_repo }}
 unifiedControlPlaneImage: ""
 apiServerExtraArgs:
diff --git a/roles/kubernetes/master/templates/kubectl-kubeconfig.yaml.j2 b/roles/kubernetes/master/templates/kubectl-kubeconfig.yaml.j2
deleted file mode 100644
index a9800d3ac..000000000
--- a/roles/kubernetes/master/templates/kubectl-kubeconfig.yaml.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-current-context: kubectl-to-{{ cluster_name }}
-preferences: {}
-clusters:
-- cluster:
-    certificate-authority-data: {{ kube_node_cert|b64encode }}
-    server: {{ kube_apiserver_endpoint }}
-  name: {{ cluster_name }}
-contexts:
-- context:
-    cluster: {{ cluster_name }}
-    user: kubectl
-  name: kubectl-to-{{ cluster_name }}
-users:
-- name: kubectl
-  user:
-    token: {{ kubectl_token }}
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
deleted file mode 100644
index 926151928..000000000
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ /dev/null
@@ -1,237 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: kube-apiserver
-  namespace: kube-system
-  labels:
-    k8s-app: kube-apiserver
-    kubespray: v2
-  annotations:
-    kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
-    kubespray.apiserver-cert/serial: "{{ apiserver_cert_serial }}"
-spec:
-  hostNetwork: true
-{% if kube_version is version('v1.6', '>=')  %}
-  dnsPolicy: ClusterFirst
-{% endif %}
-{% if kube_version is version('v1.11.1', '>=') %}
-  priorityClassName: system-node-critical
-{% endif %}
-  containers:
-  - name: kube-apiserver
-    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
-    imagePullPolicy: {{ k8s_image_pull_policy }}
-    resources:
-      limits:
-        cpu: {{ kube_apiserver_cpu_limit }}
-        memory: {{ kube_apiserver_memory_limit }}
-      requests:
-        cpu: {{ kube_apiserver_cpu_requests }}
-        memory: {{ kube_apiserver_memory_requests }}
-    command:
-    - /hyperkube
-    - apiserver
-{% if kubernetes_audit %}
-    - --audit-log-path={{ audit_log_path }}
-    - --audit-log-maxage={{ audit_log_maxage }}
-    - --audit-log-maxbackup={{ audit_log_maxbackups }}
-    - --audit-log-maxsize={{ audit_log_maxsize }}
-    - --audit-policy-file={{ audit_policy_file }}
-{% endif %}
-    - --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
-    - --etcd-servers={{ etcd_access_addresses }}
-{%   if etcd_events_cluster_enabled %}
-    - --etcd-servers-overrides=/events#{{ etcd_events_access_addresses_semicolon }}
-{% endif %}
-{%   if kube_version is version('v1.9', '<')  %}
-    - --etcd-quorum-read=true
-{% endif %}
-    - --etcd-cafile={{ etcd_cert_dir }}/ca.pem
-    - --etcd-certfile={{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
-    - --etcd-keyfile={{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
-{% if kube_apiserver_insecure_port|string != "0" %}
-    - --insecure-bind-address={{ kube_apiserver_insecure_bind_address }}
-{% endif %}
-    - --bind-address={{ kube_apiserver_bind_address }}
-    - --apiserver-count={{ kube_apiserver_count }}
-{% if kube_version is version('v1.9', '>=') %}
-    - --endpoint-reconciler-type=lease
-{% endif %}
-{% if kube_version is version('v1.10', '<') %}
-    - --admission-control={{ kube_apiserver_admission_control | join(',') }}
-{% else %}
-{% if kube_apiserver_enable_admission_plugins|length > 0 %}
-    - --enable-admission-plugins={{ kube_apiserver_enable_admission_plugins | join(',') }}
-{% endif %}
-{% if kube_apiserver_disable_admission_plugins|length > 0 %}
-    - --disable-admission-plugins={{ kube_apiserver_disable_admission_plugins | join(',') }}
-{% endif %}
-{% endif %}
-    - --service-cluster-ip-range={{ kube_service_addresses }}
-    - --service-node-port-range={{ kube_apiserver_node_port_range }}
-    - --client-ca-file={{ kube_cert_dir }}/ca.pem
-    - --profiling={{ kube_profiling }}
-    - --repair-malformed-updates=false
-    - --kubelet-client-certificate={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem
-    - --kubelet-client-key={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem
-    - --service-account-lookup=true
-    - --kubelet-preferred-address-types={{ kubelet_preferred_address_types }}
-    - --request-timeout={{ kube_apiserver_request_timeout }}
-{% if kube_basic_auth|default(true) %}
-    - --basic-auth-file={{ kube_users_dir }}/known_users.csv
-{% endif %}
-    - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
-    - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
-{% if kube_token_auth|default(true) %}
-    - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
-{% endif %}
-    - --service-account-key-file={{ kube_cert_dir }}/service-account-key.pem
-{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
-    - --oidc-issuer-url={{ kube_oidc_url }}
-    - --oidc-client-id={{ kube_oidc_client_id }}
-{%   if kube_oidc_ca_file is defined %}
-    - --oidc-ca-file={{ kube_oidc_ca_file }}
-{%   endif %}
-{%   if kube_oidc_username_claim is defined %}
-    - --oidc-username-claim={{ kube_oidc_username_claim }}
-{%   endif %}
-{%   if kube_oidc_username_prefix is defined %}
-    - "--oidc-username-prefix={{ kube_oidc_username_prefix }}"
-{%   endif %}
-{%   if kube_oidc_groups_claim is defined %}
-    - --oidc-groups-claim={{ kube_oidc_groups_claim }}
-{%   endif %}
-{%   if kube_oidc_groups_prefix is defined %}
-    - "--oidc-groups-prefix={{ kube_oidc_groups_prefix }}"
-{%   endif %}
-{% endif %}
-    - --secure-port={{ kube_apiserver_port }}
-    - --insecure-port={{ kube_apiserver_insecure_port }}
-    - --storage-backend={{ kube_apiserver_storage_backend }}
-{% if kube_api_runtime_config is defined %}
-{%   for conf in kube_api_runtime_config %}
-    - --runtime-config={{ conf }}
-{%   endfor %}
-{% endif %}
-{% if enable_network_policy %}
-{%   if kube_version is version('v1.8', '<')  %}
-    - --runtime-config=extensions/v1beta1/networkpolicies=true
-{%   endif %}
-{% endif %}
-    - --v={{ kube_log_level }}
-    - --allow-privileged=true
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
-    - --cloud-provider={{ cloud_provider }}
-    - --cloud-config={{ kube_config_dir }}/cloud_config
-{% endif %}
-{% if kube_api_anonymous_auth is defined and kube_version is version('v1.5', '>=')  %}
-    - --anonymous-auth={{ kube_api_anonymous_auth }}
-{% endif %}
-{% if authorization_modes %}
-    - --authorization-mode={{ authorization_modes|join(',') }}
-{% endif %}
-{% if kube_encrypt_secret_data %}
-    - --experimental-encryption-provider-config={{ kube_config_dir }}/ssl/secrets_encryption.yaml
-{% endif %}
-{% if kube_feature_gates %}
-    - --feature-gates={{ kube_feature_gates|join(',') }}
-{% endif %}
-{% if kube_version is version('v1.9', '>=') %}
-    - --requestheader-client-ca-file={{ kube_cert_dir }}/{{ kube_front_proxy_ca }}
-{# FIXME(mattymo): Vault certs do not work with front-proxy-client #}
-{% if cert_management == "vault" %}
-    - --requestheader-allowed-names=
-{% else %}
-    - --requestheader-allowed-names=front-proxy-client
-{% endif %}
-    - --requestheader-extra-headers-prefix=X-Remote-Extra-
-    - --requestheader-group-headers=X-Remote-Group
-    - --requestheader-username-headers=X-Remote-User
-    - --enable-aggregator-routing={{ kube_api_aggregator_routing }}
-    - --proxy-client-cert-file={{ kube_cert_dir }}/front-proxy-client.pem
-    - --proxy-client-key-file={{ kube_cert_dir }}/front-proxy-client-key.pem
-{% else %}
-    - --proxy-client-cert-file={{ kube_cert_dir }}/apiserver.pem
-    - --proxy-client-key-file={{ kube_cert_dir }}/apiserver-key.pem
-{% endif %}
-{% if apiserver_custom_flags is string %}
-    - {{ apiserver_custom_flags }}
-{% else %}
-{% for flag in apiserver_custom_flags %}
-    - {{ flag }}
-{% endfor %}
-{% endif %}
-    livenessProbe:
-      httpGet:
-        host: 127.0.0.1
-        path: /healthz
-{% if kube_apiserver_insecure_port|int == 0 %}
-        port: {{ kube_apiserver_port }}
-        scheme: HTTPS
-{% else %}
-        port: {{ kube_apiserver_insecure_port }}
-{% endif %}
-      failureThreshold: 8
-      initialDelaySeconds: 15
-      periodSeconds: 10
-      successThreshold: 1
-      timeoutSeconds: 15
-    volumeMounts:
-    - mountPath: {{ kube_config_dir }}
-      name: kubernetes-config
-      readOnly: true
-    - mountPath: /etc/ssl
-      name: ssl-certs-host
-      readOnly: true
-{% for dir in ssl_ca_dirs %}
-    - mountPath: {{ dir }}
-      name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
-      readOnly: true
-{% endfor %}
-    - mountPath: {{ etcd_cert_dir }}
-      name: etcd-certs
-      readOnly: true
-{% if cloud_provider is defined and cloud_provider == 'aws' and ansible_os_family == 'RedHat' %}
-    - mountPath: /etc/ssl/certs/ca-bundle.crt
-      name: rhel-ca-bundle
-      readOnly: true
-{% endif %}
-{% if kubernetes_audit %}
-{% if audit_log_path != "-" %}
-    - mountPath: {{ audit_log_mountpath }}
-      name: {{ audit_log_name }}
-      Writable: true
-{% endif %}
-    - mountPath: {{ audit_policy_mountpath }}
-      name: {{ audit_policy_name }}
-{% endif %}
-  volumes:
-  - hostPath:
-      path: {{ kube_config_dir }}
-    name: kubernetes-config
-  - name: ssl-certs-host
-    hostPath:
-      path: /etc/ssl
-{% for dir in ssl_ca_dirs %}
-  - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
-    hostPath:
-      path: {{ dir }}
-{% endfor %}
-  - hostPath:
-      path: {{ etcd_cert_dir }}
-    name: etcd-certs
-{% if cloud_provider is defined and cloud_provider == 'aws' and ansible_os_family == 'RedHat' %}
-  - hostPath:
-      path: /etc/ssl/certs/ca-bundle.crt
-    name: rhel-ca-bundle
-{% endif %}
-{% if kubernetes_audit %}
-{% if audit_log_path != "-" %}
-  - hostPath:
-      path: {{ audit_log_hostpath }}
-    name: {{ audit_log_name }}
-{% endif %}
-  - hostPath:
-      path: {{ audit_policy_hostpath }}
-    name: {{ audit_policy_name }}
-{% endif %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
deleted file mode 100644
index 8046b9b94..000000000
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ /dev/null
@@ -1,132 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: kube-controller-manager
-  namespace: kube-system
-  labels:
-    k8s-app: kube-controller-manager
-  annotations:
-    kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
-    kubespray.controller-manager-cert/serial: "{{ controller_manager_cert_serial }}"
-spec:
-  hostNetwork: true
-{% if kube_version is version('v1.6', '>=') %}
-  dnsPolicy: ClusterFirst
-{% endif %}
-{% if kube_version is version('v1.11.1', '>=') %}
-  priorityClassName: system-node-critical
-{% endif %}
-  containers:
-  - name: kube-controller-manager
-    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
-    imagePullPolicy: {{ k8s_image_pull_policy }}
-    resources:
-      limits:
-        cpu: {{ kube_controller_cpu_limit }}
-        memory: {{ kube_controller_memory_limit }}
-      requests:
-        cpu: {{ kube_controller_cpu_requests }}
-        memory: {{ kube_controller_memory_requests }}
-    command:
-    - /hyperkube
-    - controller-manager
-    - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml
-    - --leader-elect=true
-    - --service-account-private-key-file={{ kube_cert_dir }}/service-account-key.pem
-    - --root-ca-file={{ kube_cert_dir }}/ca.pem
-    - --cluster-signing-cert-file={{ kube_cert_dir }}/ca.pem
-    - --cluster-signing-key-file={{ kube_cert_dir }}/ca-key.pem
-    - --enable-hostpath-provisioner={{ kube_hostpath_dynamic_provisioner }}
-    - --node-monitor-grace-period={{ kube_controller_node_monitor_grace_period }}
-    - --node-monitor-period={{ kube_controller_node_monitor_period }}
-    - --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }}
-    - --profiling={{ kube_profiling }}
-    - --terminated-pod-gc-threshold={{ kube_controller_terminated_pod_gc_threshold }}
-    - --v={{ kube_log_level }}
-{% if rbac_enabled %}
-    - --use-service-account-credentials=true
-{% endif %}
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
-    - --cloud-provider={{cloud_provider}}
-    - --cloud-config={{ kube_config_dir }}/cloud_config
-{% elif cloud_provider is defined and cloud_provider in ["external", "oci"] %}
-    - --cloud-provider=external
-{% endif %}
-{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %}
-    - --configure-cloud-routes=true
-{% else %}
-    - --configure-cloud-routes=false
-{% endif %}
-{% if kube_network_plugin is defined and kube_network_plugin in ["cloud", "flannel", "canal", "cilium", "kube-router"] %}
-    - --allocate-node-cidrs=true
-    - --cluster-cidr={{ kube_pods_subnet }}
-    - --service-cluster-ip-range={{ kube_service_addresses }}
-    - --node-cidr-mask-size={{ kube_network_node_prefix }}
-{% endif %}
-{% if kube_feature_gates %}
-    - --feature-gates={{ kube_feature_gates|join(',') }}
-{% endif %}
-{% if controller_mgr_custom_flags is string %}
-    - {{ controller_mgr_custom_flags }}
-{% else %}
-{%   for flag in controller_mgr_custom_flags %}
-    - {{ flag }}
-{%   endfor %}
-{% endif %}
-    livenessProbe:
-      httpGet:
-        host: 127.0.0.1
-        path: /healthz
-        port: 10252
-      initialDelaySeconds: 30
-      timeoutSeconds: 10
-    volumeMounts:
-    - mountPath: /etc/ssl
-      name: ssl-certs-host
-      readOnly: true
-{% for dir in ssl_ca_dirs %}
-    - mountPath: {{ dir }}
-      name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
-      readOnly: true
-{% endfor %}
-    - mountPath: "{{kube_config_dir}}/ssl"
-      name: etc-kube-ssl
-      readOnly: true
-    - mountPath: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
-      name: kubeconfig
-      readOnly: true
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
-    - mountPath: "{{ kube_config_dir }}/cloud_config"
-      name: cloudconfig
-      readOnly: true
-{% endif %}
-{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined and openstack_cacert != "" %}
-    - mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
-      name: openstackcacert
-      readOnly: true
-{% endif %}
-  volumes:
-  - name: ssl-certs-host
-    hostPath:
-      path: /etc/ssl
-{% for dir in ssl_ca_dirs %}
-  - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
-    hostPath:
-      path: {{ dir }}
-{% endfor %}
-  - name: etc-kube-ssl
-    hostPath:
-      path: "{{ kube_config_dir }}/ssl"
-  - name: kubeconfig
-    hostPath:
-      path: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
-  - hostPath:
-      path: "{{ kube_config_dir }}/cloud_config"
-    name: cloudconfig
-{% endif %}
-{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined and openstack_cacert != "" %}
-  - hostPath:
-      path: "{{ kube_config_dir }}/openstack-cacert.pem"
-    name: openstackcacert
-{% endif %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
deleted file mode 100644
index ebe258200..000000000
--- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
+++ /dev/null
@@ -1,82 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: kube-scheduler
-  namespace: kube-system
-  labels:
-    k8s-app: kube-scheduler
-  annotations:
-    kubespray.scheduler-cert/serial: "{{ scheduler_cert_serial }}"
-spec:
-  hostNetwork: true
-{% if kube_version is version('v1.6', '>=') %}
-  dnsPolicy: ClusterFirst
-{% endif %}
-{% if kube_version is version('v1.11.1', '>=') %}
-  priorityClassName: system-node-critical
-{% endif %}
-  containers:
-  - name: kube-scheduler
-    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
-    imagePullPolicy: {{ k8s_image_pull_policy }}
-    resources:
-      limits:
-        cpu: {{ kube_scheduler_cpu_limit }}
-        memory: {{ kube_scheduler_memory_limit }}
-      requests:
-        cpu: {{ kube_scheduler_cpu_requests }}
-        memory: {{ kube_scheduler_memory_requests }}
-    command:
-    - /hyperkube
-    - scheduler
-    - --leader-elect=true
-    - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml
-    - --profiling={{ kube_profiling }}
-    - --v={{ kube_log_level }}
-{% if kube_feature_gates %}
-    - --feature-gates={{ kube_feature_gates|join(',') }}
-{% endif %}
-{% if scheduler_custom_flags is string %}
-    - {{ scheduler_custom_flags }}
-{% else %}
-{%   for flag in scheduler_custom_flags %}
-    - {{ flag }}
-{%   endfor %}
-{% endif %}
-    livenessProbe:
-      httpGet:
-        host: 127.0.0.1
-        path: /healthz
-        port: 10251
-      initialDelaySeconds: 30
-      timeoutSeconds: 10
-    volumeMounts:
-    - mountPath: /etc/ssl
-      name: ssl-certs-host
-      readOnly: true
-{% for dir in ssl_ca_dirs %}
-    - mountPath: {{ dir }}
-      name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
-      readOnly: true
-{% endfor %}
-    - mountPath: "{{ kube_config_dir }}/ssl"
-      name: etc-kube-ssl
-      readOnly: true
-    - mountPath: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
-      name: kubeconfig
-      readOnly: true
-  volumes:
-  - name: ssl-certs-host
-    hostPath:
-      path: /etc/ssl
-{% for dir in ssl_ca_dirs %}
-  - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
-    hostPath:
-      path: {{ dir }}
-{% endfor %}
-  - name: etc-kube-ssl
-    hostPath:
-      path: "{{ kube_config_dir }}/ssl"
-  - name: kubeconfig
-    hostPath:
-      path: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 58d29d434..b6b6f9ea6 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -1,7 +1,4 @@
 ---
-# Valid options: docker (default), rkt, or host
-kubelet_deployment_type: host
-
 # change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
 kube_apiserver_insecure_bind_address: 127.0.0.1
 
@@ -90,12 +87,6 @@ kubelet_custom_flags: []
 ## Support custom flags to be passed to kubelet only on nodes, not masters
 kubelet_node_custom_flags: []
 
-# This setting is used for rkt based kubelet for deploying hyperkube
-# from a docker based registry ( controls --insecure and docker:// )
-## Empty value for quay.io containers
-## docker for docker registry containers
-kube_hyperkube_image_repo: ""
-
 # If non-empty, will use this string as identification instead of the actual hostname
 kube_override_hostname: >-
   {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
diff --git a/roles/kubernetes/node/meta/main.yml b/roles/kubernetes/node/meta/main.yml
deleted file mode 100644
index 00c3c9fe2..000000000
--- a/roles/kubernetes/node/meta/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-dependencies:
-  - role: kubernetes/secrets
-    when: not kubeadm_enabled
-    tags:
-      - k8s-secrets
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml
index ceeaa442b..0cbd4bc9d 100644
--- a/roles/kubernetes/node/tasks/install.yml
+++ b/roles/kubernetes/node/tasks/install.yml
@@ -1,11 +1,4 @@
 ---
-- name: Set kubelet deployment to host if kubeadm is enabled
-  set_fact:
-    kubelet_deployment_type: host
-  when: kubeadm_enabled
-  tags:
-    - kubeadm
-
 - name: install | Copy kubeadm binary from download dir
   synchronize:
     src: "{{ local_release_dir }}/kubeadm"
@@ -15,7 +8,6 @@
     owner: no
     group: no
   delegate_to: "{{ inventory_hostname }}"
-  when: kubeadm_enabled
   tags:
     - kubeadm
 
@@ -24,15 +16,41 @@
     path: "{{ bin_dir }}/kubeadm"
     mode: "0755"
     state: file
-  when: kubeadm_enabled
   tags:
     - kubeadm
 
-- include_tasks: "install_{{ kubelet_deployment_type }}.yml"
+- name: install | Copy kubelet binary from download dir
+  synchronize:
+    src: "{{ local_release_dir }}/hyperkube"
+    dest: "{{ bin_dir }}/kubelet"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
+  delegate_to: "{{ inventory_hostname }}"
+  tags:
+    - hyperkube
+    - upgrade
+  notify: restart kubelet
+
+- name: install | Set kubelet binary permissions
+  file:
+    path: "{{ bin_dir }}/kubelet"
+    mode: "0755"
+    state: file
+  tags:
+    - hyperkube
+    - upgrade
+
+- name: install | Copy socat wrapper for Container Linux
+  command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}"
+  args:
+    creates: "{{ bin_dir }}/socat"
+  when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
 
 - name: install | Write kubelet systemd init file
   template:
-    src: "kubelet.{{ kubelet_deployment_type }}.service.j2"
+    src: "kubelet.host.service.j2"
     dest: "/etc/systemd/system/kubelet.service"
     backup: "yes"
   notify: restart kubelet
diff --git a/roles/kubernetes/node/tasks/install_docker.yml b/roles/kubernetes/node/tasks/install_docker.yml
deleted file mode 100644
index b74511bdd..000000000
--- a/roles/kubernetes/node/tasks/install_docker.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: install | Install kubelet launch script
-  template:
-    src: kubelet-container.j2
-    dest: "{{ bin_dir }}/kubelet"
-    owner: kube
-    mode: 0755
-    backup: yes
-  notify: restart kubelet
diff --git a/roles/kubernetes/node/tasks/install_host.yml b/roles/kubernetes/node/tasks/install_host.yml
deleted file mode 100644
index 3ec1f1800..000000000
--- a/roles/kubernetes/node/tasks/install_host.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-
-- name: install | Copy kubelet binary from download dir
-  synchronize:
-    src: "{{ local_release_dir }}/hyperkube"
-    dest: "{{ bin_dir }}/kubelet"
-    compress: no
-    perms: yes
-    owner: no
-    group: no
-  delegate_to: "{{ inventory_hostname }}"
-  tags:
-    - hyperkube
-    - upgrade
-  notify: restart kubelet
-
-- name: install | Set kubelet binary permissions
-  file:
-    path: "{{ bin_dir }}/kubelet"
-    mode: "0755"
-    state: file
-  tags:
-    - hyperkube
-    - upgrade
-
-- name: install | Copy socat wrapper for Container Linux
-  command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}"
-  args:
-    creates: "{{ bin_dir }}/socat"
-  when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
diff --git a/roles/kubernetes/node/tasks/install_rkt.yml b/roles/kubernetes/node/tasks/install_rkt.yml
deleted file mode 100644
index 22f9c7e81..000000000
--- a/roles/kubernetes/node/tasks/install_rkt.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- name: Trust kubelet container
-  command: >-
-    /usr/bin/rkt trust
-    --skip-fingerprint-review
-    --root
-    {{ item }}
-  register: kubelet_rkt_trust_result
-  until: kubelet_rkt_trust_result.rc == 0
-  with_items:
-    - "https://quay.io/aci-signing-key"
-    - "https://coreos.com/dist/pubkeys/aci-pubkeys.gpg"
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  changed_when: false
-
-- name: create kubelet working directory
-  file:
-    state: directory
-    path: /var/lib/kubelet
-
-- name: Create kubelet service systemd directory
-  file:
-    path: /etc/systemd/system/kubelet.service.d
-    state: directory
-
-- name: Write kubelet proxy drop-in
-  template:
-    src: http-proxy.conf.j2
-    dest: /etc/systemd/system/kubelet.service.d/http-proxy.conf
-  when: http_proxy is defined or https_proxy is defined
-  notify: restart kubelet
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 83454f0c7..bea7d4097 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -22,16 +22,6 @@
   tags:
     - nginx
 
-- name: Write kubelet config file (non-kubeadm)
-  template:
-    src: kubelet.standard.env.j2
-    dest: "{{ kube_config_dir }}/kubelet.env"
-    backup: yes
-  when: not kubeadm_enabled
-  notify: restart kubelet
-  tags:
-    - kubelet
-
 - name: Make sure dynamic kubelet configuration directory is writeable
   file:
     path: "{{ dynamic_kubelet_configuration_dir }}"
@@ -44,25 +34,11 @@
     src: kubelet.kubeadm.env.j2
     dest: "{{ kube_config_dir }}/kubelet.env"
     backup: yes
-  when: kubeadm_enabled
   notify: restart kubelet
   tags:
     - kubelet
     - kubeadm
 
-- name: write the kubecfg (auth) file for kubelet
-  template:
-    src: "{{ item }}-kubeconfig.yaml.j2"
-    dest: "{{ kube_config_dir }}/{{ item }}-kubeconfig.yaml"
-    backup: yes
-  with_items:
-    - node
-    - kube-proxy
-  when: not kubeadm_enabled
-  notify: restart kubelet
-  tags:
-    - kubelet
-
 - name: Ensure nodePort range is reserved
   sysctl:
     name: net.ipv4.ip_local_reserved_ports
@@ -142,26 +118,17 @@
   tags:
     - kube-proxy
 
-- name: Write proxy manifest
-  template:
-    src: manifests/kube-proxy.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"
-  when:
-    - not (kubeadm_enabled or kube_proxy_remove)
-  tags:
-    - kube-proxy
-
-- name: Purge proxy manifest for kubeadm or if proxy services being provided by other means, e.g. network_plugin
+- name: Purge proxy manifest if proxy services are provided by other means, e.g. network_plugin
   file:
     path: "{{ kube_manifest_dir }}/kube-proxy.manifest"
     state: absent
   when:
-    - kubeadm_enabled or kube_proxy_remove
+    - kube_proxy_remove
   tags:
     - kube-proxy
 
 - name: Cleanup kube-proxy leftovers from node
-  command: "{{ docker_bin_dir }}/docker run --rm --privileged -v /lib/modules:/lib/modules --net=host {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} kube-proxy --cleanup"
+  command: "{{ local_release_dir }}/hyperkube kube-proxy --cleanup"
   when:
     - kube_proxy_remove
   # `kube-proxy --cleanup`, being Ok as per shown WARNING, still returns 255 from above run (?)
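
For reference, the cleanup task as it reads after this hunk — the privileged docker-run wrapper is replaced by a direct call to the hyperkube binary staged in local_release_dir:

    - name: Cleanup kube-proxy leftovers from node
      command: "{{ local_release_dir }}/hyperkube kube-proxy --cleanup"
      when:
        - kube_proxy_remove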
diff --git a/roles/kubernetes/node/tasks/pre_upgrade.yml b/roles/kubernetes/node/tasks/pre_upgrade.yml
index 6d24b006b..2191d6fbd 100644
--- a/roles/kubernetes/node/tasks/pre_upgrade.yml
+++ b/roles/kubernetes/node/tasks/pre_upgrade.yml
@@ -16,7 +16,7 @@
   service:
     name: kubelet
     state: stopped
-  when: kubelet_deployment_type == 'host' and kubelet_container_check.rc == 0
+  when: kubelet_container_check.rc == 0
 
 - name: "Pre-upgrade | ensure kubelet container is removed if using host deployment"
   command: docker rm -fv kubelet
@@ -26,4 +26,4 @@
   retries: 4
   until: remove_kubelet_container.rc == 0
   delay: 5
-  when: kubelet_deployment_type == 'host' and kubelet_container_check.rc == 0
\ No newline at end of file
+  when: kubelet_container_check.rc == 0
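
Both pre-upgrade tasks now hinge solely on kubelet_container_check; the kubelet_deployment_type guard is redundant once host is the only mode. A hypothetical sketch of the registering task these conditions rely on (the real task sits above the hunks shown here):

    - name: "Pre-upgrade | check if kubelet container exists"   # hypothetical sketch, not in this patch
      shell: "{{ docker_bin_dir }}/docker ps -af name=kubelet | grep -q kubelet"
      failed_when: false
      changed_when: false
      register: kubelet_container_check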
diff --git a/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2
deleted file mode 100644
index 18c47cd3e..000000000
--- a/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-clusters:
-- name: local
-  cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.pem
-    server: {{ kube_apiserver_endpoint }}
-users:
-- name: kube-proxy
-  user:
-    client-certificate: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}.pem
-    client-key: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}-key.pem
-contexts:
-- context:
-    cluster: local
-    user: kube-proxy
-  name: kube-proxy-{{ cluster_name }}
-current-context: kube-proxy-{{ cluster_name }}
diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2
deleted file mode 100644
index c8ffbfce3..000000000
--- a/roles/kubernetes/node/templates/kubelet-container.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-{{ docker_bin_dir }}/docker run \
-  --net=host \
-  --pid=host \
-  --privileged \
-  --name=kubelet \
-  --restart=on-failure:5 \
-  --memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }} \
-  --cpu-shares={{ kube_cpu_reserved|regex_replace('m', '')  }} \
-  -v /dev:/dev:rw \
-  -v /etc/cni:/etc/cni:ro \
-  -v /opt/cni:/opt/cni:ro \
-  -v /etc/ssl:/etc/ssl:ro \
-  -v /etc/resolv.conf:/etc/resolv.conf \
-  {% for dir in ssl_ca_dirs -%}
-  -v {{ dir }}:{{ dir }}:ro \
-  {% endfor -%}
-  {% if kubelet_load_modules -%}
-  -v /lib/modules:/lib/modules:ro \
-  {% endif -%}
-  -v /sys:/sys:ro \
-  -v {{ docker_daemon_graph }}:{{ docker_daemon_graph }}:rw \
-  -v /var/log:/var/log:rw \
-  -v /var/lib/kubelet:/var/lib/kubelet:shared \
-  -v /var/lib/calico:/var/lib/calico:shared \
-  -v /var/lib/cni:/var/lib/cni:shared \
-  -v /var/run:/var/run:rw \
-  {# we can run into issues with double mounting /var/lib/kubelet #}
-  {# surely there's a better way to do this #}
-  {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
-  -v {{ kubelet_flexvolumes_plugins_dir }}:{{ kubelet_flexvolumes_plugins_dir }}:rw \
-  {% endif -%}
-  {% if local_volume_provisioner_enabled -%}
-  {% for class in local_volume_provisioner_storage_classes -%}
-  -v {{ class.host_dir }}:{{ class.host_dir }}:rw \
-  -v {{ class.mount_dir }}:{{ class.mount_dir }}:rw \
-  {% endfor -%}
-  {% endif %}
-  -v {{kube_config_dir}}:{{kube_config_dir}}:ro \
-  -v /etc/os-release:/etc/os-release:ro \
-  {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
-  ./hyperkube kubelet \
-  "$@"
diff --git a/roles/kubernetes/node/templates/kubelet.docker.service.j2 b/roles/kubernetes/node/templates/kubelet.docker.service.j2
deleted file mode 100644
index c20cf797f..000000000
--- a/roles/kubernetes/node/templates/kubelet.docker.service.j2
+++ /dev/null
@@ -1,31 +0,0 @@
-[Unit]
-Description=Kubernetes Kubelet Server
-Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-After=docker.service
-Wants=docker.socket
-
-[Service]
-User=root
-EnvironmentFile={{kube_config_dir}}/kubelet.env
-ExecStart={{ bin_dir }}/kubelet \
-		$KUBE_LOGTOSTDERR \
-		$KUBE_LOG_LEVEL \
-		$KUBELET_API_SERVER \
-		$KUBELET_ADDRESS \
-		$KUBELET_PORT \
-		$KUBELET_HOSTNAME \
-		$KUBE_ALLOW_PRIV \
-		$KUBELET_ARGS \
-		$DOCKER_SOCKET \
-		$KUBELET_NETWORK_PLUGIN \
-		$KUBELET_VOLUME_PLUGIN \
-		$KUBELET_CLOUDPROVIDER
-Restart=always
-RestartSec=10s
-ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet
-ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
-ExecReload={{ docker_bin_dir }}/docker restart kubelet
-
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
deleted file mode 100644
index ec1dc4975..000000000
--- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2
+++ /dev/null
@@ -1,120 +0,0 @@
-[Unit]
-Description=Kubernetes Kubelet Server
-Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-Wants=network.target
-
-[Service]
-User=root
-Restart=on-failure
-RestartSec=10s
-TimeoutStartSec=0
-LimitNOFILE=40000
-
-ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet.uuid
-ExecStartPre=-/bin/mkdir -p /var/lib/kubelet
-ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
-
-EnvironmentFile={{kube_config_dir}}/kubelet.env
-# stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
-ExecStart=/usr/bin/rkt run \
-{% if kubelet_load_modules == true %}
-        --volume lib-modules,kind=host,source=/lib/modules \
-{% endif %}
-        --volume os-release,kind=host,source=/etc/os-release,readOnly=true \
-        --volume hosts,kind=host,source=/etc/hosts,readOnly=true \
-        --volume dns,kind=host,source=/etc/resolv.conf \
-        --volume etc-kubernetes,kind=host,source={{ kube_config_dir }},readOnly=false \
-        --volume etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
-        --volume etcd-ssl,kind=host,source={{ etcd_config_dir }},readOnly=true \
-        --volume run,kind=host,source=/run,readOnly=false \
-        {% for dir in ssl_ca_dirs -%}
-        --volume {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }},kind=host,source={{ dir }},readOnly=true \
-        {% endfor -%}
-        --volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \
-        --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \
-        --volume var-log,kind=host,source=/var/log \
-{% if kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium", "kube-router"] %}
-        --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
-        --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
-        --volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \
-{% endif %}
-{% if kube_network_plugin in ["calico", "canal"] %}
-        --volume var-lib-calico,kind=host,source=/var/lib/calico,readOnly=false \
-{% endif %}
-{# we can run into issues with double mounting /var/lib/kubelet #}
-{# surely there's a better way to do this #}
-{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
-        --volume flexvolumes,kind=host,source={{ kubelet_flexvolumes_plugins_dir }},readOnly=false \
-{% endif -%}
-{% if local_volume_provisioner_enabled %}
-{% for class in local_volume_provisioner_storage_classes %}
-        --volume local-volume-provisioner-base-dir,kind=host,source={{ class.host_dir }},readOnly=false \
-{# Not pretty, but needed to avoid double mount #}
-{% if class.host_dir not in class.mount_dir and class.mount_dir not in class.host_dir %}
-        --volume local-volume-provisioner-mount-dir,kind=host,source={{ class.mount_dir }},readOnly=false \
-{% endif %}
-{% endfor %}
-{% endif %}
-{% if kubelet_load_modules == true %}
-        --mount volume=lib-modules,target=/lib/modules \
-{% endif %}
-        --mount volume=etc-cni,target=/etc/cni \
-        --mount volume=opt-cni,target=/opt/cni \
-        --mount volume=var-lib-cni,target=/var/lib/cni \
-{% if kube_network_plugin in ["calico", "canal"] %}
-        --mount volume=var-lib-calico,target=/var/lib/calico \
-{% endif %}
-        --mount volume=os-release,target=/etc/os-release \
-        --mount volume=dns,target=/etc/resolv.conf \
-        --mount volume=etc-kubernetes,target={{ kube_config_dir }} \
-        --mount volume=etc-ssl-certs,target=/etc/ssl/certs \
-        --mount volume=etcd-ssl,target={{ etcd_config_dir }} \
-        --mount volume=run,target=/run \
-        {% for dir in ssl_ca_dirs -%}
-        --mount volume={{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }},target={{ dir }} \
-        {% endfor -%}
-        --mount volume=var-lib-docker,target=/var/lib/docker \
-        --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
-        --mount volume=var-log,target=/var/log \
-        --mount volume=hosts,target=/etc/hosts \
-{# we can run into issues with double mounting /var/lib/kubelet #}
-{# surely there's a better way to do this #}
-{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
-        --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \
-{% endif -%}
-{% if local_volume_provisioner_enabled %}
-{% for class in local_volume_provisioner_storage_classes %}
-        --mount volume=local-volume-provisioner-base-dir,target={{ class.host_dir }} \
-{# Not pretty, but needed to avoid double mount #}
-{% if class.host_dir not in class.mount_dir and class.mount_dir not in class.host_dir %}
-        --mount volume=local-volume-provisioner-mount-dir,target={{ class.mount_dir }} \
-{% endif %}
-{% endfor %}
-{% endif %}
-        --stage1-from-dir=stage1-fly.aci \
-{% if kube_hyperkube_image_repo == "docker" %}
-        --insecure-options=image \
-        docker://{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
-{% else %}
-        {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
-{% endif %}
-        --uuid-file-save=/var/run/kubelet.uuid \
-        --debug --exec=/kubelet -- \
-                $KUBE_LOGTOSTDERR \
-                $KUBE_LOG_LEVEL \
-                $KUBELET_API_SERVER \
-                $KUBELET_ADDRESS \
-                $KUBELET_PORT \
-                $KUBELET_HOSTNAME \
-                $KUBE_ALLOW_PRIV \
-                $KUBELET_ARGS \
-                $DOCKER_SOCKET \
-                $KUBELET_REGISTER_NODE \
-                $KUBELET_NETWORK_PLUGIN \
-                $KUBELET_VOLUME_PLUGIN \
-                $KUBELET_CLOUDPROVIDER
-
-ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet.uuid
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2
deleted file mode 100644
index fa6d6c3f3..000000000
--- a/roles/kubernetes/node/templates/kubelet.standard.env.j2
+++ /dev/null
@@ -1,151 +0,0 @@
-# logging to stderr means we get it in the systemd journal
-KUBE_LOGTOSTDERR="--logtostderr=true"
-KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
-# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
-KUBELET_ADDRESS="--address={{ kubelet_bind_address }} --node-ip={{ kubelet_address }}"
-# The port for the info server to serve on
-# KUBELET_PORT="--port=10250"
-{% if kube_override_hostname|default('') %}
-# You may leave this blank to use the actual hostname
-KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
-{% endif %}
-{# Base kubelet args #}
-{% set kubelet_args_base %}
---pod-manifest-path={{ kube_manifest_dir }} \
-{% if kube_version is version('v1.12.0', '<') %}
---cadvisor-port={{ kube_cadvisor_port }} \
-{% endif %}
---pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
---node-status-update-frequency={{ kubelet_status_update_frequency }} \
-{% if container_manager == 'docker' and kube_version is version('v1.12.0', '<') %}
---docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
-{% endif %}
---client-ca-file={{ kube_cert_dir }}/ca.pem \
---tls-cert-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem \
---tls-private-key-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
---anonymous-auth=false \
---read-only-port={{ kube_read_only_port }} \
-{% if kube_version is version('v1.6', '>=') %}
-{# flag got removed with 1.7.0 #}
-{% if kube_version is version('v1.7', '<') %}
---enable-cri={{ kubelet_enable_cri }} \
-{% endif %}
-{% if container_manager == 'crio' %}
---container-runtime=remote \
---container-runtime-endpoint=/var/run/crio/crio.sock \
-{% endif %}
---cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
---cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
---max-pods={{ kubelet_max_pods }} \
-{% if kube_version is version('v1.8', '<') %}
---experimental-fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
-{% else %}
---fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
-{% endif %}
-{% if kubelet_authentication_token_webhook %}
---authentication-token-webhook \
-{% endif %}
-{% if kubelet_authorization_mode_webhook %}
---authorization-mode=Webhook \
-{% endif %}
-{% if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
---cgroup-driver=systemd \
-{% endif %}
---enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} {% endif %}{% endset %}
-
-{# DNS settings for kubelet #}
-{% if dns_mode in ['kubedns', 'coredns'] %}
-{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
-{% elif dns_mode == 'coredns_dual' %}
-{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %}
-{% elif dns_mode == 'dnsmasq_kubedns' %}
-{% set kubelet_args_cluster_dns %}--cluster-dns={{ dnsmasq_dns_server }}{% endset %}
-{% elif dns_mode == 'manual' %}
-{% set kubelet_args_cluster_dns %}--cluster-dns={{ manual_dns_server }}{% endset %}
-{% else %}
-{% set kubelet_args_cluster_dns %}{% endset %}
-{% endif %}
-{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
-
-{# Location of the apiserver #}
-{% if kube_version is version('v1.8', '<') %}
-{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig{% endset %}
-{% else %}
-{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml{% endset %}
-{% endif %}
-
-{% set role_node_taints = [] %}
-{% if standalone_kubelet|bool %}
-{# We are on a master-only host. Make the master unschedulable in this case. #}
-{% if kube_version is version('v1.6', '>=') %}
-{# Set taints on the master so that it's unschedulable by default. Use node-role.kubernetes.io/master taint like kubeadm. #}
-{% set dummy = role_node_taints.append('node-role.kubernetes.io/master=:NoSchedule') %}
-{% else %}
-{# --register-with-taints was added in 1.6 so just register unschedulable if Kubernetes < 1.6 #}
-{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-schedulable=false{% endset %}
-{% endif %}
-{% endif %}
-{% set all_node_taints = node_taints|default([]) + role_node_taints %}
-
-{# Node reserved CPU/memory #}
-{% if is_kube_master|bool %}
-{% set kube_reserved %}--kube-reserved cpu={{ kube_master_cpu_reserved }},memory={{ kube_master_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
-{% else %}
-{% set kube_reserved %}--kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
-{% endif %}
-
-{# Kubelet node labels #}
-{% set role_node_labels = [] %}
-{% if inventory_hostname in groups['kube-master'] %}
-{%   set dummy = role_node_labels.append("node-role.kubernetes.io/master=''") %}
-{%   if not standalone_kubelet|bool %}
-{%     set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
-{%   endif %}
-{% else %}
-{%   set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
-{% endif %}
-{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
-{%   if inventory_hostname in nvidia_gpu_nodes %}
-{%     set dummy = role_node_labels.append('nvidia.com/gpu=true')  %}
-{%   endif %}
-{% endif %}
-{% set inventory_node_labels = [] %}
-{% if node_labels is defined and node_labels is mapping %}
-{%   for labelname, labelvalue in node_labels.items() %}
-{%     set dummy = inventory_node_labels.append('%s=%s'|format(labelname, labelvalue)) %}
-{%   endfor %}
-{% endif %}
-{% set all_node_labels = role_node_labels + inventory_node_labels %}
-
-{# Kubelet node taints for gpu #}
-{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
-{%   if inventory_hostname in nvidia_gpu_nodes %}
-{%     set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-with-taints=nvidia.com/gpu=:NoSchedule{% endset %}
-{%   endif %}
-{% endif %}
-
-KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} {% if all_node_taints %}--register-with-taints={{ all_node_taints | join(',') }} {% endif %}--node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube-node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
-
-{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium", "kube-router"] %}
-KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
-{% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
-DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"
-{% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
-KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
-{% endif %}
-
-KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
-
-# Should this cluster be allowed to run privileged docker containers
-KUBE_ALLOW_PRIV="--allow-privileged=true"
-{% if cloud_provider is defined and cloud_provider in ["openstack", "vsphere", "aws"] %}
-KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
-{% elif cloud_provider is defined and cloud_provider in ["azure"] %}
-KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config --azure-container-registry-config={{ kube_config_dir }}/cloud_config"
-{% elif cloud_provider is defined and cloud_provider in ["oci", "external"] %}
-KUBELET_CLOUDPROVIDER="--cloud-provider=external"
-{% else %}
-KUBELET_CLOUDPROVIDER=""
-{% endif %}
-
-PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
deleted file mode 100644
index b40963b3e..000000000
--- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
+++ /dev/null
@@ -1,110 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: kube-proxy
-  namespace: kube-system
-  labels:
-    k8s-app: kube-proxy
-  annotations:
-    kubespray.kube-proxy-cert/serial: "{{ kube_proxy_cert_serial }}"
-spec:
-  hostNetwork: true
-{% if kube_version is version('v1.6', '>=') %}
-  dnsPolicy: ClusterFirst
-{% endif %}
-  nodeSelector:
-    beta.kubernetes.io/os: linux
-{% if kube_version is version('v1.11.1', '>=') %}
-  priorityClassName: system-node-critical
-{% endif %}
-  containers:
-  - name: kube-proxy
-    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
-    imagePullPolicy: {{ k8s_image_pull_policy }}
-    resources:
-      limits:
-        cpu: {{ kube_proxy_cpu_limit }}
-        memory: {{ kube_proxy_memory_limit }}
-      requests:
-        cpu: {{ kube_proxy_cpu_requests }}
-        memory: {{ kube_proxy_memory_requests }}
-    livenessProbe:
-      httpGet:
-        host: 127.0.0.1
-        path: /healthz
-        port: 10256
-      failureThreshold: 8
-      initialDelaySeconds: 15
-      periodSeconds: 10
-      successThreshold: 1
-      timeoutSeconds: 15
-    command:
-    - /hyperkube
-    - proxy
-    - --v={{ kube_log_level }}
-    - --kubeconfig={{kube_config_dir}}/kube-proxy-kubeconfig.yaml
-    - --bind-address={{ ip | default(ansible_default_ipv4.address) }}
-    - --cluster-cidr={{ kube_pods_subnet }}
-    - --proxy-mode={{ kube_proxy_mode }}
-    - --oom-score-adj=-998
-    - --healthz-bind-address={{ kube_proxy_healthz_bind_address }}
-    - --resource-container=""
-{% if kube_proxy_nodeport_addresses %}
-    - --nodeport-addresses={{ kube_proxy_nodeport_addresses_cidr }}
-{% endif %}
-{% if kube_proxy_masquerade_all and kube_proxy_mode == "iptables" %}
-    - --masquerade-all
-{% elif kube_proxy_mode == 'ipvs' %}
-    - --masquerade-all
-{% if kube_version is version('v1.10', '<') %}
-    - --feature-gates=SupportIPVSProxyMode=true
-{% endif %}
-    - --ipvs-min-sync-period=5s
-    - --ipvs-sync-period=5s
-    - --ipvs-scheduler=rr
-{% endif %}
-    securityContext:
-      privileged: true
-    volumeMounts:
-    - mountPath: /etc/ssl/certs
-      name: ssl-certs-host
-      readOnly: true
-    - mountPath: "{{ kube_config_dir }}/ssl"
-      name: etc-kube-ssl
-      readOnly: true
-    - mountPath: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml"
-      name: kubeconfig
-      readOnly: true
-    - mountPath: /var/run/dbus
-      name: var-run-dbus
-      readOnly: false
-    - mountPath: /lib/modules
-      name: lib-modules
-      readOnly: true
-    - mountPath: /run/xtables.lock
-      name: xtables-lock
-      readOnly: false
-  volumes:
-  - name: ssl-certs-host
-    hostPath:
-{% if ansible_os_family == 'RedHat' %}
-      path: /etc/pki/tls
-{% else %}
-      path: /usr/share/ca-certificates
-{% endif %}
-  - name: etc-kube-ssl
-    hostPath:
-      path: "{{ kube_config_dir }}/ssl"
-  - name: kubeconfig
-    hostPath:
-      path: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml"
-  - name: var-run-dbus
-    hostPath:
-      path: /var/run/dbus
-  - hostPath:
-      path: /lib/modules
-    name: lib-modules
-  - hostPath:
-      path: /run/xtables.lock
-      type: FileOrCreate
-    name: xtables-lock
diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
index 881f850df..c779ff94a 100644
--- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
@@ -44,7 +44,6 @@
     msg: "{{item.value}} isn't a bool"
   run_once: yes
   with_items:
-    - { name: kubeadm_enabled, value: "{{ kubeadm_enabled }}" }
     - { name: download_run_once, value: "{{ download_run_once }}" }
     - { name: deploy_netchecker, value: "{{ deploy_netchecker }}" }
     - { name: download_always_pull, value: "{{ download_always_pull }}" }
@@ -141,6 +140,8 @@
   register: calico_version_on_server
   run_once: yes
   delegate_to: "{{ groups['kube-master'][0] }}"
+  when:
+    - kube_network_plugin == 'calico'
 
 - name: "Check that calico version is enough for upgrade"
   assert:
@@ -148,6 +149,7 @@
       - calico_version_on_server.stdout is version('v2.6.5', '>=')
     msg: "Your version of calico is not fresh enough for upgrade. Minimum version v2.6.5"
   when:
+    - kube_network_plugin == 'calico'
     - 'calico_version_on_server.stdout is defined'
     - 'calico_version_on_server.stdout != ""'
     - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
index f0d3001de..db3906432 100644
--- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml
@@ -170,7 +170,6 @@
   set_fact:
     kube_proxy_mode: 'ipvs'
   when:
-    - kubeadm_enabled
     - kube_proxy_remove
   tags:
     - facts
diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml
deleted file mode 100644
index e6177857e..000000000
--- a/roles/kubernetes/secrets/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-kube_cert_group: kube-cert
diff --git a/roles/kubernetes/secrets/files/certs/.gitkeep b/roles/kubernetes/secrets/files/certs/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
diff --git a/roles/kubernetes/secrets/handlers/main.yml b/roles/kubernetes/secrets/handlers/main.yml
deleted file mode 100644
index f6f12a003..000000000
--- a/roles/kubernetes/secrets/handlers/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: set secret_changed
-  command: /bin/true
-  notify:
-    - set secret_changed to true
-    - clear kubeconfig for root user
-
-- name: set secret_changed to true
-  set_fact:
-    secret_changed: true
-
-- name: clear kubeconfig for root user
-  file:
-    path: /root/.kube/config
-    state: absent
diff --git a/roles/kubernetes/secrets/meta/main.yml b/roles/kubernetes/secrets/meta/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/kubernetes/secrets/meta/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml
deleted file mode 100644
index 52ec6c6cc..000000000
--- a/roles/kubernetes/secrets/tasks/check-certs.yml
+++ /dev/null
@@ -1,82 +0,0 @@
----
-- name: "Check_certs | check if the certs have already been generated on first master"
-  find:
-    paths: "{{ kube_cert_dir }}"
-    patterns: "*.pem"
-    get_checksum: true
-  delegate_to: "{{groups['kube-master'][0]}}"
-  register: kubecert_master
-  run_once: true
-
-- name: "Check_certs | Set default value for 'sync_certs', 'gen_certs', and 'secret_changed' to false"
-  set_fact:
-    sync_certs: false
-    gen_certs: false
-    secret_changed: false
-
-- name: "Check_certs | Set 'gen_certs' to true"
-  set_fact:
-    gen_certs: true
-  when: "not item in kubecert_master.files|map(attribute='path') | list"
-  run_once: true
-  with_items: >-
-       ['{{ kube_cert_dir }}/ca.pem',
-       '{{ kube_cert_dir }}/apiserver.pem',
-       '{{ kube_cert_dir }}/apiserver-key.pem',
-       '{{ kube_cert_dir }}/kube-scheduler.pem',
-       '{{ kube_cert_dir }}/kube-scheduler-key.pem',
-       '{{ kube_cert_dir }}/kube-controller-manager.pem',
-       '{{ kube_cert_dir }}/kube-controller-manager-key.pem',
-       '{{ kube_cert_dir }}/front-proxy-ca.pem',
-       '{{ kube_cert_dir }}/front-proxy-ca-key.pem',
-       '{{ kube_cert_dir }}/front-proxy-client.pem',
-       '{{ kube_cert_dir }}/front-proxy-client-key.pem',
-       '{{ kube_cert_dir }}/service-account-key.pem',
-       {% for host in groups['kube-master'] %}
-       '{{ kube_cert_dir }}/admin-{{ host }}.pem',
-       '{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
-       {% if not loop.last %}{{','}}{% endif %}
-       {% endfor %},
-       {% for host in groups['k8s-cluster'] %}
-       '{{ kube_cert_dir }}/node-{{ host }}.pem',
-       '{{ kube_cert_dir }}/node-{{ host }}-key.pem',
-       '{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem',
-       '{{ kube_cert_dir }}/kube-proxy-{{ host }}-key.pem'
-       {% if not loop.last %}{{','}}{% endif %}
-       {% endfor %}]
-
-- name: "Check_certs | Set 'gen_master_certs' to true"
-  set_fact:
-    gen_master_certs: |-
-      {%- set gen = False -%}
-      {% set existing_certs = kubecert_master.files|map(attribute='path')|list|sort %}
-      {% for cert in ['apiserver.pem', 'apiserver-key.pem',
-                      'kube-scheduler.pem','kube-scheduler-key.pem',
-                      'kube-controller-manager.pem','kube-controller-manager-key.pem',
-                      'front-proxy-ca.pem','front-proxy-ca-key.pem',
-                      'front-proxy-client.pem','front-proxy-client-key.pem',
-                      'service-account-key.pem'] -%}
-        {% set cert_file = "%s/%s.pem"|format(kube_cert_dir, cert) %}
-        {% if not cert_file in existing_certs -%}
-        {%- set gen = True -%}
-        {% endif -%}
-      {% endfor %}
-      {{ gen }}
-  run_once: true
-
-- name: "Check_certs | Set 'gen_node_certs' to true"
-  set_fact:
-    gen_node_certs: |-
-      {
-      {% set existing_certs = kubecert_master.files|map(attribute='path')|list|sort %}
-      {% for host in groups['k8s-cluster'] -%}
-        {% set host_cert = "%s/node-%s-key.pem"|format(kube_cert_dir, host) %}
-        {% set kube_proxy_cert = "%s/kube-proxy-%s-key.pem"|format(kube_cert_dir, host) %}
-        {% if host_cert in existing_certs and kube_proxy_cert in existing_certs -%}
-        "{{ host }}": False,
-        {% else -%}
-        "{{ host }}": True,
-        {% endif -%}
-      {% endfor %}
-      }
-  run_once: true
diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
deleted file mode 100644
index ea243f4db..000000000
--- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml
+++ /dev/null
@@ -1,227 +0,0 @@
----
-- name: "Gen_certs | Create kubernetes config directory (on {{groups['kube-master'][0]}})"
-  file:
-    path: "{{ kube_config_dir }}"
-    state: directory
-    owner: kube
-  run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
-  when: gen_certs|default(false)
-  tags:
-    - kubelet
-    - k8s-secrets
-    - kube-controller-manager
-    - kube-apiserver
-    - apps
-    - network
-    - master
-    - node
-
-- name: "Gen_certs | Create kubernetes script directory (on {{groups['kube-master'][0]}})"
-  file:
-    path: "{{ kube_script_dir }}"
-    state: directory
-    owner: kube
-  run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
-  when: gen_certs|default(false)
-  tags:
-    - k8s-secrets
-
-- name: Gen_certs | write masters openssl config
-  template:
-    src: "openssl-master.conf.j2"
-    dest: "{{ kube_config_dir }}/openssl-master.conf"
-  run_once: yes
-  delegate_to: "{{ groups['kube-master']|first }}"
-  when: gen_certs|default(false)
-
-- name: Gen_certs | write nodes openssl config
-  template:
-    src: "openssl-node.conf.j2"
-    dest: "{{ kube_config_dir }}/{{ inventory_hostname }}-openssl.conf"
-  delegate_to: "{{ groups['kube-master']|first }}"
-  when: gen_certs|default(false) and inventory_hostname in groups['k8s-cluster']
-
-- name: Gen_certs | copy certs generation script
-  template:
-    src: "make-ssl.sh.j2"
-    dest: "{{ kube_script_dir }}/make-ssl.sh"
-    mode: 0700
-  run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
-  when: gen_certs|default(false)
-
-- name: Gen_certs | run master cert generation script
-  command: "{{ kube_script_dir }}/make-ssl.sh -f {{ kube_config_dir }}/openssl-master.conf -d {{ kube_cert_dir }}"
-  environment:
-    - MASTERS: "{% for m in groups['kube-master'] %}
-                  {% if gen_master_certs|default(false) %}
-                    {{ m }}
-                  {% endif %}
-                {% endfor %}"
-  delegate_to: "{{ groups['kube-master']|first }}"
-  run_once: true
-  when: gen_certs|default(false)
-  notify: set secret_changed
-
-- name: Gen_certs | run nodes cert generation script
-  command: "{{ kube_script_dir }}/make-ssl.sh -f {{ kube_config_dir }}/{{ inventory_hostname }}-openssl.conf -d {{ kube_cert_dir }}"
-  environment:
-    - HOSTS: "{{ inventory_hostname }}"
-  delegate_to: "{{ groups['kube-master']|first }}"
-  when: gen_certs|default(false) and inventory_hostname in groups['k8s-cluster']
-  notify: set secret_changed
-
-- set_fact:
-    all_master_certs: "['ca-key.pem',
-                       'apiserver.pem',
-                       'apiserver-key.pem',
-                       'kube-scheduler.pem',
-                       'kube-scheduler-key.pem',
-                       'kube-controller-manager.pem',
-                       'kube-controller-manager-key.pem',
-                       'front-proxy-ca.pem',
-                       'front-proxy-ca-key.pem',
-                       'front-proxy-client.pem',
-                       'front-proxy-client-key.pem',
-                       'service-account-key.pem',
-                       {% for node in groups['kube-master'] %}
-                       'admin-{{ node }}.pem',
-                       'admin-{{ node }}-key.pem',
-                      {% endfor %}]"
-    my_master_certs: ['ca-key.pem',
-                      'admin-{{ inventory_hostname }}.pem',
-                      'admin-{{ inventory_hostname }}-key.pem',
-                      'apiserver.pem',
-                      'apiserver-key.pem',
-                      'front-proxy-ca.pem',
-                      'front-proxy-ca-key.pem',
-                      'front-proxy-client.pem',
-                      'front-proxy-client-key.pem',
-                      'service-account-key.pem',
-                      'kube-scheduler.pem',
-                      'kube-scheduler-key.pem',
-                      'kube-controller-manager.pem',
-                      'kube-controller-manager-key.pem']
-    all_node_certs: "['ca.pem',
-                    {% for node in groups['k8s-cluster'] %}
-                    'node-{{ node }}.pem',
-                    'node-{{ node }}-key.pem',
-                    'kube-proxy-{{ node }}.pem',
-                    'kube-proxy-{{ node }}-key.pem',
-                    {% endfor %}]"
-    my_node_certs: ['ca.pem',
-                    'node-{{ inventory_hostname }}.pem',
-                    'node-{{ inventory_hostname }}-key.pem',
-                    'kube-proxy-{{ inventory_hostname }}.pem',
-                    'kube-proxy-{{ inventory_hostname }}-key.pem']
-  tags:
-    - facts
-
-- name: "Check certs | check if a cert already exists on node"
-  find:
-    paths: "{{ kube_cert_dir }}"
-    patterns: "*.pem"
-    get_checksum: true
-  register: kubecert_node
-  when: inventory_hostname != groups['kube-master'][0]
-
-- name: "Check_certs | Set 'sync_certs' to true on masters"
-  set_fact:
-    sync_certs: true
-  when: inventory_hostname in groups['kube-master'] and
-        inventory_hostname != groups['kube-master'][0] and
-        (not item in kubecert_node.files | map(attribute='path') | map("basename") | list or
-        kubecert_node.files | selectattr("path", "equalto", '%s/%s'|format(kube_cert_dir, item)) | map(attribute="checksum")|first|default('') != kubecert_master.files | selectattr("path", "equalto", '%s/%s'|format(kube_cert_dir, item)) | map(attribute="checksum")|first|default(''))
-  with_items:
-    - "{{ my_master_certs + all_node_certs }}"
-
-- name: "Check_certs | Set 'sync_certs' to true on nodes"
-  set_fact:
-    sync_certs: true
-  when: inventory_hostname in groups['kube-node'] and
-        inventory_hostname != groups['kube-master'][0] and
-        (not item in kubecert_node.files | map(attribute='path') | map("basename") | list or
-        kubecert_node.files | selectattr("path", "equalto", '%s/%s'|format(kube_cert_dir, item)) | map(attribute="checksum")|first|default('') != kubecert_master.files | selectattr("path", "equalto", '%s/%s'|format(kube_cert_dir, item)) | map(attribute="checksum")|first|default(''))
-  with_items:
-    - "{{ my_node_certs }}"
-
-- name: Gen_certs | Gather master certs
-  shell: "tar cfz - -C {{ kube_cert_dir }} -T /dev/stdin <<< {{ my_master_certs|join(' ') }} {{ all_node_certs|join(' ') }} | base64 --wrap=0"
-  args:
-    executable: /bin/bash
-  no_log: true
-  register: master_cert_data
-  check_mode: no
-  delegate_to: "{{groups['kube-master'][0]}}"
-  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
-        inventory_hostname != groups['kube-master'][0]
-
-- name: Gen_certs | Gather node certs
-  shell: "tar cfz - -C {{ kube_cert_dir }} -T /dev/stdin <<< {{ my_node_certs|join(' ') }} | base64 --wrap=0"
-  args:
-    executable: /bin/bash
-  no_log: true
-  register: node_cert_data
-  check_mode: no
-  delegate_to: "{{groups['kube-master'][0]}}"
-  when: inventory_hostname in groups['kube-node'] and
-        sync_certs|default(false) and
-        inventory_hostname != groups['kube-master'][0]
-
-# NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k
-# char limit when using shell command
-
-# FIXME(mattymo): Use tempfile module in ansible 2.3
-- name: Gen_certs | Prepare tempfile for unpacking certs on masters
-  command: mktemp /tmp/certsXXXXX.tar.gz
-  register: cert_tempfile
-  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
-        inventory_hostname != groups['kube-master'][0]
-
-- name: Gen_certs | Write master certs to tempfile
-  copy:
-    content: "{{master_cert_data.stdout}}"
-    dest: "{{cert_tempfile.stdout}}"
-    owner: root
-    mode: "0600"
-  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
-        inventory_hostname != groups['kube-master'][0]
-
-- name: Gen_certs | Unpack certs on masters
-  shell: "base64 -d < {{ cert_tempfile.stdout }} | tar xz -C {{ kube_cert_dir }}"
-  no_log: true
-  changed_when: false
-  check_mode: no
-  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
-        inventory_hostname != groups['kube-master'][0]
-  notify: set secret_changed
-
-- name: Gen_certs | Cleanup tempfile on masters
-  file:
-    path: "{{cert_tempfile.stdout}}"
-    state: absent
-  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
-        inventory_hostname != groups['kube-master'][0]
-
-- name: Gen_certs | Copy certs on nodes
-  shell: "base64 -d <<< '{{node_cert_data.stdout|quote}}' | tar xz -C {{ kube_cert_dir }}"
-  args:
-    executable: /bin/bash
-  no_log: true
-  changed_when: false
-  check_mode: no
-  when: inventory_hostname in groups['kube-node'] and
-        sync_certs|default(false) and
-        inventory_hostname != groups['kube-master'][0]
-  notify: set secret_changed
-
-- name: Gen_certs | check certificate permissions
-  file:
-    path: "{{ kube_cert_dir }}"
-    group: "{{ kube_cert_group }}"
-    state: directory
-    owner: kube
-    mode: "u=rwX,g-rwx,o-rwx"
-    recurse: yes
diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml
deleted file mode 100644
index ea5f604c5..000000000
--- a/roles/kubernetes/secrets/tasks/main.yml
+++ /dev/null
@@ -1,109 +0,0 @@
----
-- import_tasks: check-certs.yml
-  tags:
-    - k8s-secrets
-    - k8s-gen-certs
-    - facts
-
-- name: Make sure the certificate directory exits
-  file:
-    path: "{{ kube_cert_dir }}"
-    state: directory
-    mode: o-rwx
-    group: "{{ kube_cert_group }}"
-
-#
-# The following directory creates make sure that the directories
-# exist on the first master for cases where the first master isn't
-# being run.
-#
-- name: "Gen_certs | Create kubernetes config directory (on {{groups['kube-master'][0]}})"
-  file:
-    path: "{{ kube_config_dir }}"
-    state: directory
-    owner: kube
-  run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
-  when: gen_certs|default(false)
-  tags:
-    - kubelet
-    - k8s-secrets
-    - kube-controller-manager
-    - kube-apiserver
-    - apps
-    - network
-    - master
-    - node
-
-- name: "Gen_certs | Create kubernetes script directory (on {{groups['kube-master'][0]}})"
-  file:
-    path: "{{ kube_script_dir }}"
-    state: directory
-    owner: kube
-  run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
-  when: gen_certs|default(false)
-  tags:
-    - k8s-secrets
-
-- include_tasks: "gen_certs_script.yml"
-  when:
-    - cert_management |d('script') == 'script'
-  tags:
-    - k8s-secrets
-    - k8s-gen-certs
-
-- import_tasks: upd_ca_trust.yml
-  tags:
-    - k8s-secrets
-    - k8s-gen-certs
-
-- name: "Gen_certs | Get certificate serials on kube masters"
-  shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2"
-  register: "master_certificate_serials"
-  changed_when: false
-  with_items:
-    - "admin-{{ inventory_hostname }}.pem"
-    - "apiserver.pem"
-    - "kube-controller-manager.pem"
-    - "kube-scheduler.pem"
-  when: inventory_hostname in groups['kube-master']
-  tags:
-    - master
-    - kubelet
-    - node
-
-- name: "Gen_certs | set kube master certificate serial facts"
-  set_fact:
-    etcd_admin_cert_serial: "{{ master_certificate_serials.results[0].stdout|default() }}"
-    apiserver_cert_serial: "{{ master_certificate_serials.results[1].stdout|default() }}"
-    controller_manager_cert_serial: "{{ master_certificate_serials.results[2].stdout|default() }}"
-    scheduler_cert_serial: "{{ master_certificate_serials.results[3].stdout|default() }}"
-  when: inventory_hostname in groups['kube-master']
-  tags:
-    - master
-    - kubelet
-    - node
-
-- name: "Gen_certs | Get certificate serials on kube nodes"
-  shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2"
-  register: "node_certificate_serials"
-  changed_when: false
-  with_items:
-    - "node-{{ inventory_hostname }}.pem"
-    - "kube-proxy-{{ inventory_hostname }}.pem"
-  when:
-    - inventory_hostname in groups['k8s-cluster']
-  tags:
-    - node
-    - kube-proxy
-
-- name: "Gen_certs | set kube node certificate serial facts"
-  set_fact:
-    kubelet_cert_serial: "{{ node_certificate_serials.results[0].stdout|default() }}"
-    kube_proxy_cert_serial: "{{ node_certificate_serials.results[1].stdout|default() }}"
-  when: inventory_hostname in groups['k8s-cluster']
-  tags:
-    - kubelet
-    - node
-    - kube-proxy
diff --git a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml
deleted file mode 100644
index cdd5f48fa..000000000
--- a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Gen_certs | target ca-certificates path
-  set_fact:
-    ca_cert_path: |-
-      {% if ansible_os_family == "Debian" -%}
-      /usr/local/share/ca-certificates/kube-ca.crt
-      {%- elif ansible_os_family == "RedHat" -%}
-      /etc/pki/ca-trust/source/anchors/kube-ca.crt
-      {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
-      /etc/ssl/certs/kube-ca.pem
-      {%- elif ansible_os_family == "Suse" -%}
-      /etc/pki/trust/anchors/kube-ca.pem
-      {%- endif %}
-  tags:
-    - facts
-
-- name: Gen_certs | add CA to trusted CA dir
-  copy:
-    src: "{{ kube_cert_dir }}/ca.pem"
-    dest: "{{ ca_cert_path }}"
-    remote_src: true
-  register: kube_ca_cert
-
-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
-  command: update-ca-certificates
-  when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"]
-
-- name: Gen_certs | update ca-certificates (RedHat)
-  command: update-ca-trust extract
-  when: kube_ca_cert.changed and ansible_os_family == "RedHat"
diff --git a/roles/kubernetes/secrets/templates/make-ssl.sh.j2 b/roles/kubernetes/secrets/templates/make-ssl.sh.j2
deleted file mode 100755
index c99465b74..000000000
--- a/roles/kubernetes/secrets/templates/make-ssl.sh.j2
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/bin/bash
-
-# Author: Smana smainklh@gmail.com
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -o errexit
-set -o pipefail
-
-usage()
-{
-    cat << EOF
-Create self signed certificates
-
-Usage : $(basename $0) -f <config> [-d <ssldir>]
-      -h | --help         : Show this message
-      -f | --config       : Openssl configuration file
-      -d | --ssldir       : Directory where the certificates will be installed
-
-      Environmental variables MASTERS and HOSTS should be set to generate keys
-      for each host.
-
-           ex :
-           MASTERS=node1 HOSTS="node1 node2" $(basename $0) -f openssl.conf -d /srv/ssl
-EOF
-}
-
-# Options parsing
-while (($#)); do
-    case "$1" in
-        -h | --help)   usage;   exit 0;;
-        -f | --config) CONFIG=${2}; shift 2;;
-        -d | --ssldir) SSLDIR="${2}"; shift 2;;
-        *)
-            usage
-            echo "ERROR : Unknown option"
-            exit 3
-        ;;
-    esac
-done
-
-if [ -z ${CONFIG} ]; then
-    echo "ERROR: the openssl configuration file is missing. option -f"
-    exit 1
-fi
-if [ -z ${SSLDIR} ]; then
-    SSLDIR="/etc/kubernetes/certs"
-fi
-
-tmpdir=$(mktemp -d /tmp/kubernetes_cacert.XXXXXX)
-trap 'rm -rf "${tmpdir}"' EXIT
-cd "${tmpdir}"
-
-mkdir -p "${SSLDIR}"
-
-# Root CA
-if [ -e "$SSLDIR/ca-key.pem" ]; then
-    # Reuse existing CA
-    cp $SSLDIR/{ca.pem,ca-key.pem} .
-else
-    openssl genrsa -out ca-key.pem {{certificates_key_size}} > /dev/null 2>&1
-    openssl req -x509 -new -nodes -key ca-key.pem -days {{certificates_duration}} -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
-fi
-
-# Front proxy client CA
-if [ -e "$SSLDIR/front-proxy-ca-key.pem" ]; then
-    # Reuse existing front proxy CA
-    cp $SSLDIR/{front-proxy-ca.pem,front-proxy-ca-key.pem} .
-else
-    openssl genrsa -out front-proxy-ca-key.pem {{certificates_key_size}} > /dev/null 2>&1
-    openssl req -x509 -new -nodes -key front-proxy-ca-key.pem -days {{certificates_duration}} -out front-proxy-ca.pem -subj "/CN=front-proxy-ca" > /dev/null 2>&1
-fi
-
-gen_key_and_cert() {
-    local name=$1
-    local subject=$2
-    openssl genrsa -out ${name}-key.pem {{certificates_key_size}} > /dev/null 2>&1
-    openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
-    openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days {{certificates_duration}} -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
-}
-
-gen_key_and_cert_front_proxy() {
-    local name=$1
-    local subject=$2
-    openssl genrsa -out ${name}-key.pem {{certificates_key_size}} > /dev/null 2>&1
-    openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
-    openssl x509 -req -in ${name}.csr -CA front-proxy-ca.pem -CAkey front-proxy-ca-key.pem -CAcreateserial -out ${name}.pem -days {{certificates_duration}} -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
-}
-
-# Admins
-if [ -n "$MASTERS" ]; then
-
-    # service-account
-    # If --service-account-private-key-file was previously configured to use apiserver-key.pem then copy that to the new dedicated service-account signing key location to avoid disruptions
-    if [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then
-       cp $SSLDIR/apiserver-key.pem $SSLDIR/service-account-key.pem
-    fi
-    # Generate dedicated service account signing key if one doesn't exist
-    if ! [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then
-        openssl genrsa -out service-account-key.pem {{certificates_key_size}} > /dev/null 2>&1
-    fi
-
-    # kube-apiserver
-    # Generate only if we don't have existing ca and apiserver certs
-    if ! [ -e "$SSLDIR/ca-key.pem" ] || ! [ -e "$SSLDIR/apiserver-key.pem" ]; then
-      gen_key_and_cert "apiserver" "/CN=kube-apiserver"
-      cat ca.pem >> apiserver.pem
-    fi
-    # If any host requires new certs, just regenerate scheduler and controller-manager master certs
-    # kube-scheduler
-    gen_key_and_cert "kube-scheduler" "/CN=system:kube-scheduler"
-    # kube-controller-manager
-    gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager"
-    # metrics aggregator
-    gen_key_and_cert_front_proxy "front-proxy-client" "/CN=front-proxy-client"
-
-    for host in $MASTERS; do
-        cn="${host}"
-        # admin
-        gen_key_and_cert "admin-${host}" "/CN=kube-admin-${cn}/O=system:masters"
-    done
-fi
-
-# Nodes
-if [ -n "$HOSTS" ]; then
-    for host in $HOSTS; do
-        cn="${host}"
-        gen_key_and_cert "node-${host}" "/CN=system:node:${cn,,}/O=system:nodes"
-    done
-fi
-
-# system:node-proxier
-if [ -n "$HOSTS" ]; then
-    for host in $HOSTS; do
-        # kube-proxy
-        gen_key_and_cert "kube-proxy-${host}" "/CN=system:kube-proxy/O=system:node-proxier"
-    done
-fi
-
-# Install certs
-mv *.pem ${SSLDIR}/
diff --git a/roles/kubernetes/secrets/templates/openssl-master.conf.j2 b/roles/kubernetes/secrets/templates/openssl-master.conf.j2
deleted file mode 100644
index 38902aeef..000000000
--- a/roles/kubernetes/secrets/templates/openssl-master.conf.j2
+++ /dev/null
@@ -1,42 +0,0 @@
-{% set counter = {'dns': 6,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
-req_extensions = v3_req
-distinguished_name = req_distinguished_name
-[req_distinguished_name]
-[ v3_req ]
-basicConstraints = CA:FALSE
-keyUsage = nonRepudiation, digitalSignature, keyEncipherment
-subjectAltName = @alt_names
-[alt_names]
-DNS.1 = kubernetes
-DNS.2 = kubernetes.default
-DNS.3 = kubernetes.default.svc
-DNS.4 = kubernetes.default.svc.{{ dns_domain }}
-DNS.5 = localhost
-{% for host in groups['kube-master'] %}
-DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
-{% endfor %}
-{% if apiserver_loadbalancer_domain_name is defined  %}
-DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
-{% endif %}
-{% for host in groups['kube-master'] %}
-{% if hostvars[host]['access_ip'] is defined  %}
-IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
-{% endif %}
-IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
-{% endfor %}
-{% if kube_apiserver_ip is defined  %}
-IP.{{ counter["ip"] }} = {{ kube_apiserver_ip }}{{ increment(counter, 'ip') }}
-{% endif %}
-{% if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined  %}
-IP.{{ counter["ip"] }} = {{ loadbalancer_apiserver.address }}{{ increment(counter, 'ip') }}
-{% endif %}
-{% if supplementary_addresses_in_ssl_keys is defined %}
-{% for addr in supplementary_addresses_in_ssl_keys %}
-{% if addr | ipaddr %}
-IP.{{ counter["ip"] }} = {{ addr }}{{ increment(counter, 'ip') }}
-{% else %}
-DNS.{{ counter["dns"] }} = {{ addr }}{{ increment(counter, 'dns') }}
-{% endif %}
-{% endfor %}
-{% endif %}
-IP.{{ counter["ip"] }} = 127.0.0.1
diff --git a/roles/kubernetes/secrets/templates/openssl-node.conf.j2 b/roles/kubernetes/secrets/templates/openssl-node.conf.j2
deleted file mode 100644
index f625f6d76..000000000
--- a/roles/kubernetes/secrets/templates/openssl-node.conf.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{% set counter = {'dns': 6,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
-req_extensions = v3_req
-distinguished_name = req_distinguished_name
-[req_distinguished_name]
-[ v3_req ]
-basicConstraints = CA:FALSE
-keyUsage = nonRepudiation, digitalSignature, keyEncipherment
-subjectAltName = @alt_names
-[alt_names]
-DNS.1 = kubernetes
-DNS.2 = kubernetes.default
-DNS.3 = kubernetes.default.svc
-DNS.4 = kubernetes.default.svc.{{ dns_domain }}
-DNS.5 = localhost
-DNS.{{ counter["dns"] }} = {{ inventory_hostname }}{{ increment(counter, 'dns') }}
-{% if hostvars[inventory_hostname]['access_ip'] is defined  %}
-IP.{{ counter["ip"] }} = {{ hostvars[inventory_hostname]['access_ip'] }}{{ increment(counter, 'ip') }}
-{% endif %}
-IP.{{ counter["ip"] }} = {{ hostvars[inventory_hostname]['ip'] | default(hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
-IP.{{ counter["ip"] }} = 127.0.0.1
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 7446cb93c..50d058626 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -226,14 +226,10 @@ docker_options: >-
 
 # Settings for containerized control plane (etcd/kubelet/secrets)
 etcd_deployment_type: docker
-kubelet_deployment_type: docker
 cert_management: script
 
 helm_deployment_type: host
 
-# Enable kubeadm deployment
-kubeadm_enabled: true
-
 # Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
 kubeconfig_localhost: false
 # Download kubectl onto the host that runs Ansible in {{ bin_dir }}
@@ -282,7 +278,7 @@ openstack_cacert: "{{ lookup('env','OS_CACERT') }}"
 ## the k8s cluster. Only 'AlwaysAllow', 'AlwaysDeny', 'Node' and
 ## 'RBAC' modes are tested. Order is important.
 authorization_modes: ['Node', 'RBAC']
-rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
+rbac_enabled: "{{ 'RBAC' in authorization_modes }}"
 
 # When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet’s HTTPS endpoint
 kubelet_authentication_token_webhook: true
@@ -395,18 +391,8 @@ kube_apiserver_endpoint: |-
   {%- endif %}
 kube_apiserver_insecure_endpoint: >-
   http://{{ kube_apiserver_insecure_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_insecure_port }}
-kube_apiserver_client_cert: |-
-  {% if kubeadm_enabled -%}
-  {{ kube_cert_dir }}/ca.crt
-  {%- else -%}
-  {{ kube_cert_dir }}/apiserver.pem
-  {%- endif %}
-kube_apiserver_client_key: |-
-  {% if kubeadm_enabled -%}
-  {{ kube_cert_dir }}/ca.key
-  {%- else -%}
-  {{ kube_cert_dir }}/apiserver-key.pem
-  {%- endif %}
+kube_apiserver_client_cert: "{{ kube_cert_dir }}/ca.crt"
+kube_apiserver_client_key: "{{ kube_cert_dir }}/ca.key"
 
 # Set to true to deploy etcd-events cluster
 etcd_events_cluster_enabled: false
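
With kubeadm mandatory, rbac_enabled reduces to plain list membership and the apiserver client cert/key collapse to kubeadm's CA pair. Overriding remains the usual group_vars exercise — a hypothetical inventory snippet:

    # inventory/sample/group_vars/all/all.yml — hypothetical override
    authorization_modes: ['Node', 'RBAC']   # rbac_enabled is now derived from this list alone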
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index 5db5fa13a..8a39ba120 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -2,7 +2,7 @@
 
 - name: remove-node | Drain node except daemonsets resource
   command: >-
-    {{ bin_dir }}/kubectl drain
+    {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf drain
       --force
       --ignore-daemonsets
       --grace-period {{ drain_grace_period }}
diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml
index 3d16489ff..d5d59025c 100644
--- a/roles/upgrade/post-upgrade/tasks/main.yml
+++ b/roles/upgrade/post-upgrade/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Uncordon node
-  command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}"
+  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ inventory_hostname }}"
   delegate_to: "{{ groups['kube-master'][0] }}"
   when:
     - needs_cordoning|default(false)
diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml
index 89574cbff..368ff890c 100644
--- a/roles/win_nodes/kubernetes_patch/tasks/main.yml
+++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml
@@ -15,11 +15,11 @@
         dest: "{{ kubernetes_user_manifests_path }}/hostnameOverride-patch.json"
 
     - name: Check current command for kube-proxy daemonset
-      shell: "{{bin_dir}}/kubectl get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.containers[0].command}'"
+      shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.containers[0].command}'"
       register: current_kube_proxy_command
 
     - name: Apply hostnameOverride patch for kube-proxy daemonset
-      shell: "{{bin_dir}}/kubectl patch ds kube-proxy --namespace=kube-system --type=json -p \"$(cat hostnameOverride-patch.json)\""
+      shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=json -p \"$(cat hostnameOverride-patch.json)\""
       args:
         chdir: "{{ kubernetes_user_manifests_path }}"
       register: patch_kube_proxy_command
@@ -43,11 +43,11 @@
 
     # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
     - name: Check current nodeselector for kube-proxy daemonset
-      shell: "{{bin_dir}}/kubectl get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'"
+      shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'"
       register: current_kube_proxy_state
 
     - name: Apply nodeselector patch for kube-proxy daemonset
-      shell: "{{bin_dir}}/kubectl patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\""
+      shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\""
       args:
         chdir: "{{ kubernetes_user_manifests_path }}"
       register: patch_kube_proxy_state
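Because kubectl patch exit codes cannot be relied on (kubernetes#58212, referenced above), the check tasks register the daemonset's current state; a guard of roughly the following shape (a sketch, assuming the register names above and that the patch adds --hostname-override) keeps the patch step idempotent:

    - name: Apply hostnameOverride patch for kube-proxy daemonset
      shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=json -p \"$(cat hostnameOverride-patch.json)\""
      args:
        chdir: "{{ kubernetes_user_manifests_path }}"
      when: "'--hostname-override' not in current_kube_proxy_command.stdout"
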
diff --git a/scale.yml b/scale.yml
index 348ec28ba..84bd638d2 100644
--- a/scale.yml
+++ b/scale.yml
@@ -13,19 +13,6 @@
   vars:
     ansible_connection: local
 
-- hosts: localhost
-  tasks:
-    - name: deploy warning for non kubeadm
-      debug:
-        msg: "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
-      when: not kubeadm_enabled and not skip_non_kubeadm_warning
-
-    - name: deploy cluster for non kubeadm
-      pause:
-        prompt: "Are you sure you want to deploy cluster using the deprecated non-kubeadm mode."
-        echo: no
-      when: not kubeadm_enabled and not skip_non_kubeadm_warning
-
 - hosts: bastion[0]
   gather_facts: False
   roles:
@@ -66,6 +53,6 @@
     - { role: download, tags: download, when: "not skip_downloads" }
     - { role: etcd, tags: etcd, etcd_cluster_setup: false }
     - { role: kubernetes/node, tags: node }
-    - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
+    - { role: kubernetes/kubeadm, tags: kubeadm }
     - { role: network_plugin, tags: network }
   environment: "{{proxy_env}}"
diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml
index fa0561082..53d0164c1 100644
--- a/tests/cloud_playbooks/delete-gce.yml
+++ b/tests/cloud_playbooks/delete-gce.yml
@@ -18,6 +18,21 @@
           k8s-{{test_name}}-1,k8s-{{test_name}}-2
           {%- endif -%}
 
+    - name: stop gce instances
+      gce:
+        instance_names: "{{instance_names}}"
+        image: "{{ cloud_image | default(omit) }}"
+        service_account_email: "{{ gce_service_account_email }}"
+        pem_file: "{{ gce_pem_file | default(omit)}}"
+        credentials_file: "{{gce_credentials_file | default(omit)}}"
+        project_id: "{{ gce_project_id }}"
+        zone: "{{cloud_region | default('europe-west1-b')}}"
+        state: 'stopped'
+      async: 120
+      poll: 3
+      retries: 3
+      register: gce
+
     - name: delete gce instances
       gce:
         instance_names: "{{instance_names}}"
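Stopping the instances before deleting them is what lets the delete step complete reliably. One caveat: in the Ansible releases of this era, retries only takes effect when paired with until, so a variant of the stop task like the following (same gce credential parameters elided; register name hypothetical) would actually retry on failure:

    - name: stop gce instances
      gce:
        instance_names: "{{instance_names}}"
        project_id: "{{ gce_project_id }}"
        zone: "{{cloud_region | default('europe-west1-b')}}"
        state: 'stopped'
      register: gce_stop              # hypothetical name, avoids shadowing the module name
      retries: 3
      until: gce_stop is succeeded
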
diff --git a/tests/files/gce_ubuntu-flannel-ha.yml b/tests/files/gce_ubuntu-flannel-ha.yml
index 600489bb8..e4f50e5f3 100644
--- a/tests/files/gce_ubuntu-flannel-ha.yml
+++ b/tests/files/gce_ubuntu-flannel-ha.yml
@@ -6,7 +6,7 @@ mode: ha
 
 # Deployment settings
 kube_network_plugin: flannel
-kubeadm_enabled: false
+kubeadm_enabled: true
 skip_non_kubeadm_warning: true
 deploy_netchecker: true
 dns_min_replicas: 1
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index 22f9cab6c..4e8c11ff5 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -9,4 +9,3 @@
       password: "{{ lookup('password', credentials_dir + '/kube_user.creds length=15 chars=ascii_letters,digits') }}"
       validate_certs: no
       status_code: 200,401
-    when: not kubeadm_enabled|default(false)
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index cddc2e959..47835ff90 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -13,19 +13,6 @@
   vars:
     ansible_connection: local
 
-- hosts: localhost
-  tasks:
-    - name: deploy warning for non kubeadm
-      debug:
-        msg: "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
-      when: not kubeadm_enabled and not skip_non_kubeadm_warning
-
-    - name: deploy cluster for non kubeadm
-      pause:
-        prompt: "Are you sure you want to deploy cluster using the deprecated non-kubeadm mode."
-        echo: no
-      when: not kubeadm_enabled and not skip_non_kubeadm_warning
-
 - hosts: bastion[0]
   gather_facts: False
   roles:
@@ -109,7 +96,7 @@
     - { role: kubespray-defaults}
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
-    - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
+    - { role: kubernetes/kubeadm, tags: kubeadm }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
   environment: "{{proxy_env}}"
 
-- 
GitLab