From 6744726089245c724b6927d419064a84551931e2 Mon Sep 17 00:00:00 2001
From: Matthew Mosesohn <mmosesohn@mirantis.com>
Date: Wed, 13 Sep 2017 19:00:51 +0100
Subject: [PATCH] kubeadm support (#1631)

* kubeadm support

* move k8s master to a subtask
* disable k8s secrets when using kubeadm
* fix etcd cert serial var
* move simple auth users to master role
* make a kubeadm-specific env file for kubelet
* add non-ha CI job

* change ci boolean vars to json format

* fixup

* Update create-gce.yml

* Update create-gce.yml

* Update create-gce.yml
---
 .gitlab-ci.yml                                | 78 +++++++++++++++----
 cluster.yml                                   | 11 +++
 inventory/group_vars/all.yml                  |  6 ++
 library/kube.py                               |  9 ++-
 roles/download/defaults/main.yml              | 14 +++-
 roles/etcd/tasks/main.yml                     |  2 +-
 roles/kubernetes-apps/ansible/tasks/main.yml  | 12 +++
 roles/kubernetes/kubeadm/tasks/main.yml       | 41 ++++++++++
 .../kubeadm/templates/kubeadm-client.conf.j2  |  6 ++
 roles/kubernetes/master/defaults/main.yml     |  4 +
 roles/kubernetes/master/handlers/main.yml     |  4 +
 .../kubernetes/master/tasks/kubeadm-setup.yml | 35 +++++++++
 roles/kubernetes/master/tasks/main.yml        | 68 +++-------------
 .../master/tasks/static-pod-setup.yml         | 61 +++++++++++++++
 roles/kubernetes/master/tasks/users-file.yml  | 14 ++++
 .../templates/known_users.csv.j2              |  0
 .../master/templates/kubeadm-config.yaml.j2   | 67 ++++++++++++++++
 .../manifests/kube-apiserver.manifest.j2      |  2 +-
 .../kube-controller-manager.manifest.j2       |  2 +-
 roles/kubernetes/node/meta/main.yml           |  5 ++
 roles/kubernetes/node/tasks/install.yml       | 12 +++
 roles/kubernetes/node/tasks/main.yml          | 16 +++-
 .../node/templates/kubelet.host.service.j2    |  2 +-
 .../node/templates/kubelet.kubeadm.env.j2     | 57 ++++++++++++++
 .../{kubelet.j2 => kubelet.standard.env.j2}   |  0
 roles/kubernetes/preinstall/tasks/main.yml    | 25 ++----
 roles/kubernetes/preinstall/vars/centos.yml   |  1 +
 roles/kubernetes/preinstall/vars/debian.yml   |  1 +
 roles/kubernetes/preinstall/vars/fedora.yml   |  1 +
 roles/kubernetes/preinstall/vars/redhat.yml   |  1 +
 roles/kubernetes/preinstall/vars/ubuntu.yml   |  7 ++
 roles/kubernetes/secrets/tasks/main.yml       | 17 +---
 roles/kubespray-defaults/defaults/main.yaml   |  4 +
 .../calico/templates/calico-node.yml.j2       |  2 +-
 upgrade-cluster.yml                           |  2 +
 35 files changed, 469 insertions(+), 120 deletions(-)
 create mode 100644 roles/kubernetes/kubeadm/tasks/main.yml
 create mode 100644 roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2
 create mode 100644 roles/kubernetes/master/tasks/kubeadm-setup.yml
 create mode 100644 roles/kubernetes/master/tasks/static-pod-setup.yml
 create mode 100644 roles/kubernetes/master/tasks/users-file.yml
 rename roles/kubernetes/{secrets => master}/templates/known_users.csv.j2 (100%)
 create mode 100644 roles/kubernetes/master/templates/kubeadm-config.yaml.j2
 create mode 100644 roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
 rename roles/kubernetes/node/templates/{kubelet.j2 => kubelet.standard.env.j2} (100%)
 create mode 100644 roles/kubernetes/preinstall/vars/ubuntu.yml

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 7080c7c67..c27f07bca 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -53,6 +53,7 @@ before_script:
   IDEMPOT_CHECK: "false"
   RESET_CHECK: "false"
   UPGRADE_TEST: "false"
+  KUBEADM_ENABLED: "false"
   RESOLVCONF_MODE: docker_dns
   LOG_LEVEL: "-vv"
   ETCD_DEPLOYMENT: "docker"
@@ -117,9 +118,9 @@ before_script:
       -e bootstrap_os=${BOOTSTRAP_OS}
       -e cert_management=${CERT_MGMT:-script}
       -e cloud_provider=gce
-      -e deploy_netchecker=true
-      -e download_localhost=${DOWNLOAD_LOCALHOST}
-      -e download_run_once=${DOWNLOAD_RUN_ONCE}
+      -e "{deploy_netchecker: true}"
+      -e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
+      -e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
       -e etcd_deployment_type=${ETCD_DEPLOYMENT}
       -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
       -e kubedns_min_replicas=1
@@ -127,6 +128,9 @@ before_script:
       -e local_release_dir=${PWD}/downloads
       -e resolvconf_mode=${RESOLVCONF_MODE}
       -e vault_deployment_type=${VAULT_DEPLOYMENT}
+      -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
+      -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
+      -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
       -e "${AUTHORIZATION_MODES}"
       --limit "all:!fake_hosts"
       cluster.yml
@@ -144,17 +148,19 @@ before_script:
       -e ansible_ssh_user=${SSH_USER}
       -e bootstrap_os=${BOOTSTRAP_OS}
       -e cloud_provider=gce
-      -e deploy_netchecker=true
-      -e download_localhost=${DOWNLOAD_LOCALHOST}
-      -e download_run_once=${DOWNLOAD_RUN_ONCE}
+      -e "{deploy_netchecker: true}"
+      -e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
+      -e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
       -e etcd_deployment_type=${ETCD_DEPLOYMENT}
       -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
       -e kubedns_min_replicas=1
       -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
       -e local_release_dir=${PWD}/downloads
       -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e vault_deployment_type=${VAULT_DEPLOYMENT}
       -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
       -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
+      -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
       -e "${AUTHORIZATION_MODES}"
       --limit "all:!fake_hosts"
       $PLAYBOOK;
@@ -178,14 +184,18 @@ before_script:
       --private-key=${HOME}/.ssh/id_rsa
       -e bootstrap_os=${BOOTSTRAP_OS}
       -e ansible_python_interpreter=${PYPATH}
-      -e download_localhost=${DOWNLOAD_LOCALHOST}
-      -e download_run_once=${DOWNLOAD_RUN_ONCE}
-      -e deploy_netchecker=true
-      -e resolvconf_mode=${RESOLVCONF_MODE}
-      -e local_release_dir=${PWD}/downloads
+      -e "{deploy_netchecker: true}"
+      -e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
+      -e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
       -e etcd_deployment_type=${ETCD_DEPLOYMENT}
       -e kubedns_min_replicas=1
       -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      -e local_release_dir=${PWD}/downloads
+      -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e vault_deployment_type=${VAULT_DEPLOYMENT}
+      -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
+      -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
+      -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
       -e "${AUTHORIZATION_MODES}"
       --limit "all:!fake_hosts"
       cluster.yml;
@@ -221,14 +231,18 @@ before_script:
       --private-key=${HOME}/.ssh/id_rsa
       -e bootstrap_os=${BOOTSTRAP_OS}
       -e ansible_python_interpreter=${PYPATH}
-      -e download_localhost=${DOWNLOAD_LOCALHOST}
-      -e download_run_once=${DOWNLOAD_RUN_ONCE}
-      -e deploy_netchecker=true
-      -e resolvconf_mode=${RESOLVCONF_MODE}
-      -e local_release_dir=${PWD}/downloads
+      -e "{deploy_netchecker: true}"
+      -e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
+      -e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
       -e etcd_deployment_type=${ETCD_DEPLOYMENT}
       -e kubedns_min_replicas=1
       -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      -e local_release_dir=${PWD}/downloads
+      -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e vault_deployment_type=${VAULT_DEPLOYMENT}
+      -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
+      -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
+      -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
       -e "${AUTHORIZATION_MODES}"
       --limit "all:!fake_hosts"
       cluster.yml;
@@ -280,6 +294,17 @@ before_script:
   UPGRADE_TEST: "graceful"
   STARTUP_SCRIPT: ""
 
+.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
+# stage: deploy-gce-part1
+  KUBE_NETWORK_PLUGIN: canal
+  AUTHORIZATION_MODES: "{ 'authorization_modes':  [ 'RBAC' ] }"
+  CLOUD_IMAGE: ubuntu-1604-xenial
+  CLOUD_MACHINE_TYPE: "n1-standard-2"
+  CLOUD_REGION: europe-west1-b
+  CLUSTER_MODE: default
+  KUBEADM_ENABLED: "true"
+  STARTUP_SCRIPT: ""
+
 .rhel7_weave_variables: &rhel7_weave_variables
 # stage: deploy-gce-part1
   KUBE_NETWORK_PLUGIN: weave
@@ -470,6 +495,27 @@ ubuntu-canal-ha-rbac-triggers:
   when: on_success
   only: ['triggers']
 
+ubuntu-canal-kubeadm-rbac:
+  stage: deploy-gce-part1
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *ubuntu_canal_kubeadm_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
+ubuntu-canal-kubeadm-triggers:
+  stage: deploy-gce-part1
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *ubuntu_canal_kubeadm_variables
+  when: on_success
+  only: ['triggers']
+
 rhel7-weave:
   stage: deploy-gce-part1
   <<: *job
diff --git a/cluster.yml b/cluster.yml
index b973d6c14..7b842d917 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -69,6 +69,17 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/master, tags: master }
+
+- hosts: k8s-cluster
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  roles:
+    - { role: kubespray-defaults}
+    - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
+
+- hosts: kube-master
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  roles:
+    - { role: kubespray-defaults}
     - { role: kubernetes-apps/network_plugin, tags: network }
     - { role: kubernetes-apps/policy_controller, tags: policy-controller }
 
diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml
index cc77138b8..be260166a 100644
--- a/inventory/group_vars/all.yml
+++ b/inventory/group_vars/all.yml
@@ -82,6 +82,12 @@ bin_dir: /usr/local/bin
 #openstack_lbaas_monitor_timeout: "30s"
 #openstack_lbaas_monitor_max_retries: "3"
 
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6  chars=ascii_letters,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_letters,digits') }}"
+#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
+#
 ## Set these proxy values in order to update docker daemon to use proxies
 #http_proxy: ""
 #https_proxy: ""
diff --git a/library/kube.py b/library/kube.py
index 52f6a235d..fa8312f27 100644
--- a/library/kube.py
+++ b/library/kube.py
@@ -135,12 +135,15 @@ class KubeManager(object):
             return None
         return out.splitlines()
 
-    def create(self, check=True):
+    def create(self, check=True, force=True):
         if check and self.exists():
             return []
 
         cmd = ['apply']
 
+        if force:
+            cmd.append('--force')
+
         if not self.filename:
             self.module.fail_json(msg='filename required to create')
 
@@ -148,11 +151,11 @@ class KubeManager(object):
 
         return self._execute(cmd)
 
-    def replace(self):
+    def replace(self, force=True):
 
         cmd = ['apply']
 
-        if self.force:
+        if force:
             cmd.append('--force')
 
         if not self.filename:
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index d5c5ef7e4..ef5628664 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -19,6 +19,7 @@ download_always_pull: False
 
 # Versions
 kube_version: v1.7.3
+kubeadm_version: "{{ kube_version }}"
 etcd_version: v3.2.4
 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
@@ -31,11 +32,13 @@ flannel_version: "v0.8.0"
 flannel_cni_version: "v0.2.0"
 pod_infra_version: 3.0
 
-# Download URL's
+# Download URLs
 etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
+kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/amd64/kubeadm"
 
 # Checksums
 etcd_checksum: "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b"
+kubeadm_checksum: "378e6052f8b178f8e6a38e8637681c72d389443b66b78b51b8ddc9a162c655c3"
 
 # Containers
 # Possible values: host, docker
@@ -132,6 +135,15 @@ downloads:
     container: "{{ etcd_deployment_type in [ 'docker', 'rkt' ] }}"
     repo: "{{ etcd_image_repo }}"
     tag: "{{ etcd_image_tag }}"
+  kubeadm:
+    version: "{{ kubeadm_version }}"
+    dest: "kubeadm"
+    sha256: "{{ kubeadm_checksum }}"
+    source_url: "{{ kubeadm_download_url }}"
+    url: "{{ kubeadm_download_url }}"
+    unarchive: false
+    owner: "root"
+    mode: "0755"
   hyperkube:
     container: true
     repo: "{{ hyperkube_image_repo }}"
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 3f8403570..d3bfe9628 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -11,7 +11,7 @@
 
 - name: "Gen_certs | Get etcd certificate serials"
   shell: "openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem -noout -serial | cut -d= -f2"
-  register: "node-{{ inventory_hostname }}_serial"
+  register: "etcd_client_cert_serial"
   when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
 
 - include: "install_{{ etcd_deployment_type }}.yml"
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index c2ffd7507..7771a91c3 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -8,6 +8,18 @@
   delay: 6
   when: inventory_hostname == groups['kube-master'][0]
 
+- name: kubeadm | Delete kubeadm kubedns
+  kube:
+    name: "kubedns"
+    namespace: "{{ system_namespace }}"
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: "deploy"
+    state: absent
+  when:
+    - kubeadm_enabled|default(false)
+    - kubeadm_init.changed|default(false)
+    - inventory_hostname == groups['kube-master'][0]
+
 - name: Kubernetes Apps | Lay Down KubeDNS Template
   template:
     src: "{{item.file}}"
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
new file mode 100644
index 000000000..ddc86b2f3
--- /dev/null
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+- name: Set kubeadm_discovery_address
+  set_fact:
+    kubeadm_discovery_address: >-
+      {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
+      {{ first_kube_master }}:{{ kube_apiserver_port }}
+      {%- else -%}
+      {{ kube_apiserver_endpoint }}
+      {%- endif %}
+  when: not is_kube_master
+  tags: facts
+
+- name: Create kubeadm client config
+  template:
+    src: kubeadm-client.conf.j2
+    dest: "{{ kube_config_dir }}/kubeadm-client.conf"
+    backup: yes
+  when: not is_kube_master
+  register: kubeadm_client_conf
+
+- name: Join to cluster if needed
+  command: kubeadm join --config {{ kube_config_dir }}/kubeadm-client.conf --skip-preflight-checks
+  register: kubeadm_join
+  when: not is_kube_master and kubeadm_client_conf.changed
+
+- name: Update server field in kubelet kubeconfig
+  replace:
+    path: "{{ kube_config_dir }}/kubelet.conf"
+    regexp: '(\s+){{ first_kube_master }}:{{ kube_apiserver_port }}(\s+.*)?$'
+    replace: '\1{{ kube_apiserver_endpoint }}\2'
+    backup: yes
+  when: not is_kube_master and kubeadm_discovery_address != kube_apiserver_endpoint
+
+# FIXME(mattymo): Reconcile kubelet kubeconfig filename for both deploy modes
+- name: Symlink kubelet kubeconfig for calico/canal
+  file:
+    src: "{{ kube_config_dir }}/kubelet.conf"
+    dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
+    state: link
+    force: yes
+  when: kube_network_plugin in ['calico','canal']
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2
new file mode 100644
index 000000000..3c8ede9ad
--- /dev/null
+++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2
@@ -0,0 +1,6 @@
+apiVersion: kubeadm.k8s.io/v1alpha1
+kind: NodeConfiguration
+caCertPath: {{ kube_config_dir }}/ssl/ca.crt
+token: {{ kubeadm_token }}
+discoveryTokenAPIServers:
+- {{ kubeadm_discovery_address | replace("https://", "")}}
diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml
index 979622731..076b8477c 100644
--- a/roles/kubernetes/master/defaults/main.yml
+++ b/roles/kubernetes/master/defaults/main.yml
@@ -66,3 +66,7 @@ apiserver_custom_flags: []
 controller_mgr_custom_flags: []
 
 scheduler_custom_flags: []
+
+# kubeadm settings
+# Value of 0 means it never expires
+kubeadm_token_ttl: 0
diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml
index d6034aeb2..a27a5772e 100644
--- a/roles/kubernetes/master/handlers/main.yml
+++ b/roles/kubernetes/master/handlers/main.yml
@@ -44,3 +44,7 @@
   until: result.status == 200
   retries: 20
   delay: 6
+
+- name: Master | set secret_changed
+  set_fact:
+    secret_changed: true
diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml
new file mode 100644
index 000000000..cef97e2b0
--- /dev/null
+++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -0,0 +1,35 @@
+---
+- name: kubeadm | aggregate all SANs
+  set_fact:
+    apiserver_sans: >-
+      kubernetes
+      kubernetes.default
+      kubernetes.default.svc
+      kubernetes.default.svc.{{ dns_domain }}
+      {{ kube_apiserver_ip }}
+      localhost
+      127.0.0.1
+      {{ ' '.join(groups['kube-master']) }}
+      {%- if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %}
+      {{ apiserver_loadbalancer_domain_name }}
+      {%- endif %}
+      {%- for host in groups['kube-master'] -%}
+      {%- if hostvars[host]['access_ip'] is defined %}{{ hostvars[host]['access_ip'] }}{% endif -%}
+      {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+      {%- endfor %}
+  tags: facts
+
+- name: kubeadm | Copy etcd cert dir under k8s cert dir
+  command: "cp -TR {{ etcd_cert_dir }} {{ kube_config_dir }}/ssl/etcd"
+  changed_when: false
+
+- name: kubeadm | Create kubeadm config
+  template:
+    src: kubeadm-config.yaml.j2
+    dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
+  register: kubeadm_config
+
+- name: kubeadm | Initialize cluster
+  command: timeout -k 240s 240s kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --skip-preflight-checks
+  register: kubeadm_init
+  when: kubeadm_config.changed
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index 452463118..b6158d9e5 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -2,6 +2,9 @@
 - include: pre-upgrade.yml
   tags: k8s-pre-upgrade
 
+- include: users-file.yml
+  when: kube_basic_auth|default(true)
+
 - name: Copy kubectl from hyperkube container
   command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp /hyperkube /systembindir/kubectl"
   register: kube_task_result
@@ -25,63 +28,10 @@
   when: ansible_os_family in ["Debian","RedHat"]
   tags: [kubectl, upgrade]
 
-- name: Write kube-apiserver manifest
-  template:
-    src: manifests/kube-apiserver.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
-  notify: Master | wait for the apiserver to be running
-  tags: kube-apiserver
-
-- meta: flush_handlers
-
-- name: Write kube system namespace manifest
-  template:
-    src: namespace.j2
-    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  run_once: yes
-  when: inventory_hostname == groups['kube-master'][0]
-  tags: apps
-
-- name: Check if kube system namespace exists
-  command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
-  register: 'kubesystem'
-  changed_when: False
-  failed_when: False
-  run_once: yes
-  tags: apps
-
-- name: Create kube system namespace
-  command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  register: create_system_ns
-  until: create_system_ns.rc == 0
-  changed_when: False
-  when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]
-  tags: apps
-
-- name: Write kube-scheduler kubeconfig
-  template:
-    src: kube-scheduler-kubeconfig.yaml.j2
-    dest: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
-  tags: kube-scheduler
-
-- name: Write kube-scheduler manifest
-  template:
-    src: manifests/kube-scheduler.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
-  notify: Master | wait for kube-scheduler
-  tags: kube-scheduler
-
-- name: Write kube-controller-manager kubeconfig
-  template:
-    src: kube-controller-manager-kubeconfig.yaml.j2
-    dest: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
-  tags: kube-controller-manager
+- name: Include kubeadm setup if enabled
+  include: kubeadm-setup.yml
+  when: kubeadm_enabled|default(false)|bool
 
-- name: Write kube-controller-manager manifest
-  template:
-    src: manifests/kube-controller-manager.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
-  notify: Master | wait for kube-controller-manager
-  tags: kube-controller-manager
+- name: Include static pod setup if not using kubeadm
+  include: static-pod-setup.yml
+  when: not kubeadm_enabled|default(false)|bool
diff --git a/roles/kubernetes/master/tasks/static-pod-setup.yml b/roles/kubernetes/master/tasks/static-pod-setup.yml
new file mode 100644
index 000000000..0f600b244
--- /dev/null
+++ b/roles/kubernetes/master/tasks/static-pod-setup.yml
@@ -0,0 +1,61 @@
+---
+- name: Write kube-apiserver manifest
+  template:
+    src: manifests/kube-apiserver.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
+  notify: Master | wait for the apiserver to be running
+  tags: kube-apiserver
+
+- meta: flush_handlers
+
+- name: Write kube system namespace manifest
+  template:
+    src: namespace.j2
+    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
+  run_once: yes
+  when: inventory_hostname == groups['kube-master'][0]
+  tags: apps
+
+- name: Check if kube system namespace exists
+  command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
+  register: 'kubesystem'
+  changed_when: False
+  failed_when: False
+  run_once: yes
+  tags: apps
+
+- name: Create kube system namespace
+  command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  register: create_system_ns
+  until: create_system_ns.rc == 0
+  changed_when: False
+  when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]
+  tags: apps
+
+- name: Write kube-scheduler kubeconfig
+  template:
+    src: kube-scheduler-kubeconfig.yaml.j2
+    dest: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
+  tags: kube-scheduler
+
+- name: Write kube-scheduler manifest
+  template:
+    src: manifests/kube-scheduler.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
+  notify: Master | wait for kube-scheduler
+  tags: kube-scheduler
+
+- name: Write kube-controller-manager kubeconfig
+  template:
+    src: kube-controller-manager-kubeconfig.yaml.j2
+    dest: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
+  tags: kube-controller-manager
+
+- name: Write kube-controller-manager manifest
+  template:
+    src: manifests/kube-controller-manager.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
+  notify: Master | wait for kube-controller-manager
+  tags: kube-controller-manager
diff --git a/roles/kubernetes/master/tasks/users-file.yml b/roles/kubernetes/master/tasks/users-file.yml
new file mode 100644
index 000000000..ec0264c4d
--- /dev/null
+++ b/roles/kubernetes/master/tasks/users-file.yml
@@ -0,0 +1,14 @@
+---
+- name: Make sure the users directory exists
+  file:
+    path: "{{ kube_users_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
+
+- name: Populate users for basic auth in API
+  template:
+    src: known_users.csv.j2
+    dest: "{{ kube_users_dir }}/known_users.csv"
+    backup: yes
+  notify: Master | set secret_changed
diff --git a/roles/kubernetes/secrets/templates/known_users.csv.j2 b/roles/kubernetes/master/templates/known_users.csv.j2
similarity index 100%
rename from roles/kubernetes/secrets/templates/known_users.csv.j2
rename to roles/kubernetes/master/templates/known_users.csv.j2
diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
new file mode 100644
index 000000000..7cac5c16e
--- /dev/null
+++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
@@ -0,0 +1,67 @@
+apiVersion: kubeadm.k8s.io/v1alpha1
+kind: MasterConfiguration
+api:
+  advertiseAddress: {{ ip | default(ansible_default_ipv4.address) }}
+  bindPort: "{{ kube_apiserver_port }}"
+etcd:
+  endpoints:
+{% for endpoint in etcd_access_endpoint.split(',') %}
+  - {{ endpoint }}
+{% endfor %}
+  caFile: {{ kube_config_dir }}/ssl/etcd/ca.pem
+  certFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}.pem
+  keyFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}-key.pem
+networking:
+  dnsDomain: {{ dns_domain }}
+  serviceSubnet: {{ kube_service_addresses }}
+  podSubnet: {{ kube_pods_subnet }}
+kubernetesVersion: {{ kube_version }}
+cloudProvider: {{ cloud_provider|default('') }}
+#TODO: cloud provider conf file
+authorizationModes:
+{% for mode in authorization_modes %}
+- {{ mode }}
+{% endfor %}
+token: {{ kubeadm_token }}
+tokenTTL: {{ kubeadm_token_ttl }}
+selfHosted: false
+apiServerExtraArgs:
+  insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
+  insecure-port: "{{ kube_apiserver_insecure_port }}"
+  admission-control: {{ kube_apiserver_admission_control | join(',') }}
+  service-node-port-range: {{ kube_apiserver_node_port_range }}
+{% if kube_basic_auth|default(true) %}
+  basic-auth-file: {{ kube_users_dir }}/known_users.csv
+{% endif %}
+{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
+  oidc-issuer-url: {{ kube_oidc_url }}
+  oidc-client-id: {{ kube_oidc_client_id }}
+{%   if kube_oidc_ca_file is defined %}
+  oidc-ca-file: {{ kube_oidc_ca_file }}
+{%   endif %}
+{%   if kube_oidc_username_claim is defined %}
+  oidc-username-claim: {{ kube_oidc_username_claim }}
+{%   endif %}
+{%   if kube_oidc_groups_claim is defined %}
+  oidc-groups-claim: {{ kube_oidc_groups_claim }}
+{%   endif %}
+{% endif %}
+  storage-backend: {{ kube_apiserver_storage_backend }}
+{% if kube_api_runtime_config is defined %}
+  runtime-config: {{ kube_api_runtime_config }}
+{% endif %}
+  allow-privileged: "true"
+#TODO: Custom flags compatible with kubeadm
+controllerManagerExtraArgs:
+  node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
+  node-monitor-period: {{ kube_controller_node_monitor_period }}
+  pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }}
+{% if kube_feature_gates %}
+  feature-gates: {{ kube_feature_gates|join(',') }}
+{% endif %}
+#schedulerExtraArgs:
+apiServerCertSANs:
+{% for san in apiserver_sans.split(' ') | unique %}
+  - {{ san }}
+{% endfor %}
+certificatesDir: {{ kube_config_dir }}/ssl
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index f5dec5589..58c762961 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -7,7 +7,7 @@ metadata:
     k8s-app: kube-apiserver
     kubespray: v2
   annotations:
-    kubespray.etcd-cert/serial: "{{ etcd_node_cert_serial }}"
+    kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
     kubespray.apiserver-cert/serial: "{{ apiserver_cert_serial }}"
 spec:
   hostNetwork: true
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
index e0ef08fe4..bf03e6040 100644
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -6,7 +6,7 @@ metadata:
   labels:
     k8s-app: kube-controller
   annotations:
-    kubespray.etcd-cert/serial: "{{ etcd_node_cert_serial }}"
+    kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
     kubespray.controller-manager-cert/serial: "{{ controller_manager_cert_serial }}"
 spec:
   hostNetwork: true
diff --git a/roles/kubernetes/node/meta/main.yml b/roles/kubernetes/node/meta/main.yml
index f0656e571..c1f472bfd 100644
--- a/roles/kubernetes/node/meta/main.yml
+++ b/roles/kubernetes/node/meta/main.yml
@@ -10,7 +10,12 @@ dependencies:
     file: "{{ downloads.install_socat }}"
     tags: [download, kubelet]
     when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
+  - role: download
+    file: "{{ downloads.kubeadm }}"
+    tags: [download, kubelet, kubeadm]
+    when: kubeadm_enabled
   - role: kubernetes/secrets
+    when: not kubeadm_enabled
     tags: k8s-secrets
   - role: download
     file: "{{ downloads.nginx }}"
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml
index 692f8247c..20d542da8 100644
--- a/roles/kubernetes/node/tasks/install.yml
+++ b/roles/kubernetes/node/tasks/install.yml
@@ -13,6 +13,18 @@
     ]"
   tags: facts
 
+- name: Set kubelet deployment to host if kubeadm is enabled
+  set_fact:
+    kubelet_deployment_type: host
+  when: kubeadm_enabled
+  tags: kubeadm
+
+- name: install | Copy binary from download dir
+  command: rsync -piu "{{ local_release_dir }}/kubeadm" "{{ bin_dir }}/kubeadm"
+  changed_when: false
+  when: kubeadm_enabled
+  tags: kubeadm
+
 - include: "install_{{ kubelet_deployment_type }}.yml"
 
 - name: install | Write kubelet systemd init file
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index d166fe661..04b5132cb 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -20,14 +20,24 @@
   when: is_kube_master == false and loadbalancer_apiserver_localhost|default(true)
   tags: nginx
 
-- name: Write kubelet config file
+- name: Write kubelet config file (non-kubeadm)
   template:
-    src: kubelet.j2
+    src: kubelet.standard.env.j2
     dest: "{{ kube_config_dir }}/kubelet.env"
     backup: yes
+  when: not kubeadm_enabled
   notify: restart kubelet
   tags: kubelet
 
+- name: Write kubelet config file (kubeadm)
+  template:
+    src: kubelet.kubeadm.env.j2
+    dest: "{{ kube_config_dir }}/kubelet.env"
+    backup: yes
+  when: kubeadm_enabled
+  notify: restart kubelet
+  tags: ['kubelet', 'kubeadm']
+
 - name: write the kubecfg (auth) file for kubelet
   template:
     src: "{{ item }}-kubeconfig.yaml.j2"
@@ -36,6 +46,7 @@
   with_items:
     - node
     - kube-proxy
+  when: not kubeadm_enabled
   notify: restart kubelet
   tags: kubelet
 
@@ -84,6 +95,7 @@
   template:
     src: manifests/kube-proxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"
+  when: not kubeadm_enabled
   tags: kube-proxy
 
 # reload-systemd
diff --git a/roles/kubernetes/node/templates/kubelet.host.service.j2 b/roles/kubernetes/node/templates/kubelet.host.service.j2
index ec5e3d524..be43934ea 100644
--- a/roles/kubernetes/node/templates/kubelet.host.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.host.service.j2
@@ -5,7 +5,7 @@ After=docker.service
 Wants=docker.socket
 
 [Service]
-EnvironmentFile={{kube_config_dir}}/kubelet.env
+EnvironmentFile=-{{kube_config_dir}}/kubelet.env
 ExecStart={{ bin_dir }}/kubelet \
 		$KUBE_LOGTOSTDERR \
 		$KUBE_LOG_LEVEL \
diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
new file mode 100644
index 000000000..df55be4cf
--- /dev/null
+++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
@@ -0,0 +1,57 @@
+### Upstream source https://github.com/kubernetes/release/blob/master/debian/xenial/kubeadm/channel/stable/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+### All upstream values should be present in this file
+
+# logging to stderr means we get it in the systemd journal
+KUBE_LOGTOSTDERR="--logtostderr=true"
+KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
+# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
+KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}"
+# The port for the info server to serve on
+# KUBELET_PORT="--port=10250"
+# You may leave this blank to use the actual hostname
+{% if kube_override_hostname %}
+KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
+{% endif %}
+{# Base kubelet args #}
+{% set kubelet_args_base -%}
+{# start kubeadm specific settings #}
+--kubeconfig={{ kube_config_dir }}/kubelet.conf \
+--require-kubeconfig=true \
+--authorization-mode=Webhook \
+--client-ca-file={{ kube_cert_dir }}/ca.crt \
+--pod-manifest-path={{ kube_manifest_dir }} \
+--cadvisor-port=0 \
+{# end kubeadm specific settings #}
+--pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
+--kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \
+--node-status-update-frequency={{ kubelet_status_update_frequency }} \
+{% endset %}
+
+{# DNS settings for kubelet #}
+{% if dns_mode == 'kubedns' %}
+{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
+{% elif dns_mode == 'dnsmasq_kubedns' %}
+{% set kubelet_args_cluster_dns %}--cluster-dns={{ dns_server }}{% endset %}
+{% else %}
+{% set kubelet_args_cluster_dns %}{% endset %}
+{% endif %}
+{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
+
+
+KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }}"
+{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave"] %}
+KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
+{% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
+KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
+{% endif %}
+# Should this cluster be allowed to run privileged docker containers
+KUBE_ALLOW_PRIV="--allow-privileged=true"
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
+KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
+{% elif cloud_provider is defined and cloud_provider == "aws" %}
+KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"
+{% else %}
+KUBELET_CLOUDPROVIDER=""
+{% endif %}
+
+PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2
similarity index 100%
rename from roles/kubernetes/node/templates/kubelet.j2
rename to roles/kubernetes/node/templates/kubelet.standard.env.j2
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index 620aae35f..38a329781 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -34,29 +34,18 @@
       skip: true
   tags: facts
 
-- name: Create kubernetes config directory
+- name: Create kubernetes directories
   file:
-    path: "{{ kube_config_dir }}"
+    path: "{{ item }}"
     state: directory
     owner: kube
   when: inventory_hostname in groups['k8s-cluster']
   tags: [kubelet, k8s-secrets, kube-controller-manager, kube-apiserver, bootstrap-os, apps, network, master, node]
-
-- name: Create kubernetes script directory
-  file:
-    path: "{{ kube_script_dir }}"
-    state: directory
-    owner: kube
-  when: "inventory_hostname in groups['k8s-cluster']"
-  tags: [k8s-secrets, bootstrap-os]
-
-- name: Create kubernetes manifests directory
-  file:
-    path: "{{ kube_manifest_dir }}"
-    state: directory
-    owner: kube
-  when: "inventory_hostname in groups['k8s-cluster']"
-  tags: [kubelet, bootstrap-os, master, node]
+  with_items:
+    - "{{ kube_config_dir }}"
+    - "{{ kube_config_dir }}/ssl"
+    - "{{ kube_manifest_dir }}"
+    - "{{ kube_script_dir }}"
 
 - name: check cloud_provider value
   fail:
diff --git a/roles/kubernetes/preinstall/vars/centos.yml b/roles/kubernetes/preinstall/vars/centos.yml
index b2fbcd80a..bacfb96b5 100644
--- a/roles/kubernetes/preinstall/vars/centos.yml
+++ b/roles/kubernetes/preinstall/vars/centos.yml
@@ -2,3 +2,4 @@
 required_pkgs:
   - libselinux-python
   - device-mapper-libs
+  - ebtables
diff --git a/roles/kubernetes/preinstall/vars/debian.yml b/roles/kubernetes/preinstall/vars/debian.yml
index dfcb0bc34..a044e0d49 100644
--- a/roles/kubernetes/preinstall/vars/debian.yml
+++ b/roles/kubernetes/preinstall/vars/debian.yml
@@ -4,3 +4,4 @@ required_pkgs:
   - aufs-tools
   - apt-transport-https
   - software-properties-common
+  - ebtables
diff --git a/roles/kubernetes/preinstall/vars/fedora.yml b/roles/kubernetes/preinstall/vars/fedora.yml
index b2fbcd80a..bacfb96b5 100644
--- a/roles/kubernetes/preinstall/vars/fedora.yml
+++ b/roles/kubernetes/preinstall/vars/fedora.yml
@@ -2,3 +2,4 @@
 required_pkgs:
   - libselinux-python
   - device-mapper-libs
+  - ebtables
diff --git a/roles/kubernetes/preinstall/vars/redhat.yml b/roles/kubernetes/preinstall/vars/redhat.yml
index b2fbcd80a..bacfb96b5 100644
--- a/roles/kubernetes/preinstall/vars/redhat.yml
+++ b/roles/kubernetes/preinstall/vars/redhat.yml
@@ -2,3 +2,4 @@
 required_pkgs:
   - libselinux-python
   - device-mapper-libs
+  - ebtables
diff --git a/roles/kubernetes/preinstall/vars/ubuntu.yml b/roles/kubernetes/preinstall/vars/ubuntu.yml
new file mode 100644
index 000000000..a044e0d49
--- /dev/null
+++ b/roles/kubernetes/preinstall/vars/ubuntu.yml
@@ -0,0 +1,7 @@
+---
+required_pkgs:
+  - python-apt
+  - aufs-tools
+  - apt-transport-https
+  - software-properties-common
+  - ebtables
diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml
index 97987f706..f45b892aa 100644
--- a/roles/kubernetes/secrets/tasks/main.yml
+++ b/roles/kubernetes/secrets/tasks/main.yml
@@ -19,21 +19,6 @@
     mode: o-rwx
     group: "{{ kube_cert_group }}"
 
-- name: Make sure the users directory exits
-  file:
-    path: "{{ kube_users_dir }}"
-    state: directory
-    mode: o-rwx
-    group: "{{ kube_cert_group }}"
-
-- name: Populate users for basic auth in API
-  template:
-    src: known_users.csv.j2
-    dest: "{{ kube_users_dir }}/known_users.csv"
-    backup: yes
-  when: inventory_hostname in groups['kube-master'] and kube_basic_auth|default(true)
-  notify: set secret_changed
-
 #
 # The following directory creates make sure that the directories
 # exist on the first master for cases where the first master isn't
@@ -103,7 +88,7 @@
 
 - name: "Gen_certs | set kube node certificate serial facts"
   set_fact:
-    etcd_node_cert_serial: "{{ node_certificate_serials.results[0].stdout|default() }}"
+    kubelet_cert_serial: "{{ node_certificate_serials.results[0].stdout|default() }}"
     kube_proxy_cert_serial: "{{ node_certificate_serials.results[1].stdout|default() }}"
   when: inventory_hostname in groups['k8s-cluster']
 
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index e6015560a..25fe2ecdd 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -114,6 +114,10 @@ kubelet_deployment_type: docker
 cert_management: script
 vault_deployment_type: docker
 
+# Enable kubeadm deployment (experimental)
+kubeadm_enabled: false
+kubeadm_token: "abcdef.0123456789abcdef"
+
 # K8s image pull policy (imagePullPolicy)
 k8s_image_pull_policy: IfNotPresent
 efk_enabled: false
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index 9f47d468a..8acb28327 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -19,7 +19,7 @@ spec:
         k8s-app: calico-node
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
-        kubespray.etcd-cert/serial: "{{ etcd_node_cert_serial }}"
+        kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
     spec:
       hostNetwork: true
 {% if rbac_enabled %}
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 1a66904ce..3dcb69f29 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -67,6 +67,7 @@
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/master, tags: master }
     - { role: network_plugin, tags: network }
+    - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
 #Finally handle worker upgrades, based on given batch size
@@ -79,6 +80,7 @@
     - { role: kubernetes/node, tags: node }
     - { role: network_plugin, tags: network }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
+    - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
     - { role: kubespray-defaults}
 
 - hosts: kube-master
-- 
GitLab