From 05dc2b3a097fda2ffff7a77f4ca843d0e41dec76 Mon Sep 17 00:00:00 2001
From: Matthew Mosesohn <matthew.mosesohn@gmail.com>
Date: Fri, 19 Apr 2019 16:01:54 +0300
Subject: [PATCH] Use K8s 1.14 and add kubeadm experimental control plane mode
 (#4514)

* Use K8s 1.14 and add kubeadm experimental control plane mode

This reverts commit d39c273d96afe610fed03a2558ffc1beec64c114.

* Cleanup kubeadm setup run on first master

* pin kubeadm_certificate_key in test

* Remove kubelet autolabel of kube-node, add symlink for pki dir

Change-Id: Id5e74dd667c60675dbfe4193b0bc9fb44380e1ca
---
 README.md                                     |   2 +-
 .../group_vars/k8s-cluster/k8s-cluster.yml    |   6 +-
 roles/download/defaults/main.yml              |   2 +-
 roles/kubernetes/client/tasks/main.yml        |  23 +-
 roles/kubernetes/kubeadm/tasks/main.yml       |  19 +-
 .../master/defaults/main/kube-proxy.yml       |   2 +-
 .../kubernetes/master/defaults/main/main.yml  |  11 +-
 .../master/tasks/kubeadm-certificate.yml      |  30 ---
 .../master/tasks/kubeadm-kubeconfig.yml       |  34 ---
 .../master/tasks/kubeadm-migrate-certs.yml    |   4 +-
 .../tasks/kubeadm-secondary-experimental.yml  |  45 ++++
 .../master/tasks/kubeadm-secondary-legacy.yml |  44 ++++
 .../kubernetes/master/tasks/kubeadm-setup.yml | 123 +++++------
 .../master/tasks/kubeadm-upgrade.yml          |   7 +-
 .../master/tasks/kubeadm-version.yml          |   5 -
 roles/kubernetes/master/tasks/pre-upgrade.yml |   4 +-
 .../templates/kubeadm-config.v1alpha1.yaml.j2 | 204 ------------------
 .../templates/kubeadm-config.v1alpha2.yaml.j2 |  10 +-
 .../templates/kubeadm-config.v1alpha3.yaml.j2 |  10 +-
 .../templates/kubeadm-config.v1beta1.yaml.j2  |   8 +-
 .../kubeadm-controlplane.v1beta1.yaml.j2      |  26 +++
 .../node/templates/kubelet.kubeadm.env.j2     |   8 -
 roles/kubernetes/preinstall/defaults/main.yml |   2 +
 .../tasks/0050-create_directories.yml         |   8 +
 roles/kubespray-defaults/defaults/main.yaml   |  10 +-
 roles/network_plugin/calico/defaults/main.yml |   4 +
 roles/network_plugin/calico/handlers/main.yml |  19 +-
 .../calico/rr/defaults/main.yml               |   4 +
 roles/network_plugin/calico/rr/tasks/main.yml |   6 +-
 roles/network_plugin/calico/tasks/install.yml |   8 +-
 roles/network_plugin/calico/tasks/pre.yml     |   2 +-
 .../calico/templates/etcdv2-store.yml.j2      |   6 +-
 .../calico/templates/etcdv3-store.yml.j2      |   6 +-
 roles/network_plugin/canal/defaults/main.yml  |   5 +
 roles/network_plugin/canal/tasks/main.yml     |   6 +-
 roles/network_plugin/cilium/defaults/main.yml |   3 +
 roles/network_plugin/cilium/handlers/main.yml |   2 +-
 roles/network_plugin/cilium/tasks/main.yml    |   6 +-
 tests/files/gce_centos7-flannel-addons.yml    |   2 +
 39 files changed, 318 insertions(+), 408 deletions(-)
 delete mode 100644 roles/kubernetes/master/tasks/kubeadm-kubeconfig.yml
 create mode 100644 roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml
 create mode 100644 roles/kubernetes/master/tasks/kubeadm-secondary-legacy.yml
 delete mode 100644 roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
 create mode 100644 roles/kubernetes/master/templates/kubeadm-controlplane.v1beta1.yaml.j2

diff --git a/README.md b/README.md
index 4e4bd49c3..621cf439e 100644
--- a/README.md
+++ b/README.md
@@ -108,7 +108,7 @@ Supported Components
 --------------------
 
 -   Core
-    -   [kubernetes](https://github.com/kubernetes/kubernetes) v1.13.5
+    -   [kubernetes](https://github.com/kubernetes/kubernetes) v1.14.0
     -   [etcd](https://github.com/coreos/etcd) v3.2.26
     -   [docker](https://www.docker.com/) v18.06 (see note)
     -   [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
index 11a22af3a..9a8ef100d 100644
--- a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
@@ -20,7 +20,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
 kube_api_anonymous_auth: true
 
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.13.5
+kube_version: v1.14.0
 
 # kubernetes image repo define
 kube_image_repo: "gcr.io/google-containers"
@@ -153,6 +153,10 @@ etcd_deployment_type: docker
 kubelet_deployment_type: host
 helm_deployment_type: host
 
+# Enable kubeadm experimental control plane
+kubeadm_control_plane: false
+kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
+
 # K8s image pull policy (imagePullPolicy)
 k8s_image_pull_policy: IfNotPresent
 
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index e75ef80fb..3e2914902 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -35,7 +35,7 @@ download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube
 image_arch: "{{host_architecture | default('amd64')}}"
 
 # Versions
-kube_version: v1.13.5
+kube_version: v1.14.0
 kubeadm_version: "{{ kube_version }}"
 etcd_version: v3.2.26
 
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index 71b505a47..19d0f019d 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -40,18 +40,35 @@
   run_once: yes
   when: kubeconfig_localhost|default(false)
 
+# NOTE(mattymo): Please forgive this workaround
 - name: Generate admin kubeconfig with external api endpoint
   shell: >-
-    {{ bin_dir }}/kubeadm alpha
-    {% if kubeadm_version is version('v1.13.0', '<') %}
-    phase
+    {% if kubeadm_version is version('v1.14.0', '>=') %}
+    mkdir -p {{ kube_config_dir }}/external_kubeconfig &&
     {% endif %}
+    {{ bin_dir }}/kubeadm
+    {% if kubeadm_version is version('v1.14.0', '>=') %}
+    init phase
+    {% elif kubeadm_version is version('v1.13.0', '>=') %}
+    alpha
+    {% else %}
+    alpha phase
+    {% endif %}
+    {% if kubeadm_version is version('v1.14.0', '>=') %}
+    kubeconfig admin
+    --kubeconfig-dir {{ kube_config_dir }}/external_kubeconfig
+    {% else %}
     kubeconfig user
     --client-name kubernetes-admin
     --org system:masters
+    {% endif %}
     --cert-dir {{ kube_config_dir }}/ssl
     --apiserver-advertise-address {{ external_apiserver_address }}
     --apiserver-bind-port {{ external_apiserver_port }}
+    {% if kubeadm_version is version('v1.14.0', '>=') %}
+    && cat {{ kube_config_dir }}/external_kubeconfig/admin.conf &&
+    rm -rf {{ kube_config_dir }}/external_kubeconfig
+    {% endif %}
   environment: "{{ proxy_env }}"
   run_once: yes
   register: admin_kubeconfig
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 2d60876ff..cba856624 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -23,9 +23,15 @@
 
 - name: Create kubeadm token for joining nodes with 24h expiration (default)
   command: "{{ bin_dir }}/kubeadm token create"
-  run_once: true
   register: temp_token
   delegate_to: "{{ groups['kube-master'][0] }}"
+  when: kubeadm_token is not defined
+
+- name: Set kubeadm_token to generated token
+  set_fact:
+    kubeadm_token: "{{ temp_token.stdout }}"
+  when: kubeadm_token is not defined
+
 
 - name: gets the kubeadm version
   command: "{{ bin_dir }}/kubeadm version -o short"
@@ -61,8 +67,6 @@
     dest: "{{ kube_config_dir }}/kubeadm-client.conf"
     backup: yes
   when: not is_kube_master
-  vars:
-    kubeadm_token: "{{ temp_token.stdout }}"
 
 - name: Join to cluster if needed
   environment:
@@ -122,11 +126,10 @@
     {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml
     | sed 's#server:.*#server:\ {{ kube_apiserver_endpoint }}#g'
     | {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf replace -f -
-  delegate_to: "{{groups['kube-master']|first}}"
   run_once: true
   when:
+    - inventory_hostname == groups['kube-master']|first
     - kubeadm_config_api_fqdn is not defined
-    - is_kube_master
     - kubeadm_discovery_address != kube_apiserver_endpoint
     - not kube_proxy_remove
   tags:
@@ -134,11 +137,10 @@
 
 - name: Restart all kube-proxy pods to ensure that they load the new configmap
   shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
-  delegate_to: "{{groups['kube-master']|first}}"
   run_once: true
   when:
+    - inventory_hostname == groups['kube-master']|first
     - kubeadm_config_api_fqdn is not defined
-    - is_kube_master
     - kubeadm_discovery_address != kube_apiserver_endpoint
     - not kube_proxy_remove
   tags:
@@ -159,11 +161,10 @@
 # is fixed
 - name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
   shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
-  delegate_to: "{{groups['kube-master']|first}}"
   run_once: true
   when:
+    - inventory_hostname == groups['kube-master']|first
     - kube_proxy_remove
-    - is_kube_master
     - kubeadm_discovery_address != kube_apiserver_endpoint
   tags:
     - kube-proxy
diff --git a/roles/kubernetes/master/defaults/main/kube-proxy.yml b/roles/kubernetes/master/defaults/main/kube-proxy.yml
index 6dc8d4610..d5bcdb16f 100644
--- a/roles/kubernetes/master/defaults/main/kube-proxy.yml
+++ b/roles/kubernetes/master/defaults/main/kube-proxy.yml
@@ -107,4 +107,4 @@ kube_proxy_resource_container: /kube-proxy
 
 # udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
 # Must be greater than 0. Only applicable for proxyMode=userspace.
-kube_proxy_udp_idle_timeout: 250ms
\ No newline at end of file
+kube_proxy_udp_idle_timeout: 250ms
diff --git a/roles/kubernetes/master/defaults/main/main.yml b/roles/kubernetes/master/defaults/main/main.yml
index 3fb9c5582..3109038e8 100644
--- a/roles/kubernetes/master/defaults/main/main.yml
+++ b/roles/kubernetes/master/defaults/main/main.yml
@@ -23,11 +23,18 @@ kube_apiserver_storage_backend: etcd3
 # By default, force back to etcd2. Set to true to force etcd3 (experimental!)
 force_etcd3: false
 
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
+
 # Associated interfaces must be reachable by the rest of the cluster, and by
 # CLI/web clients.
 kube_controller_manager_bind_address: 0.0.0.0
 kube_scheduler_bind_address: 0.0.0.0
 
+# discovery_timeout modifies the discovery timeout
+discovery_timeout: 5m0s
+
 # audit support
 kubernetes_audit: false
 # path to audit log file
@@ -78,7 +85,6 @@ kube_apiserver_request_timeout: "1m0s"
 
 # 1.9 and below Admission control plug-ins
 kube_apiserver_admission_control:
-  - Initializers
   - NamespaceLifecycle
   - LimitRanger
   - ServiceAccount
@@ -99,8 +105,7 @@ kube_apiserver_enable_admission_plugins: []
 kube_apiserver_disable_admission_plugins: []
 
 # extra runtime config
-kube_api_runtime_config:
-  - admissionregistration.k8s.io/v1alpha1
+kube_api_runtime_config: []
 
 ## Enable/Disable Kube API Server Authentication Methods
 kube_basic_auth: false
diff --git a/roles/kubernetes/master/tasks/kubeadm-certificate.yml b/roles/kubernetes/master/tasks/kubeadm-certificate.yml
index 6072085e0..c3d486b83 100644
--- a/roles/kubernetes/master/tasks/kubeadm-certificate.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-certificate.yml
@@ -12,33 +12,3 @@
     - {src: front-proxy-client.crt, dest: front-proxy-client.crt.old}
     - {src: front-proxy-client.key, dest: front-proxy-client.key.old}
   ignore_errors: yes
-
-- name: Remove old certs and keys
-  file:
-    path: "{{ kube_cert_dir }}/{{ item }}"
-    state: absent
-  with_items:
-    - apiserver.crt
-    - apiserver.key
-    - apiserver-kubelet-client.crt
-    - apiserver-kubelet-client.key
-    - front-proxy-client.crt
-    - front-proxy-client.key
-
-- name: Generate new certs and keys
-  command: "{{ bin_dir }}/kubeadm init phase certs {{ item }} --config={{ kube_config_dir }}/kubeadm-config.yaml"
-  environment: "{{ proxy_env }}"
-  with_items:
-    - apiserver
-    - apiserver-kubelet-client
-    - front-proxy-client
-  when: inventory_hostname == groups['kube-master']|first and kubeadm_version is version('v1.13.0', '>=')
-
-- name: Generate new certs and keys
-  command: "{{ bin_dir }}/kubeadm alpha phase certs {{ item }} --config={{ kube_config_dir }}/kubeadm-config.yaml"
-  environment: "{{ proxy_env }}"
-  with_items:
-    - apiserver
-    - apiserver-kubelet-client
-    - front-proxy-client
-  when: inventory_hostname == groups['kube-master']|first and kubeadm_version is version('v1.13.0', '<')
diff --git a/roles/kubernetes/master/tasks/kubeadm-kubeconfig.yml b/roles/kubernetes/master/tasks/kubeadm-kubeconfig.yml
deleted file mode 100644
index 7f4bfbf56..000000000
--- a/roles/kubernetes/master/tasks/kubeadm-kubeconfig.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: Backup old configuration files
-  copy:
-    src: "{{ kube_config_dir }}/{{ item.src }}"
-    dest: "{{ kube_config_dir }}/{{ item.dest }}"
-    remote_src: yes
-  with_items:
-    - {src: admin.conf, dest: admin.conf.old}
-    - {src: kubelet.conf, dest: kubelet.conf.old}
-    - {src: controller-manager.conf, dest: controller-manager.conf.old}
-    - {src: scheduler.conf, dest: scheduler.conf.old}
-  ignore_errors: yes
-
-- name: Remove old configuration files
-  file:
-    path: "{{ kube_config_dir }}/{{ item }}"
-    state: absent
-  with_items:
-    - admin.conf
-    - kubelet.conf
-    - controller-manager.conf
-    - scheduler.conf
-
-- name: Generate new configuration files
-  command: "{{ bin_dir }}/kubeadm init phase kubeconfig all --config={{ kube_config_dir }}/kubeadm-config.yaml"
-  environment: "{{ proxy_env }}"
-  when: kubeadm_version is version('v1.13.0', '>=')
-  ignore_errors: yes
-
-- name: Generate new configuration files
-  command: "{{ bin_dir }}/kubeadm alpha phase kubeconfig all --config={{ kube_config_dir }}/kubeadm-config.yaml"
-  environment: "{{ proxy_env }}"
-  when: kubeadm_version is version('v1.13.0', '<')
-  ignore_errors: yes
diff --git a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
index 3a3a45a8e..043530c4a 100644
--- a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
@@ -15,6 +15,6 @@
     - {src: front-proxy-client-key.pem, dest: front-proxy-client.key}
     - {src: service-account-key.pem, dest: sa.pub}
     - {src: service-account-key.pem, dest: sa.key}
-    - {src: "node-{{ inventory_hostname }}.pem", dest: apiserver-kubelet-client.crt }
-    - {src: "node-{{ inventory_hostname }}-key.pem", dest: apiserver-kubelet-client.key }
+    - {src: "node-{{ inventory_hostname }}.pem", dest: apiserver-kubelet-client.crt}
+    - {src: "node-{{ inventory_hostname }}-key.pem", dest: apiserver-kubelet-client.key}
   register: kubeadm_copy_old_certs
diff --git a/roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml b/roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml
new file mode 100644
index 000000000..f7f098d35
--- /dev/null
+++ b/roles/kubernetes/master/tasks/kubeadm-secondary-experimental.yml
@@ -0,0 +1,45 @@
+---
+- name: Set kubeadm_discovery_address
+  set_fact:
+    kubeadm_discovery_address: >-
+      {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
+      {{ first_kube_master }}:{{ kube_apiserver_port }}
+      {%- else -%}
+      {{ kube_apiserver_endpoint }}
+      {%- endif %}
+  tags:
+    - facts
+
+- name: Create kubeadm ControlPlane config
+  template:
+    src: "kubeadm-controlplane.{{ kubeadmConfig_api_version }}.yaml.j2"
+    dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml"
+    backup: yes
+  when:
+    - inventory_hostname != groups['kube-master']|first
+    - not kubeadm_already_run.stat.exists
+
+- name: Wait for k8s apiserver
+  wait_for:
+    host: "{{kubeadm_discovery_address.split(':')[0]}}"
+    port: "{{kubeadm_discovery_address.split(':')[1]}}"
+    timeout: 180
+
+- name: Joining control plane node to the cluster.
+  command: >-
+    {{ bin_dir }}/kubeadm join
+    --config {{ kube_config_dir }}/kubeadm-controlplane.yaml
+    --ignore-preflight-errors=all
+    {% if kubeadm_certificate_key is defined %}
+    --certificate-key={{ kubeadm_certificate_key }}
+    {% endif %}
+  register: kubeadm_join_control_plane
+  when:
+    - inventory_hostname != groups['kube-master']|first
+    - not kubeadm_already_run.stat.exists
+  environment:
+    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
+
+- name: Set secret_changed to false to avoid extra token rotation
+  set_fact:
+    secret_changed: false
diff --git a/roles/kubernetes/master/tasks/kubeadm-secondary-legacy.yml b/roles/kubernetes/master/tasks/kubeadm-secondary-legacy.yml
new file mode 100644
index 000000000..a478d94fa
--- /dev/null
+++ b/roles/kubernetes/master/tasks/kubeadm-secondary-legacy.yml
@@ -0,0 +1,44 @@
+---
+- name: slurp kubeadm certs
+  slurp:
+    src: "{{ item }}"
+  with_items:
+    - "{{ kube_cert_dir }}/apiserver.crt"
+    - "{{ kube_cert_dir }}/apiserver.key"
+    - "{{ kube_cert_dir }}/apiserver-kubelet-client.crt"
+    - "{{ kube_cert_dir }}/apiserver-kubelet-client.key"
+    - "{{ kube_cert_dir }}/ca.crt"
+    - "{{ kube_cert_dir }}/ca.key"
+    - "{{ kube_cert_dir }}/front-proxy-ca.crt"
+    - "{{ kube_cert_dir }}/front-proxy-ca.key"
+    - "{{ kube_cert_dir }}/front-proxy-client.crt"
+    - "{{ kube_cert_dir }}/front-proxy-client.key"
+    - "{{ kube_cert_dir }}/sa.key"
+    - "{{ kube_cert_dir }}/sa.pub"
+  register: kubeadm_certs
+  delegate_to: "{{ groups['kube-master']|first }}"
+
+- name: kubeadm | write out kubeadm certs
+  copy:
+    dest: "{{ item.item }}"
+    content: "{{ item.content | b64decode }}"
+    owner: root
+    group: root
+    mode: 0600
+  no_log: true
+  register: copy_kubeadm_certs
+  with_items: "{{ kubeadm_certs.results }}"
+  when: inventory_hostname != groups['kube-master']|first
+
+- name: kubeadm | Init other uninitialized masters
+  command: timeout -k 600s 600s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
+  register: kubeadm_init
+  retries: 10
+  until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
+  when:
+    - inventory_hostname != groups['kube-master']|first
+    - not kubeadm_already_run.stat.exists
+  failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
+  environment:
+    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
+  notify: Master | restart kubelet
diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml
index c8178a18b..683c9339a 100644
--- a/roles/kubernetes/master/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -10,11 +10,10 @@
   import_tasks: kubeadm-migrate-certs.yml
   when: old_apiserver_cert.stat.exists
 
-- name: kubeadm | Check apiserver key
+- name: kubeadm | Check serviceaccount key
   stat:
-    path: "{{ kube_cert_dir }}/apiserver.key"
-  register: apiserver_key_before
-  delegate_to: "{{groups['kube-master']|first}}"
+    path: "{{ kube_cert_dir }}/sa.key"
+  register: sa_key_before
   run_once: true
 
 - name: kubeadm | Check if kubeadm has already run
@@ -62,10 +61,6 @@
     sans_address: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
   tags: facts
 
-- name: kubeadm | Copy etcd cert dir under k8s cert dir
-  command: "cp -TR {{ etcd_cert_dir }} {{ kube_config_dir }}/ssl/etcd"
-  changed_when: false
-
 - name: Create audit-policy directory
   file:
     path: "{{ audit_policy_file | dirname }}"
@@ -94,7 +89,18 @@
     - kubeadm_already_run.stat.exists
 
 - name: kubeadm | Initialize first master
-  command: timeout -k 600s 600s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
+  command: >-
+    timeout -k 600s 600s
+    {{ bin_dir }}/kubeadm init
+    --config={{ kube_config_dir }}/kubeadm-config.yaml
+    --ignore-preflight-errors=all
+    {% if kubeadm_version is version('v1.14.0', '>=') %}
+    --experimental-upload-certs
+    {% endif %}
+    --skip-phases=addon/coredns
+    {% if kubeadm_certificate_key is defined %}
+    --certificate-key={{ kubeadm_certificate_key }}
+    {% endif %}
   register: kubeadm_init
   # Retry is because upload config sometimes fails
   retries: 3
@@ -105,76 +111,73 @@
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
   notify: Master | restart kubelet
 
-- name: slurp kubeadm certs
-  slurp:
-    src: "{{ item }}"
-  with_items:
-    - "{{ kube_cert_dir }}/apiserver.crt"
-    - "{{ kube_cert_dir }}/apiserver.key"
-    - "{{ kube_cert_dir }}/apiserver-kubelet-client.crt"
-    - "{{ kube_cert_dir }}/apiserver-kubelet-client.key"
-    - "{{ kube_cert_dir }}/ca.crt"
-    - "{{ kube_cert_dir }}/ca.key"
-    - "{{ kube_cert_dir }}/front-proxy-ca.crt"
-    - "{{ kube_cert_dir }}/front-proxy-ca.key"
-    - "{{ kube_cert_dir }}/front-proxy-client.crt"
-    - "{{ kube_cert_dir }}/front-proxy-client.key"
-    - "{{ kube_cert_dir }}/sa.key"
-    - "{{ kube_cert_dir }}/sa.pub"
-  register: kubeadm_certs
-  delegate_to: "{{ groups['kube-master']|first }}"
-  run_once: true
+- name: set kubeadm certificate key
+  set_fact:
+    kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}"
+  with_items: "{{ (hostvars[groups['kube-master'][0]]['kubeadm_init']|default({'stdout_lines': []}))['stdout_lines'] }}"
+  when:
+    - kubeadm_version is version('v1.14.0', '>=')
+    - kubeadm_certificate_key is not defined
+    - item | trim | match('.*--certificate-key .*')
+    - hostvars[groups['kube-master'][0]]['kubeadm_init']['stdout_lines'] is defined
+
+- name: Create kubeadm token for joining nodes with 24h expiration (default)
+  command: "{{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create"
+  register: temp_token
+  retries: 5
+  delay: 5
+  until: temp_token is succeeded
+  delegate_to: "{{groups['kube-master']|first}}"
+  when: kubeadm_token is not defined
+  tags:
+    - kubeadm_token
 
-- name: kubeadm | write out kubeadm certs
-  copy:
-    dest: "{{ item.item }}"
-    content: "{{ item.content | b64decode }}"
-    owner: root
-    group: root
-    mode: 0600
-  no_log: true
-  register: copy_kubeadm_certs
-  with_items: "{{ kubeadm_certs.results }}"
-  when: inventory_hostname != groups['kube-master']|first
-
-- name: kubeadm | Kubeconfig management with kubeadm
-  import_tasks: kubeadm-kubeconfig.yml
+- name: Set kubeadm_token
+  set_fact:
+    kubeadm_token: "{{ temp_token.stdout }}"
+  when: temp_token.stdout is defined
+  tags:
+    - kubeadm_token
+
+- name: Create hardcoded kubeadm token for joining nodes with 24h expiration (if defined)
+  shell: >-
+    {{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token delete {{ kubeadm_token }} || :;
+    {{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create {{ kubeadm_token }}
   when:
-    - not upgrade_cluster_setup
-    - kubeadm_already_run.stat.exists
+    - inventory_hostname == groups['kube-master']|first
+    - kubeadm_token is defined
+  tags:
+    - kubeadm_token
 
-- name: kubeadm | Init other uninitialized masters
-  command: timeout -k 600s 600s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
-  register: kubeadm_init
-  retries: 10
-  until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
-  when: inventory_hostname != groups['kube-master']|first and not kubeadm_already_run.stat.exists
-  failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
-  environment:
-    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
-  notify: Master | restart kubelet
+- name: kubeadm | Initialize other masters (experimental control plane)
+  include: kubeadm-secondary-experimental.yml
+  when: kubeadm_control_plane
 
-- name: kubeadm | upgrage kubernetes cluster
+- name: kubeadm | Initialize other masters (legacy mode)
+  include: kubeadm-secondary-legacy.yml
+  when: not kubeadm_control_plane
+
+- name: kubeadm | upgrade kubernetes cluster
   import_tasks: kubeadm-upgrade.yml
   when: upgrade_cluster_setup
 
-- name: kubeadm | Check apiserver key again
+- name: kubeadm | Check serviceaccount key again
   stat:
-    path: "{{ kube_cert_dir }}/apiserver.key"
-  register: apiserver_key_after
-  delegate_to: "{{groups['kube-master']|first}}"
+    path: "{{ kube_cert_dir }}/sa.key"
+  register: sa_key_after
   run_once: true
 
 - name: kubeadm | Set secret_changed if service account key was updated
   command: /bin/true
   notify: Master | set secret_changed
-  when: apiserver_key_before.stat.checksum|default("") != apiserver_key_after.stat.checksum
+  when: sa_key_before.stat.checksum|default("") != sa_key_after.stat.checksum
 
 - name: kubeadm | cleanup old certs if necessary
   import_tasks: kubeadm-cleanup-old-certs.yml
   when:
     - old_apiserver_cert.stat.exists
 
+# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
 - name: kubeadm | Remove taint for master with node role
   command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
   delegate_to: "{{groups['kube-master']|first}}"
diff --git a/roles/kubernetes/master/tasks/kubeadm-upgrade.yml b/roles/kubernetes/master/tasks/kubeadm-upgrade.yml
index 7b74c85c3..5eaefbe3e 100644
--- a/roles/kubernetes/master/tasks/kubeadm-upgrade.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-upgrade.yml
@@ -17,6 +17,8 @@
   failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
   notify: Master | restart kubelet
 
+# FIXME: https://github.com/kubernetes/kubeadm/issues/1498 remove stdout_lines
+# check after issue is fixed
 - name: kubeadm | Upgrade other masters
   command: >-
     timeout -k 600s 600s
@@ -29,5 +31,8 @@
     --etcd-upgrade=false
   register: kubeadm_upgrade
   when: inventory_hostname != groups['kube-master']|first
-  failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
+  failed_when:
+    - kubeadm_upgrade.rc != 0
+    - '"field is immutable" not in kubeadm_upgrade.stderr'
+    - kubeadm_upgrade.stdout_lines | length > 1
   notify: Master | restart kubelet
diff --git a/roles/kubernetes/master/tasks/kubeadm-version.yml b/roles/kubernetes/master/tasks/kubeadm-version.yml
index 971e7930f..f61657f38 100644
--- a/roles/kubernetes/master/tasks/kubeadm-version.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-version.yml
@@ -3,11 +3,6 @@
   command: "{{ bin_dir }}/kubeadm version -o short"
   register: kubeadm_output
 
-- name: sets kubeadm api version to v1alpha1
-  set_fact:
-    kubeadmConfig_api_version: v1alpha1
-  when: kubeadm_output.stdout is version('v1.11.0', '<')
-
 - name: sets kubeadm api version to v1alpha2
   set_fact:
     kubeadmConfig_api_version: v1alpha2
diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml
index 7a36ebc89..371f03847 100644
--- a/roles/kubernetes/master/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/master/tasks/pre-upgrade.yml
@@ -3,8 +3,8 @@
   command: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} ls /registry/minions"
   environment:
     ETCDCTL_API: 2
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
   register: old_data_exists
   delegate_to: "{{groups['etcd'][0]}}"
   changed_when: false
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
deleted file mode 100644
index ec157e980..000000000
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
+++ /dev/null
@@ -1,204 +0,0 @@
-apiVersion: kubeadm.k8s.io/v1alpha1
-kind: MasterConfiguration
-api:
-{% if kubeadm_config_api_fqdn is defined %}
-  controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}
-  bindPort: {{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
-{% else %}
-  advertiseAddress: {{ ip | default(fallback_ips[inventory_hostname]) }}
-  bindPort: {{ kube_apiserver_port }}
-{% endif %}
-etcd:
-  endpoints:
-{% for endpoint in etcd_access_addresses.split(',') %}
-  - {{ endpoint }}
-{% endfor %}
-  caFile: {{ etcd_cert_dir }}/ca.pem
-  certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
-  keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
-networking:
-  dnsDomain: {{ dns_domain }}
-  serviceSubnet: {{ kube_service_addresses }}
-  podSubnet: {{ kube_pods_subnet }}
-kubernetesVersion: {{ kube_version }}
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
-cloudProvider: {{cloud_provider}}
-cloudConfig: {{ kube_config_dir }}/cloud_config
-{% elif cloud_provider is defined and cloud_provider in ["external"] %}
-cloudConfig: {{ kube_config_dir }}/cloud_config
-{% endif %}
-{% if kube_proxy_mode == 'ipvs' %}
-kubeProxy:
-  config:
-{% if kube_version is version('v1.10', '<') %}
-    featureGates: SupportIPVSProxyMode=true
-{% endif %}
-{% if kube_version is version('v1.10', '>=') %}
-    featureGates:
-      SupportIPVSProxyMode: true
-{% endif %}
-    mode: ipvs
-{% endif %}
-{% if kube_proxy_nodeport_addresses %}
-    nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
-{% endif %}
-resourceContainer: ""
-authorizationModes:
-{% for mode in authorization_modes %}
-- {{ mode }}
-{% endfor %}
-selfHosted: false
-apiServerExtraArgs:
-  bind-address: {{ kube_apiserver_bind_address }}
-{% if kube_apiserver_insecure_port|string != "0" %}
-  insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
-{% endif %}
-  insecure-port: "{{ kube_apiserver_insecure_port }}"
-{% if kube_version is version('v1.10', '<') %}
-  admission-control: {{ kube_apiserver_admission_control | join(',') }}
-{% else %}
-{% if kube_apiserver_enable_admission_plugins|length > 0 %}
-  enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
-{% endif %}
-{% if kube_apiserver_disable_admission_plugins|length > 0 %}
-  disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
-{% endif %}
-{% endif %}
-  apiserver-count: "{{ kube_apiserver_count }}"
-{% if kube_version is version('v1.9', '>=') %}
-  endpoint-reconciler-type: lease
-{% endif %}
-{% if etcd_events_cluster_enabled %}
-  etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
-{% endif %}
-  service-node-port-range: {{ kube_apiserver_node_port_range }}
-  kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
-  profiling: "{{ kube_profiling }}"
-  request-timeout: "{{ kube_apiserver_request_timeout }}"
-  repair-malformed-updates: "false"
-  enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
-{% if kube_api_anonymous_auth is defined and kube_version is version('v1.5', '>=')  %}
-  anonymous-auth: "{{ kube_api_anonymous_auth }}"
-{% endif %}
-{% if kube_basic_auth|default(true) %}
-  basic-auth-file: {{ kube_users_dir }}/known_users.csv
-{% endif %}
-{% if kube_token_auth|default(true) %}
-  token-auth-file: {{ kube_token_dir }}/known_tokens.csv
-{% endif %}
-{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
-  oidc-issuer-url: {{ kube_oidc_url }}
-  oidc-client-id: {{ kube_oidc_client_id }}
-{%   if kube_oidc_ca_file is defined %}
-  oidc-ca-file: {{ kube_oidc_ca_file }}
-{%   endif %}
-{%   if kube_oidc_username_claim is defined %}
-  oidc-username-claim: {{ kube_oidc_username_claim }}
-{%   endif %}
-{%   if kube_oidc_groups_claim is defined %}
-  oidc-groups-claim: {{ kube_oidc_groups_claim }}
-{%   endif %}
-{%   if kube_oidc_username_prefix is defined %}
-  oidc-username-prefix: "{{ kube_oidc_username_prefix }}"
-{%   endif %}
-{%   if kube_oidc_groups_prefix is defined %}
-  oidc-groups-prefix: "{{ kube_oidc_groups_prefix }}"
-{%   endif %}
-{% endif %}
-{% if kube_webhook_token_auth|default(false) %}
-  authentication-token-webhook-config-file: {{ kube_config_dir }}/webhook-token-auth-config.yaml
-{% endif %}
-{% if kube_encrypt_secret_data %}
-  experimental-encryption-provider-config: {{ kube_cert_dir }}/secrets_encryption.yaml
-{% endif %}
-  storage-backend: {{ kube_apiserver_storage_backend }}
-{% if kube_api_runtime_config is defined %}
-  runtime-config: {{ kube_api_runtime_config | join(',') }}
-{% endif %}
-  allow-privileged: "true"
-{% for key in kube_kubeadm_apiserver_extra_args %}
-  {{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}"
-{% endfor %}
-{% if kube_feature_gates %}
-  feature-gates: {{ kube_feature_gates|join(',') }}
-{% endif %}
-{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %}
-  configure-cloud-routes: "true"
-{% endif %}
-controllerManagerExtraArgs:
-  node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
-  node-monitor-period: {{ kube_controller_node_monitor_period }}
-  pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }}
-  node-cidr-mask-size: "{{ kube_network_node_prefix }}"
-  profiling: "{{ kube_profiling }}"
-  terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}"
-{% if kube_feature_gates %}
-  feature-gates: {{ kube_feature_gates|join(',') }}
-{% endif %}
-{% for key in kube_kubeadm_controller_extra_args %}
-  {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
-{% endfor %}
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] %}
-controllerManagerExtraVolumes:
-{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined and openstack_cacert != "" %}
-- name: openstackcacert
-  hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
-  mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
-{% endif %}
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] %}
-- name: cloud-config
-  hostPath: {{ kube_config_dir }}/cloud_config
-  mountPath: {{ kube_config_dir }}/cloud_config
-{% endif %}
-{% endif %}
-schedulerExtraArgs:
-  profiling: "{{ kube_profiling }}"
-{% if kube_feature_gates %}
-  feature-gates: {{ kube_feature_gates|join(',') }}
-{% endif %}
-{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
-{% for key in kube_kubeadm_scheduler_extra_args %}
-  {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
-{% endfor %}
-{% endif %}
-{% if kube_basic_auth|default(true) or kube_token_auth|default(true) or kube_webhook_token_auth|default(false) or ssl_ca_dirs|length %}
-apiServerExtraVolumes:
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] %}
-- name: cloud-config
-  hostPath: {{ kube_config_dir }}/cloud_config
-  mountPath: {{ kube_config_dir }}/cloud_config
-{% endif %}
-{% if kube_basic_auth|default(true) %}
-- name: basic-auth-config
-  hostPath: {{ kube_users_dir }}
-  mountPath: {{ kube_users_dir }}
-{% endif %}
-{% if kube_token_auth|default(true) %}
-- name: token-auth-config
-  hostPath: {{ kube_token_dir }}
-  mountPath: {{ kube_token_dir }}
-{% endif %}
-{% if kube_webhook_token_auth|default(false) %}
-- name: webhook-token-auth-config
-  hostPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
-  mountPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
-{% endif %}
-{% if ssl_ca_dirs|length %}
-{% for dir in ssl_ca_dirs %}
-- name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
-  hostPath: {{ dir }}
-  mountPath: {{ dir }}
-  writable: false
-{% endfor %}
-{% endif %}
-{% endif %}
-apiServerCertSANs:
-{% for san in apiserver_sans %}
-  - {{ san }}
-{% endfor %}
-certificatesDir: {{ kube_cert_dir }}
-imageRepository: {{ kube_image_repo }}
-unifiedControlPlaneImage: ""
-{% if kube_override_hostname|default('') %}
-nodeName: {{ kube_override_hostname }}
-{% endif %}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
index 6f9cd4458..8419c3d7f 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
@@ -14,9 +14,9 @@ etcd:
 {% for endpoint in etcd_access_addresses.split(',') %}
       - {{ endpoint }}
 {% endfor %}
-      caFile: {{ etcd_cert_dir }}/ca.pem
-      certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
-      keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
+      caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
+      certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
+      keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }}
 networking:
   dnsDomain: {{ dns_domain }}
   serviceSubnet: {{ kube_service_addresses }}
@@ -221,10 +221,12 @@ nodeRegistration:
 {% if kube_override_hostname|default('') %}
   name: {{ kube_override_hostname }}
 {% endif %}
-{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
+{% if inventory_hostname not in groups['kube-node'] %}
   taints:
   - effect: NoSchedule
     key: node-role.kubernetes.io/master
+{% else %}
+  taints: {}
 {% endif %}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
index 6dc736651..01252f661 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
@@ -7,10 +7,12 @@ nodeRegistration:
 {% if kube_override_hostname|default('') %}
   name: {{ kube_override_hostname }}
 {% endif %}
-{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
+{% if inventory_hostname not in groups['kube-node'] %}
   taints:
   - effect: NoSchedule
     key: node-role.kubernetes.io/master
+{% else %}
+  taints: {}
 {% endif %}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
@@ -29,9 +31,9 @@ etcd:
 {% for endpoint in etcd_access_addresses.split(',') %}
       - {{ endpoint }}
 {% endfor %}
-      caFile: {{ etcd_cert_dir }}/ca.pem
-      certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
-      keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
+      caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
+      certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
+      keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }}
 networking:
   dnsDomain: {{ dns_domain }}
   serviceSubnet: {{ kube_service_addresses }}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2
index c975d856b..c9341b592 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2
@@ -11,6 +11,8 @@ nodeRegistration:
   taints:
   - effect: NoSchedule
     key: node-role.kubernetes.io/master
+{% else %}
+  taints: []
 {% endif %}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
@@ -29,9 +31,9 @@ etcd:
 {% for endpoint in etcd_access_addresses.split(',') %}
       - {{ endpoint }}
 {% endfor %}
-      caFile: {{ etcd_cert_dir }}/ca.pem
-      certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
-      keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
+      caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
+      certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
+      keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }}
 networking:
   dnsDomain: {{ dns_domain }}
   serviceSubnet: {{ kube_service_addresses }}
diff --git a/roles/kubernetes/master/templates/kubeadm-controlplane.v1beta1.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-controlplane.v1beta1.yaml.j2
new file mode 100644
index 000000000..e9eb6134c
--- /dev/null
+++ b/roles/kubernetes/master/templates/kubeadm-controlplane.v1beta1.yaml.j2
@@ -0,0 +1,26 @@
+apiVersion: kubeadm.k8s.io/v1beta1
+kind: JoinConfiguration
+discovery:
+  bootstrapToken:
+{% if kubeadm_config_api_fqdn is defined %}
+    apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
+{% else %}
+    apiServerEndpoint: {{ kubeadm_discovery_address | replace("https://", "")}}
+{% endif %}
+    token: {{ kubeadm_token }}
+    unsafeSkipCAVerification: true
+  timeout: {{ discovery_timeout }}
+  tlsBootstrapToken: {{ kubeadm_token }}
+controlPlane:
+  localAPIEndpoint:
+    advertiseAddress: {{ kube_apiserver_address }}
+    bindPort: {{ kube_apiserver_port }}
+nodeRegistration:
+  name: {{ inventory_hostname  }}
+{% if container_manager == 'crio' %}
+  criSocket: /var/run/crio/crio.sock
+{% elif container_manager == 'rkt' %}
+  criSocket: /var/run/rkt.sock
+{% else %}
+  criSocket: /var/run/dockershim.sock
+{% endif %}
diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
index 3923df35d..98cf409ee 100644
--- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
@@ -84,14 +84,6 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 
 {# Kubelet node labels #}
 {% set role_node_labels = [] %}
-{% if inventory_hostname in groups['kube-master'] %}
-{%   set dummy = role_node_labels.append("node-role.kubernetes.io/master=''") %}
-{%   if not standalone_kubelet|bool %}
-{%     set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
-{%   endif %}
-{% else %}
-{%   set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
-{% endif %}
 {% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
 {%   if inventory_hostname in nvidia_gpu_nodes %}
 {%     set dummy = role_node_labels.append('nvidia.com/gpu=true')  %}
diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml
index b6ed3944e..3eb9bcbf2 100644
--- a/roles/kubernetes/preinstall/defaults/main.yml
+++ b/roles/kubernetes/preinstall/defaults/main.yml
@@ -24,6 +24,8 @@ disable_ipv6_dns: false
 
 kube_cert_group: kube-cert
 kube_config_dir: /etc/kubernetes
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+kube_cert_compat_dir: /etc/kubernetes/pki
 
 # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
 # for hostnet pods and infra needs
diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
index 7f48ec42c..53c5408e1 100644
--- a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
+++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
@@ -23,6 +23,14 @@
     - "{{ kube_manifest_dir }}"
     - "{{ kube_script_dir }}"
 
+- name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498)
+  file:
+    src: "{{ kube_cert_dir }}"
+    dest: "{{ kube_cert_compat_dir }}"
+    state: link
+  when:
+    - kube_cert_dir != kube_cert_compat_dir
+
 - name: Create cni directories
   file:
     path: "{{ item }}"
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 5c91f022d..5bc91620b 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -12,7 +12,7 @@ is_atomic: false
 disable_swap: true
 
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.13.5
+kube_version: v1.14.0
 
 ## Kube Proxy mode One of ['iptables','ipvs']
 kube_proxy_mode: ipvs
@@ -97,6 +97,9 @@ kube_manifest_dir: "{{ kube_config_dir }}/manifests"
 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"
 
+# compatibility directory for kubeadm
+kube_cert_compat_dir: "/etc/kubernetes/pki"
+
 # This is where all of the bearer tokens will be stored
 kube_token_dir: "{{ kube_config_dir }}/tokens"
 
@@ -335,6 +338,9 @@ kube_feature_gates: |-
   {{ feature_gate_v1_12 }}
   {%- endif %}
 
+# Enable kubeadm experimental control plane
+kubeadm_control_plane: false
+
 # Local volume provisioner storage classes
 # Levarages Ansibles string to Python datatype casting. Otherwise the dict_key isn't substituted
 # see https://github.com/ansible/ansible/issues/17324
@@ -383,7 +389,7 @@ no_proxy: >-
   {%- endif -%}
   {%- for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}
   {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }},
-  {%-   if item != hostvars[item].get('ansible_hostname', "") -%}
+  {%-   if item != hostvars[item].get('ansible_hostname', '') -%}
   {{ hostvars[item]['ansible_hostname'] }},
   {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
   {%-   endif -%}
diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml
index ae564b1f3..3ee4b1b29 100644
--- a/roles/network_plugin/calico/defaults/main.yml
+++ b/roles/network_plugin/calico/defaults/main.yml
@@ -61,3 +61,7 @@ calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostna
 
 ### do not enable this, this is detected in scope of tasks, this is just a default value
 calico_upgrade_needed: false
+
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
diff --git a/roles/network_plugin/calico/handlers/main.yml b/roles/network_plugin/calico/handlers/main.yml
index 05cc73289..64218c040 100644
--- a/roles/network_plugin/calico/handlers/main.yml
+++ b/roles/network_plugin/calico/handlers/main.yml
@@ -1,15 +1,14 @@
 ---
-- name: restart calico-node
+- name: reset_calico_cni
   command: /bin/true
   notify:
-    - Calico | reload systemd
-    - Calico | reload calico-node
+    - delete 10-calico.conflist
+    - delete calico-node containers
 
-- name: Calico | reload systemd
-  shell: systemctl daemon-reload
+- name: delete 10-calico.conflist
+  file:
+    path: /etc/calico/10-calico.conflist
+    state: absent
 
-- name: Calico | reload calico-node
-  service:
-    name: calico-node
-    state: restarted
-    sleep: 10
+- name: delete calico-node containers
+  shell: "docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty docker rm -f"
diff --git a/roles/network_plugin/calico/rr/defaults/main.yml b/roles/network_plugin/calico/rr/defaults/main.yml
index bdc2d9f10..4871f34a8 100644
--- a/roles/network_plugin/calico/rr/defaults/main.yml
+++ b/roles/network_plugin/calico/rr/defaults/main.yml
@@ -10,3 +10,7 @@ calico_rr_memory_limit: 1000M
 calico_rr_cpu_limit: 300m
 calico_rr_memory_requests: 128M
 calico_rr_cpu_requests: 150m
+
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml
index 1c41fdf7a..d7f02588c 100644
--- a/roles/network_plugin/calico/rr/tasks/main.yml
+++ b/roles/network_plugin/calico/rr/tasks/main.yml
@@ -22,9 +22,9 @@
     state: hard
     force: yes
   with_items:
-    - {s: "ca.pem", d: "ca_cert.crt"}
-    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
-    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
+    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
+    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
+    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
 
 - name: Calico-rr | Create dir for logs
   file:
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index ad19e5eaf..67b006349 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -11,6 +11,8 @@
     src: "cni-calico.conflist.j2"
     dest: "/etc/cni/net.d/{% if calico_version is version('v3.3.0', '>=') %}calico.conflist.template{% else %}10-calico.conflist{% endif %}"
     owner: kube
+  register: calico_conflist
+  notify: reset_calico_cni
 
 - name: Calico | Create calico certs directory
   file:
@@ -27,9 +29,9 @@
     state: hard
     force: yes
   with_items:
-    - {s: "ca.pem", d: "ca_cert.crt"}
-    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
-    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
+    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
+    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
+    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
 
 - name: Calico | Install calicoctl wrapper script
   template:
diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml
index 4781541bd..b843a92ad 100644
--- a/roles/network_plugin/calico/tasks/pre.yml
+++ b/roles/network_plugin/calico/tasks/pre.yml
@@ -13,4 +13,4 @@
   register: calico_kubelet_name
   delegate_to: "{{ groups['kube-master'][0] }}"
   when:
-    - "cloud_provider is defined"
\ No newline at end of file
+    - "cloud_provider is defined"
diff --git a/roles/network_plugin/calico/templates/etcdv2-store.yml.j2 b/roles/network_plugin/calico/templates/etcdv2-store.yml.j2
index c65728838..41b68d742 100644
--- a/roles/network_plugin/calico/templates/etcdv2-store.yml.j2
+++ b/roles/network_plugin/calico/templates/etcdv2-store.yml.j2
@@ -4,6 +4,6 @@ metadata:
 spec:
   datastoreType: "etcdv2"
   etcdEndpoints: "{{ etcd_access_addresses }}"
-  etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
-  etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-  etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
+  etcdKeyFile: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
+  etcdCertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
+  etcdCACertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}"
diff --git a/roles/network_plugin/calico/templates/etcdv3-store.yml.j2 b/roles/network_plugin/calico/templates/etcdv3-store.yml.j2
index 3dc566d0f..17ee75511 100644
--- a/roles/network_plugin/calico/templates/etcdv3-store.yml.j2
+++ b/roles/network_plugin/calico/templates/etcdv3-store.yml.j2
@@ -4,6 +4,6 @@ metadata:
 spec:
   datastoreType: "etcdv3"
   etcdEndpoints: "{{ etcd_access_addresses }}"
-  etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
-  etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-  etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
\ No newline at end of file
+  etcdKeyFile: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
+  etcdCertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
+  etcdCACertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}"
diff --git a/roles/network_plugin/canal/defaults/main.yml b/roles/network_plugin/canal/defaults/main.yml
index 0be0f14fc..bed66333e 100644
--- a/roles/network_plugin/canal/defaults/main.yml
+++ b/roles/network_plugin/canal/defaults/main.yml
@@ -30,3 +30,8 @@ calicoctl_memory_limit: 170M
 calicoctl_cpu_limit: 100m
 calicoctl_memory_requests: 32M
 calicoctl_cpu_requests: 25m
+
+# etcd cert filenames
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml
index f51da3bb5..acf0d3567 100644
--- a/roles/network_plugin/canal/tasks/main.yml
+++ b/roles/network_plugin/canal/tasks/main.yml
@@ -20,9 +20,9 @@
     state: hard
     force: yes
   with_items:
-    - {s: "ca.pem", d: "ca_cert.crt"}
-    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
-    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
+    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
+    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
+    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
 
 - name: Canal | Set Flannel etcd configuration
   command: |-
diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml
index e97364644..95cfdfcf2 100755
--- a/roles/network_plugin/cilium/defaults/main.yml
+++ b/roles/network_plugin/cilium/defaults/main.yml
@@ -5,6 +5,9 @@ cilium_disable_ipv4: false
 
 # Etcd SSL dirs
 cilium_cert_dir: /etc/cilium/certs
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
 
 # Cilium Network Policy directory
 cilium_policy_dir: /etc/kubernetes/policy
diff --git a/roles/network_plugin/cilium/handlers/main.yml b/roles/network_plugin/cilium/handlers/main.yml
index 039c3469f..00525b995 100644
--- a/roles/network_plugin/cilium/handlers/main.yml
+++ b/roles/network_plugin/cilium/handlers/main.yml
@@ -11,4 +11,4 @@
 - name: Kubelet | reload kubelet
   service:
     name: kubelet
-    state: restarted
\ No newline at end of file
+    state: restarted
diff --git a/roles/network_plugin/cilium/tasks/main.yml b/roles/network_plugin/cilium/tasks/main.yml
index 6c55be663..44ab4ae57 100755
--- a/roles/network_plugin/cilium/tasks/main.yml
+++ b/roles/network_plugin/cilium/tasks/main.yml
@@ -21,9 +21,9 @@
     state: hard
     force: yes
   with_items:
-    - {s: "ca.pem", d: "ca_cert.crt"}
-    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
-    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
+    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
+    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
+    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
 
 - name: Cilium | Create Cilium node manifests
   template:
diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml
index b4984c636..7a2cfb640 100644
--- a/tests/files/gce_centos7-flannel-addons.yml
+++ b/tests/files/gce_centos7-flannel-addons.yml
@@ -6,6 +6,8 @@ cloud_machine_type: "n1-standard-2"
 mode: ha
 
 # Deployment settings
+kubeadm_control_plane: true
+kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085
 kube_network_plugin: flannel
 helm_enabled: true
 kubernetes_audit: true
-- 
GitLab