From 225f765b56d21df202a2b932f73b71854527dad9 Mon Sep 17 00:00:00 2001
From: Rong Zhang <rongzhang@alauda.io>
Date: Fri, 7 Dec 2018 04:11:48 +0800
Subject: [PATCH] Upgrade kubernetes to v1.13.0 (#3810)

* Upgrade kubernetes to v1.13.0

* Remove all occurrences of the scheduler.alpha.kubernetes.io/critical-pod annotation from templates (see the note below)

* Fix cert dirs: point kubeadm templates at kube_cert_dir and etcd_cert_dir instead of paths under kube_config_dir/ssl

* Use kubespray v2.8 as the baseline for GitLab CI
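
Note: the critical-pod annotation has no effect starting with Kubernetes 1.12;
pod criticality is expressed through pod priority instead. The affected
templates already request this (illustrative excerpt of existing template
content, not a change introduced by this patch):

    spec:
      priorityClassName: system-node-critical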
---
 .gitlab-ci.yml                                |   6 +-
 README.md                                     |   2 +-
 .../group_vars/k8s-cluster/k8s-cluster.yml    |   2 +-
 .../templates/dnsmasq-autoscaler.yml.j2       |   1 -
 roles/download/defaults/main.yml              |   4 +-
 .../ansible/templates/dns-autoscaler.yml.j2   |   1 -
 .../ansible/templates/kubedns-deploy.yml.j2   |   1 -
 .../k8s-device-plugin-nvidia-daemonset.yml.j2 |   2 -
 .../nvidia-driver-install-daemonset.yml.j2    |   2 -
 .../metrics-server-deployment.yaml.j2         |   1 -
 .../templates/calico-kube-controllers.yml.j2  |   2 -
 roles/kubernetes/kubeadm/tasks/main.yml       |   9 +-
 .../templates/kubeadm-client.conf.v1alpha1.j2 |   2 +-
 .../templates/kubeadm-client.conf.v1alpha2.j2 |   2 +-
 .../templates/kubeadm-client.conf.v1alpha3.j2 |   2 +-
 .../templates/kubeadm-client.conf.v1beta1.j2  |  27 ++
 .../kubernetes/master/tasks/kubeadm-setup.yml |  18 +-
 .../templates/kubeadm-config.v1alpha1.yaml.j2 |   7 +-
 .../templates/kubeadm-config.v1alpha2.yaml.j2 |   7 +-
 .../templates/kubeadm-config.v1alpha3.yaml.j2 |   7 +-
 .../templates/kubeadm-config.v1beta1.yaml.j2  | 258 ++++++++++++++++++
 roles/kubespray-defaults/defaults/main.yaml   |   2 +-
 .../calico/templates/calico-node.yml.j2       |   1 -
 .../canal/templates/canal-node.yaml.j2        |   3 -
 .../cilium/templates/cilium-ds.yml.j2         |   6 -
 .../contiv/templates/contiv-api-proxy.yml.j2  |   3 -
 .../contiv/templates/contiv-cleanup.yml.j2    |   3 -
 .../contiv/templates/contiv-etcd-proxy.yml.j2 |   2 -
 .../contiv/templates/contiv-etcd.yml.j2       |   2 -
 .../contiv/templates/contiv-netmaster.yml.j2  |   3 -
 .../contiv/templates/contiv-netplugin.yml.j2  |   3 -
 .../contiv/templates/contiv-ovs.yml.j2        |   3 -
 .../flannel/templates/cni-flannel.yml.j2      |   3 -
 .../kube-router/templates/kube-router.yml.j2  |   2 -
 .../weave/templates/weave-net.yml.j2          |   3 -
 35 files changed, 325 insertions(+), 77 deletions(-)
 create mode 100644 roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta1.j2
 create mode 100644 roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 845f28ca1..21abfb629 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -41,7 +41,7 @@ before_script:
   tags:
     - kubernetes
     - docker
-  image: quay.io/kubespray/kubespray:v2.7
+  image: quay.io/kubespray/kubespray:v2.8
 
 .docker_service: &docker_service
   services:
@@ -88,11 +88,11 @@ before_script:
     - echo ${PWD}
     - echo "${STARTUP_SCRIPT}"
     - cd tests && make create-${CI_PLATFORM} -s ; cd -
-    #- git fetch --all && git checkout v2.7.0
 
     # Check out latest tag if testing upgrade
     # Uncomment when gitlab kubespray repo has tags
-    - test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
+    #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
+    - test "${UPGRADE_TEST}" != "false" && git checkout 9051aa5296ef76fcff69a2e3827cef28752aa475
     # Checkout the CI vars file so it is available
     - test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
     # Workaround https://github.com/kubernetes-sigs/kubespray/issues/2021
diff --git a/README.md b/README.md
index 1138419b2..67d98bd58 100644
--- a/README.md
+++ b/README.md
@@ -111,7 +111,7 @@ Supported Components
 --------------------
 
 -   Core
-    -   [kubernetes](https://github.com/kubernetes/kubernetes) v1.12.3
+    -   [kubernetes](https://github.com/kubernetes/kubernetes) v1.13.0
     -   [etcd](https://github.com/coreos/etcd) v3.2.24
     -   [docker](https://www.docker.com/) v18.06 (see note)
     -   [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
index ffd158859..e1a800327 100644
--- a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
@@ -19,7 +19,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
 kube_api_anonymous_auth: true
 
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.12.3
+kube_version: v1.13.0
 
 # kubernetes image repo define
 kube_image_repo: "gcr.io/google-containers"
diff --git a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2
index ec7e43fdb..1b81f4c48 100644
--- a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2
+++ b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2
@@ -28,7 +28,6 @@ spec:
       labels:
         k8s-app: dnsmasq-autoscaler
       annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
         scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 3bc9e0134..8dd6ae576 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -35,7 +35,7 @@ download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube
 image_arch: "{{host_architecture | default('amd64')}}"
 
 # Versions
-kube_version: v1.12.3
+kube_version: v1.13.0
 kubeadm_version: "{{ kube_version }}"
 etcd_version: v3.2.24
 
@@ -70,6 +70,7 @@ cni_download_url: "https://github.com/containernetworking/plugins/releases/downl
 
 # Checksums
 hyperkube_checksums:
+  v1.13.0: 754f1baae5dc2ba29afc66e1f5d3b676ee59cd5c40ccce813092408d53bde3d9
   v1.12.3: 600aad3f0d016716abd85931239806193ffbe95f2edfdcea11532d518ae5cdb1
   v1.12.2: 566dfed398c20c9944f8999d6370cb584cb8c228b3c5881137b6b3d9306e4b06
   v1.12.1: 4aa23cfb2fc2e2e4d0cbe0d83a648c38e4baabd6c66f5cdbbb40cbc7582fdc74
@@ -88,6 +89,7 @@ hyperkube_checksums:
   v1.10.1: 6e0642ad6bae68dc81b8d1c9efa18e265e17e23da1895862823cafac08c0344c
   v1.10.0: b5575b2fb4266754c1675b8cd5d9b6cac70f3fee7a05c4e80da3a9e83e58c57e
 kubeadm_checksums:
+  v1.13.0: f5366206416dc4cfc840a7add2289957b56ccc479cc1b74f7397a4df995d6b06
   v1.12.3: c675aa3be82754b3f8dfdde2a1526a72986713312d46d898e65cb564c6aa8ad4
   v1.12.2: 51bc4bfd1d934a27245111c0ad1f793d5147ed15389415a1509502f23fcfa642
   v1.12.1: 5d95efd65aad398d85a9802799f36410ae7a95f9cbe73c8b10d2213c10a6d7be
diff --git a/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2
index 2d930c6e3..20c86550d 100644
--- a/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2
@@ -31,7 +31,6 @@ spec:
       labels:
         k8s-app: dns-autoscaler{{ coredns_ordinal_suffix | default('') }}
       annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
index fa2507115..a095f1625 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
@@ -25,7 +25,6 @@ spec:
       labels:
         k8s-app: kube-dns
       annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2 b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2
index 84f440442..5fd84012c 100644
--- a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/k8s-device-plugin-nvidia-daemonset.yml.j2
@@ -14,8 +14,6 @@ spec:
     metadata:
       labels:
         k8s-app: nvidia-gpu-device-plugin
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
       priorityClassName: system-node-critical
       affinity:
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2 b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2
index a1adede5a..5dda82189 100644
--- a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/templates/nvidia-driver-install-daemonset.yml.j2
@@ -22,8 +22,6 @@ spec:
     metadata:
       labels:
         name: nvidia-driver-installer
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
       priorityClassName: system-node-critical
       affinity:
diff --git a/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 b/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2
index 6cb51d025..245c3beab 100644
--- a/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2
+++ b/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2
@@ -21,7 +21,6 @@ spec:
         app.kubernetes.io/name: metrics-server
         version: {{ metrics_server_version }}
       annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
         seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
index dcb7c9f5f..1eb8e1d4d 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
@@ -6,8 +6,6 @@ metadata:
   labels:
     k8s-app: calico-kube-controllers
     kubernetes.io/cluster-service: "true"
-  annotations:
-    scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
   replicas: 1
   strategy:
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 29b866a97..c2035859d 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -46,7 +46,14 @@
 - name: sets kubeadm api version to v1alpha3
   set_fact:
     kubeadmConfig_api_version: v1alpha3
-  when: kubeadm_output.stdout is version('v1.12.0', '>=')
+  when:
+    - kubeadm_output.stdout is version('v1.12.0', '>=')
+    - kubeadm_output.stdout is version('v1.13.0', '<')
+
+- name: sets kubeadm api version to v1beta1
+  set_fact:
+    kubeadmConfig_api_version: v1beta1
+  when: kubeadm_output.stdout is version('v1.13.0', '>=')
 
 - name: Create kubeadm client config
   template:
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha1.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha1.j2
index fe9f45b2f..6a40ab03e 100644
--- a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha1.j2
+++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha1.j2
@@ -1,6 +1,6 @@
 apiVersion: kubeadm.k8s.io/v1alpha1
 kind: NodeConfiguration
-caCertPath: {{ kube_config_dir }}/ssl/ca.crt
+caCertPath: {{ kube_cert_dir }}/ca.crt
 token: {{ kubeadm_token }}
 discoveryTokenAPIServers:
 {% if groups['kube-master'] | length > 1 and kubeadm_config_api_fqdn is defined %}
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha2.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha2.j2
index eebcdf7c0..b5d8365d7 100644
--- a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha2.j2
+++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha2.j2
@@ -2,7 +2,7 @@ apiVersion: kubeadm.k8s.io/v1alpha2
 kind: NodeConfiguration
 clusterName: {{ cluster_name }}
 discoveryFile: ""
-caCertPath: {{ kube_config_dir }}/ssl/ca.crt
+caCertPath: {{ kube_cert_dir }}/ca.crt
 discoveryTimeout: {{ discovery_timeout }}
 discoveryToken: {{ kubeadm_token }}
 tlsBootstrapToken: {{ kubeadm_token }}
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha3.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha3.j2
index a1e0887e0..ff5f2c0b4 100644
--- a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha3.j2
+++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1alpha3.j2
@@ -2,7 +2,7 @@ apiVersion: kubeadm.k8s.io/v1alpha3
 kind: JoinConfiguration
 clusterName: {{ cluster_name }}
 discoveryFile: ""
-caCertPath: {{ kube_config_dir }}/ssl/ca.crt
+caCertPath: {{ kube_cert_dir }}/ca.crt
 discoveryTimeout: {{ discovery_timeout }}
 discoveryToken: {{ kubeadm_token }}
 tlsBootstrapToken: {{ kubeadm_token }}
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta1.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta1.j2
new file mode 100644
index 000000000..2eb0cf368
--- /dev/null
+++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta1.j2
@@ -0,0 +1,27 @@
+apiVersion: kubeadm.k8s.io/v1beta1
+kind: JoinConfiguration
+discovery:
+  bootstrapToken:
+{% if groups['kube-master'] | length > 1 and kubeadm_config_api_fqdn is defined %}
+    apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
+{% else %}
+    apiServerEndpoint: {{ kubeadm_discovery_address | replace("https://", "")}}
+{% endif %}
+    token: {{ kubeadm_token }}
+    unsafeSkipCAVerification: true
+  timeout: {{ discovery_timeout }}
+  tlsBootstrapToken: {{ kubeadm_token }}
+{% if groups['kube-master'] | length > 1 and kubeadm_config_api_fqdn is defined %}
+controlPlane:
+  localAPIEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
+{% endif %}
+caCertPath: {{ kube_cert_dir }}/ca.crt
+nodeRegistration:
+  name: {{ inventory_hostname  }}
+{% if container_manager == 'crio' %}
+  criSocket: /var/run/crio/crio.sock
+{% elif container_manager == 'rkt' %}
+  criSocket: /var/run/rkt.sock
+{% else %}
+  criSocket: /var/run/dockershim.sock
+{% endif %}
diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml
index c826d9c71..1ed271636 100644
--- a/roles/kubernetes/master/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -103,7 +103,14 @@
 - name: sets kubeadm api version to v1alpha3
   set_fact:
     kubeadmConfig_api_version: v1alpha3
-  when: kubeadm_output.stdout is version('v1.12.0', '>=')
+  when:
+    - kubeadm_output.stdout is version('v1.12.0', '>=')
+    - kubeadm_output.stdout is version('v1.13.0', '<')
+
+- name: sets kubeadm api version to v1beta1
+  set_fact:
+    kubeadmConfig_api_version: v1beta1
+  when: kubeadm_output.stdout is version('v1.13.0', '>=')
 
 # Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
 - name: set kubeadm_config_api_fqdn define
@@ -144,15 +151,6 @@
   failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
   notify: Master | restart kubelet
 
-# FIXME(mattymo): remove when https://github.com/kubernetes/kubeadm/issues/433 is fixed
-- name: kubeadm | Enable kube-proxy
-  command: "{{ bin_dir }}/kubeadm alpha phase addon kube-proxy --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml"
-  register: kubeadm_kube_proxy_enable
-  retries: 10
-  until: kubeadm_kube_proxy_enable is succeeded
-  when: inventory_hostname == groups['kube-master']|first
-  changed_when: false
-
 - name: slurp kubeadm certs
   slurp:
     src: "{{ item }}"
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
index ee780cd5e..f2ad127c7 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2
@@ -13,9 +13,9 @@ etcd:
 {% for endpoint in etcd_access_addresses.split(',') %}
   - {{ endpoint }}
 {% endfor %}
-  caFile: {{ kube_config_dir }}/ssl/etcd/ca.pem
-  certFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}.pem
-  keyFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}-key.pem
+  caFile: {{ etcd_cert_dir }}/ca.pem
+  certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
+  keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
 networking:
   dnsDomain: {{ dns_domain }}
   serviceSubnet: {{ kube_service_addresses }}
@@ -69,6 +69,7 @@ apiServerExtraArgs:
 {% if kube_version is version('v1.9', '>=') %}
   endpoint-reconciler-type: lease
 {% endif %}
+  storage-backend: etcd3
 {% if etcd_events_cluster_enabled %}
   etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
 {% endif %}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
index 87e5a961e..3385d2892 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2
@@ -14,9 +14,9 @@ etcd:
 {% for endpoint in etcd_access_addresses.split(',') %}
       - {{ endpoint }}
 {% endfor %}
-      caFile: {{ kube_config_dir }}/ssl/etcd/ca.pem
-      certFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}.pem
-      keyFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}-key.pem
+      caFile: {{ etcd_cert_dir }}/ca.pem
+      certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
+      keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
 networking:
   dnsDomain: {{ dns_domain }}
   serviceSubnet: {{ kube_service_addresses }}
@@ -54,6 +54,7 @@ apiServerExtraArgs:
 {% if kube_version is version('v1.9', '>=') %}
   endpoint-reconciler-type: lease
 {% endif %}
+  storage-backend: etcd3
 {% if etcd_events_cluster_enabled %}
   etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
 {% endif %}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
index 13053ae0b..d6f77ff7f 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2
@@ -29,9 +29,9 @@ etcd:
 {% for endpoint in etcd_access_addresses.split(',') %}
       - {{ endpoint }}
 {% endfor %}
-      caFile: {{ kube_config_dir }}/ssl/etcd/ca.pem
-      certFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}.pem
-      keyFile: {{ kube_config_dir }}/ssl/etcd/node-{{ inventory_hostname }}-key.pem
+      caFile: {{ etcd_cert_dir }}/ca.pem
+      certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
+      keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
 networking:
   dnsDomain: {{ dns_domain }}
   serviceSubnet: {{ kube_service_addresses }}
@@ -71,6 +71,7 @@ apiServerExtraArgs:
 {% if kube_version is version('v1.9', '>=') %}
   endpoint-reconciler-type: lease
 {% endif %}
+  storage-backend: etcd3
 {% if etcd_events_cluster_enabled %}
   etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
 {% endif %}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2
new file mode 100644
index 000000000..366cbee23
--- /dev/null
+++ b/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2
@@ -0,0 +1,258 @@
+apiVersion: kubeadm.k8s.io/v1beta1
+kind: InitConfiguration
+localAPIEndpoint:
+  advertiseAddress: {{ ip | default(ansible_default_ipv4.address) }}
+  bindPort: {{ kube_apiserver_port }}
+nodeRegistration:
+{% if kube_override_hostname|default('') %}
+  name: {{ kube_override_hostname }}
+{% endif %}
+{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
+  taints:
+  - key: "kubeadmNode"
+    value: "master"
+    effect: "NoSchedule"
+{% endif %}
+{% if container_manager == 'crio' %}
+  criSocket: /var/run/crio/crio.sock
+{% elif container_manager == 'rkt' %}
+  criSocket: /var/run/rkt.sock
+{% else %}
+  criSocket: /var/run/dockershim.sock
+{% endif %}
+---
+apiVersion: kubeadm.k8s.io/v1beta1
+kind: ClusterConfiguration
+clusterName: {{ cluster_name }}
+etcd:
+  external:
+      endpoints:
+{% for endpoint in etcd_access_addresses.split(',') %}
+      - {{ endpoint }}
+{% endfor %}
+      caFile: {{ etcd_cert_dir }}/ca.pem
+      certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
+      keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
+networking:
+  dnsDomain: {{ dns_domain }}
+  serviceSubnet: {{ kube_service_addresses }}
+  podSubnet: {{ kube_pods_subnet }}
+  podNetworkCidr: "{{ kube_network_node_prefix }}"
+kubernetesVersion: {{ kube_version }}
+{% if groups['kube-master'] | length > 1 and kubeadm_config_api_fqdn is defined %}
+controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
+{% else %}
+controlPlaneEndpoint: {{ ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port }}
+{% endif %}
+certificatesDir: {{ kube_cert_dir }}
+imageRepository: {{ kube_image_repo }}
+useHyperKubeImage: false
+apiServer:
+  extraArgs:
+    authorization-mode: {{ authorization_modes | join(',') }}
+    bind-address: {{ kube_apiserver_bind_address }}
+{% if kube_apiserver_insecure_port|string != "0" %}
+    insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
+{% endif %}
+    insecure-port: "{{ kube_apiserver_insecure_port }}"
+{% if kube_version is version('v1.10', '<') %}
+    admission-control: {{ kube_apiserver_admission_control | join(',') }}
+{% else %}
+{% if kube_apiserver_enable_admission_plugins|length > 0 %}
+    enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
+{% endif %}
+{% if kube_apiserver_disable_admission_plugins|length > 0 %}
+    disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
+{% endif %}
+{% endif %}
+    apiserver-count: "{{ kube_apiserver_count }}"
+{% if kube_version is version('v1.9', '>=') %}
+    endpoint-reconciler-type: lease
+{% endif %}
+    storage-backend: etcd3
+{% if etcd_events_cluster_enabled %}
+    etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
+{% endif %}
+    service-node-port-range: {{ kube_apiserver_node_port_range }}
+    kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
+{% if kube_basic_auth|default(true) %}
+    basic-auth-file: {{ kube_users_dir }}/known_users.csv
+{% endif %}
+{% if kube_token_auth|default(true) %}
+    token-auth-file: {{ kube_token_dir }}/known_tokens.csv
+{% endif %}
+{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
+    oidc-issuer-url: {{ kube_oidc_url }}
+    oidc-client-id: {{ kube_oidc_client_id }}
+{%   if kube_oidc_ca_file is defined %}
+    oidc-ca-file: {{ kube_oidc_ca_file }}
+{%   endif %}
+{%   if kube_oidc_username_claim is defined %}
+    oidc-username-claim: {{ kube_oidc_username_claim }}
+{%   endif %}
+{%   if kube_oidc_groups_claim is defined %}
+    oidc-groups-claim: {{ kube_oidc_groups_claim }}
+{%   endif %}
+{% endif %}
+{% if kube_encrypt_secret_data %}
+    encryption-provider-config: {{ kube_config_dir }}/ssl/secrets_encryption.yaml
+{% endif %}
+    storage-backend: {{ kube_apiserver_storage_backend }}
+{% if kube_api_runtime_config is defined %}
+    runtime-config: {{ kube_api_runtime_config | join(',') }}
+{% endif %}
+    allow-privileged: "true"
+{% if kubernetes_audit %}
+    audit-log-path: "{{ audit_log_path }}"
+    audit-log-maxage: "{{ audit_log_maxage }}"
+    audit-log-maxbackup: "{{ audit_log_maxbackups }}"
+    audit-log-maxsize: "{{ audit_log_maxsize }}"
+    audit-policy-file: {{ audit_policy_file }}
+{% endif %}
+{% for key in kube_kubeadm_apiserver_extra_args %}
+    {{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}"
+{% endfor %}
+{% if kube_feature_gates %}
+    feature-gates: {{ kube_feature_gates|join(',') }}
+{% endif %}
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
+    cloud-provider: {{cloud_provider}}
+    cloud-config: {{ kube_config_dir }}/cloud_config
+{% elif cloud_provider is defined and cloud_provider in ["external"] %}
+    cloud-config: {{ kube_config_dir }}/cloud_config
+{% endif %}
+{% if kubernetes_audit or kube_basic_auth|default(true) or kube_token_auth|default(true) or ( cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] ) or apiserver_extra_volumes %}
+  extraVolumes:
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] %}
+  - name: cloud-config
+    hostPath: {{ kube_config_dir }}/cloud_config
+    mountPath: {{ kube_config_dir }}/cloud_config
+{% endif %}
+{% if kube_basic_auth|default(true) %}
+  - name: basic-auth-config
+    hostPath: {{ kube_users_dir }}
+    mountPath: {{ kube_users_dir }}
+{% endif %}
+{% if kube_token_auth|default(true) %}
+  - name: token-auth-config
+    hostPath: {{ kube_token_dir }}
+    mountPath: {{ kube_token_dir }}
+{% endif %}
+{% if kubernetes_audit %}
+  - name: {{ audit_policy_name }}
+    hostPath: {{ audit_policy_hostpath }}
+    mountPath: {{ audit_policy_mountpath }}
+{% if audit_log_path != "-" %}
+  - name: {{ audit_log_name }}
+    hostPath: {{ audit_log_hostpath }}
+    mountPath: {{ audit_log_mountpath }}
+    writable: true
+{% endif %}
+{% endif %}
+{% for volume in apiserver_extra_volumes %}
+  - name: {{ volume.name }}
+    hostPath: {{ volume.hostPath }}
+    mountPath: {{ volume.mountPath }}
+    writable: {{ volume.writable | default(false)}}
+{% endfor %}
+{% endif %}
+  certSANs:
+{% for san in  apiserver_sans.split(' ') | unique %}
+  - {{ san }}
+{% endfor %}
+  timeoutForControlPlane: 5m0s
+controllerManager:
+  extraArgs:
+    node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
+    node-monitor-period: {{ kube_controller_node_monitor_period }}
+    pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }}
+{% if kube_feature_gates %}
+    feature-gates: {{ kube_feature_gates|join(',') }}
+{% endif %}
+{% for key in kube_kubeadm_controller_extra_args %}
+    {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
+{% endfor %}
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
+    cloud-provider: {{cloud_provider}}
+    cloud-config: {{ kube_config_dir }}/cloud_config
+{% elif cloud_provider is defined and cloud_provider in ["external"] %}
+    cloud-config: {{ kube_config_dir }}/cloud_config
+{% endif %}
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] or controller_manager_extra_volumes %}
+  extraVolumes:
+{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined %}
+  - name: openstackcacert
+    hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+    mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
+{% endif %}
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] %}
+  - name: cloud-config
+    hostPath: {{ kube_config_dir }}/cloud_config
+    mountPath: {{ kube_config_dir }}/cloud_config
+{% endif %}
+{% for volume in controller_manager_extra_volumes %}
+  - name: {{ volume.name }}
+    hostPath: {{ volume.hostPath }}
+    mountPath: {{ volume.mountPath }}
+    writable: {{ volume.writable | default(false)}}
+{% endfor %}
+{% endif %}
+scheduler:
+  extraArgs:
+{% if kube_feature_gates %}
+    feature-gates: {{ kube_feature_gates|join(',') }}
+{% endif %}
+{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
+{% for key in kube_kubeadm_scheduler_extra_args %}
+    {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
+{% endfor %}
+{% endif %}
+{# extraVolumes is only emitted when scheduler_extra_volumes is set (avoids an empty/duplicate key) #}
+{% if scheduler_extra_volumes %}
+  extraVolumes:
+{% for volume in scheduler_extra_volumes %}
+  - name: {{ volume.name }}
+    hostPath: {{ volume.hostPath }}
+    mountPath: {{ volume.mountPath }}
+    writable: {{ volume.writable | default(false)}}
+{% endfor %}
+{% endif %}
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+bindAddress: 0.0.0.0
+clientConnection:
+ acceptContentTypes: ""
+ burst: 10
+ contentType: application/vnd.kubernetes.protobuf
+ kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
+ qps: 5
+clusterCIDR: ""
+configSyncPeriod: 15m0s
+conntrack:
+ max: null
+ maxPerCore: 32768
+ min: 131072
+ tcpCloseWaitTimeout: 1h0m0s
+ tcpEstablishedTimeout: 24h0m0s
+enableProfiling: false
+healthzBindAddress: 0.0.0.0:10256
+iptables:
+ masqueradeAll: false
+ masqueradeBit: 14
+ minSyncPeriod: 0s
+ syncPeriod: 30s
+ipvs:
+ excludeCIDRs: null
+ minSyncPeriod: 0s
+ scheduler: ""
+ syncPeriod: 30s
+metricsBindAddress: 127.0.0.1:10249
+mode: {{ kube_proxy_mode }}
+{% if kube_proxy_nodeport_addresses %}
+nodePortAddresses: [{{ kube_proxy_nodeport_addresses_cidr }}]
+{% endif %}
+oomScoreAdj: -999
+portRange: ""
+resourceContainer: ""
+udpIdleTimeout: 250ms
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 50d058626..745e2a9f8 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -12,7 +12,7 @@ is_atomic: false
 disable_swap: true
 
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.12.3
+kube_version: v1.13.0
 
 ## Kube Proxy mode One of ['iptables','ipvs']
 kube_proxy_mode: ipvs
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index 2090372fb..d8a433679 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -19,7 +19,6 @@ spec:
         k8s-app: calico-node
       annotations:
         # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
-        scheduler.alpha.kubernetes.io/critical-pod: ''
         kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
 {% if calico_felix_prometheusmetricsenabled %}
         prometheus.io/scrape: 'true'
diff --git a/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2
index 68a6b9910..a46608de8 100644
--- a/roles/network_plugin/canal/templates/canal-node.yaml.j2
+++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2
@@ -12,9 +12,6 @@ spec:
       k8s-app: canal-node
   template:
     metadata:
-      annotations:
-        # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
-        scheduler.alpha.kubernetes.io/critical-pod: ''
       labels:
         k8s-app: canal-node
     spec:
diff --git a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
index b8a6306cf..bda6000ae 100755
--- a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
@@ -21,12 +21,6 @@ spec:
       labels:
         k8s-app: cilium
         kubernetes.io/cluster-service: "true"
-      annotations:
-        # This annotation plus the CriticalAddonsOnly toleration makes
-        # cilium to be a critical pod in the cluster, which ensures cilium
-        # gets priority scheduling.
-        # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
-        scheduler.alpha.kubernetes.io/critical-pod: ''
 {% if cilium_enable_prometheus %}
         prometheus.io/scrape: "true"
         prometheus.io/port: "9090"
diff --git a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
index 73a7688ef..c1604d0b5 100644
--- a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
@@ -15,9 +15,6 @@ spec:
       namespace: kube-system
       labels:
         k8s-app: contiv-api-proxy
-      annotations:
-        # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
diff --git a/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2 b/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
index 73111072c..c8de9d297 100644
--- a/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
@@ -14,9 +14,6 @@ spec:
     metadata:
       labels:
         k8s-app: contiv-cleanup
-      annotations:
-        # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
index 0b67945e5..a16ee5755 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
@@ -14,8 +14,6 @@ spec:
     metadata:
       labels:
         k8s-app: contiv-etcd-proxy
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
index b8fba9cc6..e320f5b24 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
@@ -14,8 +14,6 @@ spec:
     metadata:
       labels:
         k8s-app: contiv-etcd
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
index e61510253..a39938f77 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
@@ -15,9 +15,6 @@ spec:
       namespace: kube-system
       labels:
         k8s-app: contiv-netmaster
-      annotations:
-        # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
index 045b9e7eb..8b2e65ebd 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
@@ -19,9 +19,6 @@ spec:
     metadata:
       labels:
         k8s-app: contiv-netplugin
-      annotations:
-        # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
diff --git a/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2 b/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
index 40c37b6ad..2ec15fc82 100644
--- a/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
@@ -16,9 +16,6 @@ spec:
     metadata:
       labels:
         k8s-app: contiv-ovs
-      annotations:
-        # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
index 578409d02..bcaae4a6d 100644
--- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
+++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
@@ -51,9 +51,6 @@ spec:
       labels:
         tier: node
         k8s-app: flannel
-      annotations:
-        # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-node-critical
diff --git a/roles/network_plugin/kube-router/templates/kube-router.yml.j2 b/roles/network_plugin/kube-router/templates/kube-router.yml.j2
index ac1029a0e..37f03ea26 100644
--- a/roles/network_plugin/kube-router/templates/kube-router.yml.j2
+++ b/roles/network_plugin/kube-router/templates/kube-router.yml.j2
@@ -60,8 +60,6 @@ spec:
       labels:
         k8s-app: kube-router
         tier: node
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
 {% if kube_version is version('v1.11.1', '>=') %}
       priorityClassName: system-cluster-critical
diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2
index 204e3f993..3d66c043c 100644
--- a/roles/network_plugin/weave/templates/weave-net.yml.j2
+++ b/roles/network_plugin/weave/templates/weave-net.yml.j2
@@ -114,9 +114,6 @@ items:
         metadata:
           labels:
             name: weave-net
-          annotations:
-            # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
-            scheduler.alpha.kubernetes.io/critical-pod: ''
         spec:
 {% if kube_version is version('v1.11.1', '>=') %}
           priorityClassName: system-node-critical
-- 
GitLab