diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index f0e944bd9cffe0b6aac3633594baa5525a4e0a00..2c669c46dd98ea65feec9a8cf131c71de13726d7 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -54,6 +54,8 @@
   command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
+  register: create_system_ns
+  until: create_system_ns.rc == 0
   changed_when: False
   when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]
   tags: apps
diff --git a/roles/kubernetes/master/tasks/post-upgrade.yml b/roles/kubernetes/master/tasks/post-upgrade.yml
index e68526493766781b85f45ab92d4150ebb92365be..d157311de6fc44f70b2e46210ace2f77e811aca4 100644
--- a/roles/kubernetes/master/tasks/post-upgrade.yml
+++ b/roles/kubernetes/master/tasks/post-upgrade.yml
@@ -1,4 +1,13 @@
 ---
+- name: "Post-upgrade | restart kubelet on all masters"
+  service:
+    name: kubelet
+    state: restarted
+  delegate_to: "{{item}}"
+  with_items: "{{groups['kube-master']}}"
+  register: kubelet_restart_result
+  when: needs_etcd_migration|bool
+
 - name: "Post-upgrade | etcd3 upgrade | purge etcd2 k8s data"
   command: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} rm -r /registry"
   environment:
diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml
index 7776b970367a7ec90ff28ed785a6b93a4fb8ee10..12b270421ec7de7be7633b490cd1c70f601c0459 100644
--- a/roles/kubernetes/master/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/master/tasks/pre-upgrade.yml
@@ -55,13 +55,15 @@
   set_fact:
     needs_etcd_migration: "{{ kube_apiserver_storage_backend == 'etcd3' and data_migrated.stdout_lines|length == 0 and old_data_exists.rc == 0 }}"
 
-- name: "Pre-upgrade | Write invalid image to kube-apiserver manifest if necessary on all kube-masters"
+- name: "Pre-upgrade | Write invalid image to master manifests on all kube-masters"
   replace:
-    dest: /etc/kubernetes/manifests/kube-apiserver.manifest
+    dest: "/etc/kubernetes/manifests/{{item[1]}}.manifest"
     regexp: '(\s+)image:\s+.*?$'
     replace: '\1image: kill.apiserver.using.fake.image.in:manifest'
-  delegate_to: "{{item}}"
-  with_items: "{{groups['kube-master']}}"
+  delegate_to: "{{item[0]}}"
+  with_nested:
+    - "{{groups['kube-master']}}"
+    - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   register: kube_apiserver_manifest_replaced
   when: (secret_changed|default(false) or etcd_secret_changed|default(false) or needs_etcd_migration|bool) and kube_apiserver_manifest.stat.exists
 
@@ -95,13 +97,3 @@
   delegate_to: "{{item}}"
   with_items: "{{groups['etcd']}}"
   when: needs_etcd_migration|bool
-
-- name: "Pre-upgrade | restart kubelet on all masters"
-  service:
-    name: kubelet
-    state: restarted
-  delegate_to: "{{item}}"
-  with_items: "{{groups['kube-master']}}"
-  register: kube_apiserver_manifest_replaced
-  when: needs_etcd_migration|bool
-
diff --git a/roles/kubernetes/secrets/tasks/check-tokens.yml b/roles/kubernetes/secrets/tasks/check-tokens.yml
index 497bc7caf095975c824115a7d174deabf6ea60cb..616664b93e7abb3564bae8824fc126f9aee64949 100644
--- a/roles/kubernetes/secrets/tasks/check-tokens.yml
+++ b/roles/kubernetes/secrets/tasks/check-tokens.yml
@@ -27,9 +27,9 @@
     sync_tokens: true
   when: >-
       {%- set tokens = {'sync': False} -%}
-      {%- for server in groups['kube-master'] | intersect(ansible_play_hosts)
+      {%- for server in groups['kube-master'] | intersect(ansible_play_batch)
          if (not hostvars[server].known_tokens.stat.exists) or
-         (hostvars[server].known_tokens.stat.checksum != known_tokens_master.stat.checksum|default('')) -%}
+         (hostvars[server].known_tokens.stat.checksum|default('') != known_tokens_master.stat.checksum|default('')) -%}
          {%- set _ = tokens.update({'sync': True}) -%}
       {%- endfor -%}
       {{ tokens.sync }}
diff --git a/roles/upgrade/pre-upgrade/defaults/main.yml b/roles/upgrade/pre-upgrade/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5980360fc5e5b4734ee32e59bea7305de384eeb3
--- /dev/null
+++ b/roles/upgrade/pre-upgrade/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+drain_grace_period: 30
+drain_timeout: 40s
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index fbcd1cf857cd7a64f884599f002616d229478904..f2251375b3938c002071de52d2237612fd204c3a 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -1,9 +1,9 @@
 ---
 - name: See if node is in ready state
-  command: "kubectl get nodes | grep {{ inventory_hostname }}"
+  shell: "kubectl get nodes | grep {{ inventory_hostname }}"
   register: kubectl_nodes
   delegate_to: "{{ groups['kube-master'][0] }}"
-  ignore_errors: true
+  failed_when: false
 
 - set_fact:
     needs_cordoning: >-