diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
index 4e91da224cc50b16871eca934770e00845eb1be0..3b9168c031322d694a0171ec89272092b4479ff5 100644
--- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml
+++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
@@ -10,6 +10,7 @@
   kube:
     name: "netchecker-server"
     namespace: "{{ netcheck_namespace }}"
+    filename: "{{ netchecker_server_manifest.stat.path }}"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "deploy"
     state: latest
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
index d730042427efd4a63118a33311aa99fceeb6c8a9..4f32214ebd996efcaf101707f3098caabdd72154 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
@@ -42,6 +42,5 @@ spec:
               memory: {{ netchecker_agent_memory_requests }}
   updateStrategy:
     rollingUpdate:
-      maxUnavailable: 1
+      maxUnavailable: "100%"
     type: RollingUpdate
-    
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
index 70194c900ab426e3b040be5572fb56d0c5f5f62c..76fca4812834415039eaeafe941a0399f004c057 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
@@ -46,5 +46,5 @@ spec:
               memory: {{ netchecker_agent_memory_requests }}
   updateStrategy:
     rollingUpdate:
-      maxUnavailable: 1
+      maxUnavailable: "100%"
     type: RollingUpdate
diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml
index 7cd650cbd350b12e30482e5fd4335d1a01ce6781..2e1aa269c8a9f732a5c3be345749d818d7a6c981 100644
--- a/roles/kubernetes/master/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/master/tasks/pre-upgrade.yml
@@ -13,22 +13,17 @@
     kube_apiserver_storage_backend: "etcd2"
   when: old_data_exists.rc == 0 and not force_etcd3|bool
 
-- name: "Pre-upgrade | Delete master manifests on all kube-masters"
+- name: "Pre-upgrade | Delete master manifests"
   file:
-    path: "/etc/kubernetes/manifests/{{item[1]}}.manifest"
+    path: "/etc/kubernetes/manifests/{{item}}.manifest"
     state: absent
-  delegate_to: "{{item[0]}}"
-  with_nested:
-    - "{{groups['kube-master']}}"
+  with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   register: kube_apiserver_manifest_replaced
   when: (secret_changed|default(false) or etcd_secret_changed|default(false))
 
-- name: "Pre-upgrade | Delete master containers forcefully on all kube-masters"
+- name: "Pre-upgrade | Delete master containers forcefully"
   shell: "docker ps -f name=k8s-{{item}}* -q | xargs --no-run-if-empty docker rm -f"
-  delegate_to: "{{item[0]}}"
-  with_nested:
-    - "{{groups['kube-master']}}"
+  with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when: kube_apiserver_manifest_replaced.changed
-  run_once: true
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index 8acb283278b1e57419a6422de95a2aee57fcb859..4c0538c8cf23bd04f4b919dcf914fc3225ef3ae8 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -161,6 +161,6 @@ spec:
             path: "{{ calico_cert_dir }}"
   updateStrategy:
     rollingUpdate:
-      maxUnavailable: 1
+      maxUnavailable: {{ serial | default('20%') }}
     type: RollingUpdate
 
diff --git a/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2
index 972b02d5febeea3e625b6043dcad948b9c5744f2..07754c089cbff0f43ecdfab91630056be718d83a 100644
--- a/roles/network_plugin/canal/templates/canal-node.yaml.j2
+++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2
@@ -190,5 +190,5 @@ spec:
               readOnly: true
   updateStrategy:
     rollingUpdate:
-      maxUnavailable: 1
+      maxUnavailable: {{ serial | default('20%') }}
     type: RollingUpdate
diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
index 0012228d731c21afd6b55e41d1051df2add47e4f..165395c24f617788edf7abc80eeda70c8120bce0 100644
--- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
+++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
@@ -121,5 +121,5 @@ spec:
             path: /opt/cni/bin
   updateStrategy:
     rollingUpdate:
-      maxUnavailable: 1
-    type: RollingUpdate
\ No newline at end of file
+      maxUnavailable: {{ serial | default('20%') }}
+    type: RollingUpdate
diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2
index c61f2e7e45209b14c4b82cce5a338452358e6214..67c04d9bea90baebbff9d1d65aae83dfbef8ee40 100644
--- a/roles/network_plugin/weave/templates/weave-net.yml.j2
+++ b/roles/network_plugin/weave/templates/weave-net.yml.j2
@@ -156,6 +156,6 @@ items:
                 path: /lib/modules
       updateStrategy:
         rollingUpdate:
-          maxUnavailable: 1
+          maxUnavailable: {{ serial | default('20%') }}
         type: RollingUpdate
 
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index 7c934c592c4c71d35c7b26c028a0cc2f92540ede..7269dab352caa299d931abfb1b437ea454790c7b 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -12,16 +12,11 @@
       bin_dir: "/usr/local/bin"
     when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
-  - name: Check kubectl output
-    shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
-    register: get_pods
-
-  - debug: msg="{{get_pods.stdout.split('\n')}}"
 
   - name: Get pod names
     shell: "{{bin_dir}}/kubectl get pods -o json"
     register: pods
-    until: '"ContainerCreating" not in pods.stdout'
+    until: '"ContainerCreating" not in pods.stdout and "Terminating" not in pods.stdout'
     retries: 60
     delay: 2
     no_log: true
@@ -30,11 +25,20 @@
     command: "{{bin_dir}}/kubectl get pods -o
              jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     register: hostnet_pods
+    no_log: true
 
   - name: Get running pods
     command: "{{bin_dir}}/kubectl get pods -o
              jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
     register: running_pods
+    no_log: true
+
+  - name: Check kubectl output
+    shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+    register: get_pods
+    no_log: true
+
+  - debug: msg="{{get_pods.stdout.split('\n')}}"
 
   - set_fact:
       kube_pods_subnet: 10.233.64.0/18