diff --git a/roles/container-engine/containerd/handlers/main.yml b/roles/container-engine/containerd/handlers/main.yml
index 3c132bdf0d0c844bdcbe25831ceb52e84ddbc1b8..1959dc991714b18e1865d7bc9a886cda4201bc60 100644
--- a/roles/container-engine/containerd/handlers/main.yml
+++ b/roles/container-engine/containerd/handlers/main.yml
@@ -1,10 +1,4 @@
 ---
-- name: Restart containerd
-  command: /bin/true
-  notify:
-    - Containerd | restart containerd
-    - Containerd | wait for containerd
-
 - name: Containerd | restart containerd
   systemd:
     name: containerd
@@ -12,6 +6,7 @@
     enabled: yes
     daemon-reload: yes
     masked: no
+  listen: Restart containerd
 
 - name: Containerd | wait for containerd
   command: "{{ containerd_bin_dir }}/ctr images ls -q"
@@ -19,3 +14,4 @@
   retries: 8
   delay: 4
   until: containerd_ready.rc == 0
+  listen: Restart containerd
diff --git a/roles/container-engine/cri-dockerd/handlers/main.yml b/roles/container-engine/cri-dockerd/handlers/main.yml
index 3990d3397c771fece3ac94d2cb79de66383f68dc..3a249791669b9b7d5a113f71867375202f53e31a 100644
--- a/roles/container-engine/cri-dockerd/handlers/main.yml
+++ b/roles/container-engine/cri-dockerd/handlers/main.yml
@@ -1,35 +1,31 @@
 ---
-- name: Restart and enable cri-dockerd
-  command: /bin/true
-  notify:
-    - Cri-dockerd | reload systemd
-    - Cri-dockerd | restart docker.service
-    - Cri-dockerd | reload cri-dockerd.socket
-    - Cri-dockerd | reload cri-dockerd.service
-    - Cri-dockerd | enable cri-dockerd service
-
 - name: Cri-dockerd | reload systemd
   systemd:
     name: cri-dockerd
     daemon_reload: true
     masked: no
+  listen: Restart and enable cri-dockerd
 
 - name: Cri-dockerd | restart docker.service
   service:
     name: docker.service
     state: restarted
+  listen: Restart and enable cri-dockerd
 
 - name: Cri-dockerd | reload cri-dockerd.socket
   service:
     name: cri-dockerd.socket
     state: restarted
+  listen: Restart and enable cri-dockerd
 
 - name: Cri-dockerd | reload cri-dockerd.service
   service:
     name: cri-dockerd.service
     state: restarted
+  listen: Restart and enable cri-dockerd
 
 - name: Cri-dockerd | enable cri-dockerd service
   service:
     name: cri-dockerd.service
     enabled: yes
+  listen: Restart and enable cri-dockerd
diff --git a/roles/container-engine/cri-o/handlers/main.yml b/roles/container-engine/cri-o/handlers/main.yml
index 763f4b558b62da767bebaffd82b44580688c626f..b0c5951fda8aee0210a797491da3f3bdd9edfd6a 100644
--- a/roles/container-engine/cri-o/handlers/main.yml
+++ b/roles/container-engine/cri-o/handlers/main.yml
@@ -1,16 +1,12 @@
 ---
-- name: Restart crio
-  command: /bin/true
-  notify:
-    - CRI-O | reload systemd
-    - CRI-O | reload crio
-
 - name: CRI-O | reload systemd
   systemd:
     daemon_reload: true
+  listen: Restart crio
 
 - name: CRI-O | reload crio
   service:
     name: crio
     state: restarted
     enabled: yes
+  listen: Restart crio
diff --git a/roles/container-engine/docker/handlers/main.yml b/roles/container-engine/docker/handlers/main.yml
index 14a7b3973d3db99d2463c2d2f1cc4ff798087eda..4a8a63948bd6c3ed16ce25c619806f46e50f3897 100644
--- a/roles/container-engine/docker/handlers/main.yml
+++ b/roles/container-engine/docker/handlers/main.yml
@@ -1,28 +1,23 @@
 ---
-- name: Restart docker
-  command: /bin/true
-  notify:
-    - Docker | reload systemd
-    - Docker | reload docker.socket
-    - Docker | reload docker
-    - Docker | wait for docker
-
 - name: Docker | reload systemd
   systemd:
     name: docker
     daemon_reload: true
     masked: no
+  listen: Restart docker
 
 - name: Docker | reload docker.socket
   service:
     name: docker.socket
     state: restarted
   when: ansible_os_family in ['Flatcar', 'Flatcar Container Linux by Kinvolk'] or is_fedora_coreos
+  listen: Restart docker
 
 - name: Docker | reload docker
   service:
     name: docker
     state: restarted
+  listen: Restart docker
 
 - name: Docker | wait for docker
   command: "{{ docker_bin_dir }}/docker images"
@@ -30,3 +27,4 @@
   retries: 20
   delay: 1
   until: docker_ready.rc == 0
+  listen: Restart docker
diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml
index 2c5577862b735d95ebbddd7b18bc7cffa1e2e656..b79dd014865dde311802c9c8213f303e4c169f11 100644
--- a/roles/etcd/handlers/backup.yml
+++ b/roles/etcd/handlers/backup.yml
@@ -1,22 +1,15 @@
 ---
-- name: Backup etcd data
-  command: /bin/true
-  notify:
-    - Refresh Time Fact
-    - Set Backup Directory
-    - Create Backup Directory
-    - Stat etcd v2 data directory
-    - Backup etcd v2 data
-    - Backup etcd v3 data
-  when: etcd_cluster_is_healthy.rc == 0
-
 - name: Refresh Time Fact
   setup:
     filter: ansible_date_time
+  listen: Restart etcd
+  when: etcd_cluster_is_healthy.rc == 0
 
 - name: Set Backup Directory
   set_fact:
     etcd_backup_directory: "{{ etcd_backup_prefix }}/etcd-{{ ansible_date_time.date }}_{{ ansible_date_time.time }}"
+  listen: Restart etcd
+  when: etcd_cluster_is_healthy.rc == 0
 
 - name: Create Backup Directory
   file:
@@ -25,6 +17,8 @@
     owner: root
     group: root
     mode: 0600
+  listen: Restart etcd
+  when: etcd_cluster_is_healthy.rc == 0
 
 - name: Stat etcd v2 data directory
   stat:
@@ -33,9 +27,13 @@
     get_checksum: no
     get_mime: no
   register: etcd_data_dir_member
+  listen: Restart etcd
+  when: etcd_cluster_is_healthy.rc == 0
 
 - name: Backup etcd v2 data
-  when: etcd_data_dir_member.stat.exists
+  when:
+    - etcd_data_dir_member.stat.exists
+    - etcd_cluster_is_healthy.rc == 0
   command: >-
     {{ bin_dir }}/etcdctl backup
       --data-dir {{ etcd_data_dir }}
@@ -46,6 +44,7 @@
   register: backup_v2_command
   until: backup_v2_command.rc == 0
   delay: "{{ retry_stagger | random + 3 }}"
+  listen: Restart etcd
 
 - name: Backup etcd v3 data
   command: >-
@@ -61,3 +60,5 @@
   register: etcd_backup_v3_command
   until: etcd_backup_v3_command.rc == 0
   delay: "{{ retry_stagger | random + 3 }}"
+  listen: Restart etcd
+  when: etcd_cluster_is_healthy.rc == 0
diff --git a/roles/etcd/handlers/backup_cleanup.yml b/roles/etcd/handlers/backup_cleanup.yml
index 63dcf41918f0ba9835c6a270a8316cf0c9a43b07..85b8d19cf6822a43e9332bc3373c417a37c2bc05 100644
--- a/roles/etcd/handlers/backup_cleanup.yml
+++ b/roles/etcd/handlers/backup_cleanup.yml
@@ -1,10 +1,4 @@
 ---
-- name: Cleanup etcd backups
-  command: /bin/true
-  notify:
-    - Find old etcd backups
-    - Remove old etcd backups
-
 - name: Find old etcd backups
   ansible.builtin.find:
     file_type: directory
@@ -13,6 +7,7 @@
     patterns: "etcd-*"
   register: _etcd_backups
   when: etcd_backup_retention_count >= 0
+  listen: Restart etcd
 
 - name: Remove old etcd backups
   ansible.builtin.file:
@@ -20,3 +15,4 @@
     path: "{{ item }}"
   loop: "{{ (_etcd_backups.files | sort(attribute='ctime', reverse=True))[etcd_backup_retention_count:] | map(attribute='path') }}"
   when: etcd_backup_retention_count >= 0
+  listen: Restart etcd
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index f09789c25f0e1e2cf70ad82441519f0d446fc3a8..33890617aec54201122ac168d28f284d8cf7e9e7 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -1,38 +1,27 @@
 ---
-- name: Restart etcd
-  command: /bin/true
-  notify:
-    - Backup etcd data
-    - Etcd | reload systemd
-    - Reload etcd
-    - Wait for etcd up
-    - Cleanup etcd backups
-
-- name: Restart etcd-events
-  command: /bin/true
-  notify:
-    - Etcd | reload systemd
-    - Reload etcd-events
-    - Wait for etcd-events up
-
 - name: Backup etcd
   import_tasks: backup.yml
 
 - name: Etcd | reload systemd
   systemd:
     daemon_reload: true
+  listen:
+    - Restart etcd
+    - Restart etcd-events
 
 - name: Reload etcd
   service:
     name: etcd
     state: restarted
   when: is_etcd_master
+  listen: Restart etcd
 
 - name: Reload etcd-events
   service:
     name: etcd-events
     state: restarted
   when: is_etcd_master
+  listen: Restart etcd-events
 
 - name: Wait for etcd up
   uri:
@@ -44,6 +33,7 @@
   until: result.status is defined and result.status == 200
   retries: 60
   delay: 1
+  listen: Restart etcd
 
 - name: Cleanup etcd backups
   import_tasks: backup_cleanup.yml
@@ -58,6 +48,7 @@
   until: result.status is defined and result.status == 200
   retries: 60
   delay: 1
+  listen: Restart etcd-events
 
 - name: Set etcd_secret_changed
   set_fact:
diff --git a/roles/kubernetes/control-plane/handlers/main.yml b/roles/kubernetes/control-plane/handlers/main.yml
index d5f17963ffd646781396b73b8a656aa5f96d0efb..1ee64f230696dd84f8ae3b672cf511f6c7f0a778 100644
--- a/roles/kubernetes/control-plane/handlers/main.yml
+++ b/roles/kubernetes/control-plane/handlers/main.yml
@@ -1,47 +1,14 @@
 ---
-- name: Master | restart kubelet
-  command: /bin/true
-  notify:
-    - Master | reload systemd
-    - Master | reload kubelet
-    - Master | wait for master static pods
-
-- name: Master | wait for master static pods
-  command: /bin/true
-  notify:
-    - Master | wait for the apiserver to be running
-    - Master | wait for kube-scheduler
-    - Master | wait for kube-controller-manager
-
-- name: Master | Restart apiserver
-  command: /bin/true
-  notify:
-    - Master | Remove apiserver container docker
-    - Master | Remove apiserver container containerd/crio
-    - Master | wait for the apiserver to be running
-
-- name: Master | Restart kube-scheduler
-  command: /bin/true
-  notify:
-    - Master | Remove scheduler container docker
-    - Master | Remove scheduler container containerd/crio
-    - Master | wait for kube-scheduler
-
-- name: Master | Restart kube-controller-manager
-  command: /bin/true
-  notify:
-    - Master | Remove controller manager container docker
-    - Master | Remove controller manager container containerd/crio
-    - Master | wait for kube-controller-manager
-
 - name: Master | reload systemd
   systemd:
     daemon_reload: true
+  listen: Master | restart kubelet
 
 - name: Master | reload kubelet
   service:
     name: kubelet
     state: restarted
+  listen: Master | restart kubelet
 
 - name: Master | Remove apiserver container docker
   shell: "set -o pipefail && docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f"
@@ -52,6 +19,7 @@
   until: remove_apiserver_container.rc == 0
   delay: 1
   when: container_manager == "docker"
+  listen: Master | Restart apiserver
 
 - name: Master | Remove apiserver container containerd/crio
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
@@ -62,6 +30,7 @@
   until: remove_apiserver_container.rc == 0
   delay: 1
   when: container_manager in ['containerd', 'crio']
+  listen: Master | Restart apiserver
 
 - name: Master | Remove scheduler container docker
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
@@ -72,6 +41,7 @@
   until: remove_scheduler_container.rc == 0
   delay: 1
   when: container_manager == "docker"
+  listen: Master | Restart kube-scheduler
 
 - name: Master | Remove scheduler container containerd/crio
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
@@ -82,6 +52,7 @@
   until: remove_scheduler_container.rc == 0
   delay: 1
   when: container_manager in ['containerd', 'crio']
+  listen: Master | Restart kube-scheduler
 
 - name: Master | Remove controller manager container docker
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
@@ -92,6 +63,7 @@
   until: remove_cm_container.rc == 0
   delay: 1
   when: container_manager == "docker"
+  listen: Master | Restart kube-controller-manager
 
 - name: Master | Remove controller manager container containerd/crio
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
@@ -102,6 +74,7 @@
   until: remove_cm_container.rc == 0
   delay: 1
   when: container_manager in ['containerd', 'crio']
+  listen: Master | Restart kube-controller-manager
 
 - name: Master | wait for kube-scheduler
   vars:
@@ -113,6 +86,10 @@
   until: scheduler_result.status == 200
   retries: 60
   delay: 1
+  listen:
+    - Master | restart kubelet
+    - Master | wait for master static pods
+    - Master | Restart kube-scheduler
 
 - name: Master | wait for kube-controller-manager
   vars:
@@ -124,6 +100,10 @@
   until: controller_manager_result.status == 200
   retries: 60
   delay: 1
+  listen:
+    - Master | restart kubelet
+    - Master | wait for master static pods
+    - Master | Restart kube-controller-manager
 
 - name: Master | wait for the apiserver to be running
   uri:
@@ -133,3 +112,7 @@
   until: result.status == 200
   retries: 60
   delay: 1
+  listen:
+    - Master | restart kubelet
+    - Master | wait for master static pods
+    - Master | Restart apiserver
diff --git a/roles/kubernetes/kubeadm/handlers/main.yml b/roles/kubernetes/kubeadm/handlers/main.yml
index 4c2b1257c1eca0fb592d497e2cea94f268137191..9f6d4318b6cfbf3b12b76a5ee6a261f160390a0a 100644
--- a/roles/kubernetes/kubeadm/handlers/main.yml
+++ b/roles/kubernetes/kubeadm/handlers/main.yml
@@ -1,15 +1,11 @@
 ---
-- name: Kubeadm | restart kubelet
-  command: /bin/true
-  notify:
-    - Kubeadm | reload systemd
-    - Kubeadm | reload kubelet
-
 - name: Kubeadm | reload systemd
   systemd:
     daemon_reload: true
+  listen: Kubeadm | restart kubelet
 
 - name: Kubeadm | reload kubelet
   service:
     name: kubelet
     state: restarted
+  listen: Kubeadm | restart kubelet
diff --git a/roles/kubernetes/node/handlers/main.yml b/roles/kubernetes/node/handlers/main.yml
index 512b4e8d40140439a01f6b77de28e2edf9fb9178..8195b7d06bacb635faad78d8300ad60a97d8a5e0 100644
--- a/roles/kubernetes/node/handlers/main.yml
+++ b/roles/kubernetes/node/handlers/main.yml
@@ -1,15 +1,11 @@
 ---
-- name: Node | restart kubelet
-  command: /bin/true
-  notify:
-    - Kubelet | reload systemd
-    - Kubelet | restart kubelet
-
 - name: Kubelet | reload systemd
   systemd:
     daemon_reload: true
+  listen: Node | restart kubelet
 
 - name: Kubelet | restart kubelet
   service:
     name: kubelet
     state: restarted
+  listen: Node | restart kubelet
diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml
index 8ae931f267d728e39ea944f05ed4dd6ad42c45a1..35140ab42fc5b3f08bd074f9fb971e025388b8f4 100644
--- a/roles/kubernetes/preinstall/handlers/main.yml
+++ b/roles/kubernetes/preinstall/handlers/main.yml
@@ -1,38 +1,14 @@
 ---
-- name: Preinstall | propagate resolvconf to k8s components
-  command: /bin/true
-  notify:
-    - Preinstall | reload kubelet
-    - Preinstall | kube-controller configured
-    - Preinstall | kube-apiserver configured
-    - Preinstall | restart kube-controller-manager docker
-    - Preinstall | restart kube-controller-manager crio/containerd
-    - Preinstall | restart kube-apiserver docker
-    - Preinstall | restart kube-apiserver crio/containerd
-    - Preinstall | wait for the apiserver to be running
-  when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos
-
-- name: Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk
-  command: /bin/true
-  notify:
-    - Preinstall | apply resolvconf cloud-init
-    - Preinstall | reload kubelet
-  when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
-
 - name: Preinstall | apply resolvconf cloud-init
   command: /usr/bin/coreos-cloudinit --from-file {{ resolveconf_cloud_init_conf }}
   when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
-
-- name: Preinstall | update resolvconf for networkmanager
-  command: /bin/true
-  notify:
-    - Preinstall | reload NetworkManager
-    - Preinstall | reload kubelet
+  listen: Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk
 
 - name: Preinstall | reload NetworkManager
   service:
     name: NetworkManager.service
     state: restarted
+  listen: Preinstall | update resolvconf for networkmanager
 
 - name: Preinstall | reload kubelet
   service:
@@ -46,6 +22,10 @@
     - Preinstall | restart kube-apiserver docker
     - Preinstall | restart kube-apiserver crio/containerd
   when: not dns_early | bool
+  listen:
+    - Preinstall | propagate resolvconf to k8s components
+    - Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk
+    - Preinstall | update resolvconf for networkmanager
 
 # FIXME(mattymo): Also restart for kubeadm mode
 - name: Preinstall | kube-apiserver configured
@@ -56,6 +36,7 @@
     get_mime: no
   register: kube_apiserver_set
   when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
+  listen: Preinstall | propagate resolvconf to k8s components
 
 # FIXME(mattymo): Also restart for kubeadm mode
 - name: Preinstall | kube-controller configured
@@ -66,6 +47,7 @@
     get_mime: no
   register: kube_controller_set
   when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
+  listen: Preinstall | propagate resolvconf to k8s components
 
 - name: Preinstall | restart kube-controller-manager docker
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
@@ -77,6 +59,7 @@
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
     - kube_controller_set.stat.exists
+  listen: Preinstall | propagate resolvconf to k8s components
 
 - name: Preinstall | restart kube-controller-manager crio/containerd
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
@@ -92,6 +75,7 @@
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
     - kube_controller_set.stat.exists
+  listen: Preinstall | propagate resolvconf to k8s components
 
 - name: Preinstall | restart kube-apiserver docker
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
@@ -103,6 +87,7 @@
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
     - kube_apiserver_set.stat.exists
+  listen: Preinstall | propagate resolvconf to k8s components
 
 - name: Preinstall | restart kube-apiserver crio/containerd
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
@@ -118,6 +103,7 @@
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
     - kube_apiserver_set.stat.exists
+  listen: Preinstall | propagate resolvconf to k8s components
 
 # When running this as the last phase ensure we wait for kube-apiserver to come up
 - name: Preinstall | wait for the apiserver to be running
@@ -133,6 +119,8 @@
     - inventory_hostname in groups['kube_control_plane']
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
+    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos
+  listen: Preinstall | propagate resolvconf to k8s components
 
 - name: Preinstall | Restart systemd-resolved
   service:
diff --git a/roles/network_plugin/calico/handlers/main.yml b/roles/network_plugin/calico/handlers/main.yml
index 7f998dba3828353ae15960ff6197ed7db74d6d4a..f5f5dc29ebc25ea509b2b64f59cb8d88bb38d286 100644
--- a/roles/network_plugin/calico/handlers/main.yml
+++ b/roles/network_plugin/calico/handlers/main.yml
@@ -1,16 +1,10 @@
 ---
-- name: Reset_calico_cni
-  command: /bin/true
-  when: calico_cni_config is defined
-  notify:
-    - Delete 10-calico.conflist
-    - Calico | delete calico-node docker containers
-    - Calico | delete calico-node crio/containerd containers
-
 - name: Delete 10-calico.conflist
   file:
     path: /etc/cni/net.d/10-calico.conflist
     state: absent
+  listen: Reset_calico_cni
+  when: calico_cni_config is defined
 
 - name: Calico | delete calico-node docker containers
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
@@ -19,7 +13,10 @@
   register: docker_calico_node_remove
   until: docker_calico_node_remove is succeeded
   retries: 5
-  when: container_manager in ["docker"]
+  when:
+    - container_manager in ["docker"]
+    - calico_cni_config is defined
+  listen: Reset_calico_cni
 
 - name: Calico | delete calico-node crio/containerd containers
   shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
@@ -28,4 +25,7 @@
   register: crictl_calico_node_remove
   until: crictl_calico_node_remove is succeeded
   retries: 5
-  when: container_manager in ["crio", "containerd"]
+  when:
+    - container_manager in ["crio", "containerd"]
+    - calico_cni_config is defined
+  listen: Reset_calico_cni
diff --git a/roles/network_plugin/kube-router/handlers/main.yml b/roles/network_plugin/kube-router/handlers/main.yml
index 0723dfd8ab0b4f0441443291e0fde995ee450d6e..ad5eb21401d8a6f31669d9fd9a6079c2786e4606 100644
--- a/roles/network_plugin/kube-router/handlers/main.yml
+++ b/roles/network_plugin/kube-router/handlers/main.yml
@@ -1,10 +1,4 @@
 ---
-- name: Reset_kube_router
-  command: /bin/true
-  notify:
-    - Kube-router | delete kube-router docker containers
-    - Kube-router | delete kube-router crio/containerd containers
-
 - name: Kube-router | delete kube-router docker containers
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f"
   args:
@@ -13,6 +7,7 @@
   until: docker_kube_router_remove is succeeded
   retries: 5
   when: container_manager in ["docker"]
+  listen: Reset_kube_router
 
 - name: Kube-router | delete kube-router crio/containerd containers
   shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
@@ -22,3 +17,4 @@
   until: crictl_kube_router_remove is succeeded
   retries: 5
   when: container_manager in ["crio", "containerd"]
+  listen: Reset_kube_router
diff --git a/roles/network_plugin/macvlan/handlers/main.yml b/roles/network_plugin/macvlan/handlers/main.yml
index aba4cbc00315dda4229dc32aab72584609c1af3f..e4844c2217481478727c78bfdfecc8ef09f8267d 100644
--- a/roles/network_plugin/macvlan/handlers/main.yml
+++ b/roles/network_plugin/macvlan/handlers/main.yml
@@ -1,10 +1,4 @@
 ---
-- name: Macvlan | restart network
-  command: /bin/true
-  notify:
-    - Macvlan | reload network
-  when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
-
 - name: Macvlan | reload network
   service:
     # noqa: jinja[spacing]
@@ -18,3 +12,4 @@
       {%- endif %}
     state: restarted
   when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and kube_network_plugin not in ['calico']
+  listen: Macvlan | restart network