diff --git a/roles/network_plugin/contiv/files/contiv-cleanup.sh b/roles/network_plugin/contiv/files/contiv-cleanup.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2aa1a7796d8a91aeaa70d008a7bc7e9ecc782909
--- /dev/null
+++ b/roles/network_plugin/contiv/files/contiv-cleanup.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -e
+echo "Starting cleanup"
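+# Delete every OVS bridge whose name contains "contiv"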
+ovs-vsctl list-br | grep contiv | xargs -I % ovs-vsctl del-br %
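+# Delete the leftover vport veth links (awk also splits on ':', as newer ifconfig appends one to the name)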
+for p in $(ifconfig | grep vport | awk -F'[ :]' '{print $1}')
+do
+	ip link delete "$p" type veth
+done
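+# The cleanup DaemonSet's readinessProbe checks for this marker file; keep the
+# pod running long enough for the playbook to see it become Ready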
+touch /tmp/cleanup.done
+sleep 60
diff --git a/roles/network_plugin/contiv/tasks/pre-reset.yml b/roles/network_plugin/contiv/tasks/pre-reset.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a811d59213f778c5283f6277003cf8f30779ef3b
--- /dev/null
+++ b/roles/network_plugin/contiv/tasks/pre-reset.yml
@@ -0,0 +1,69 @@
+---
+- name: reset | Check whether kubectl is still present
+  stat:
+    path: "{{ bin_dir }}/kubectl"
+  register: contiv_kubectl
+
+- name: reset | Delete contiv netplugin and netmaster daemonsets
+  kube:
+    name: "{{ item }}"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "ds"
+    state: absent
+  with_items:
+    - contiv-netplugin
+    - contiv-netmaster
+  register: contiv_cleanup_deletion
+  tags:
+    - network
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+
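+# The cleanup DaemonSet bind-mounts /opt/cni/bin from the host and runs the script as /opt/cni/bin/cleanup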
+- name: reset | Copy contiv temporary cleanup script
+  copy:
+    src: ../files/contiv-cleanup.sh  # Not in role_path, so reference it relative to this tasks file
+    dest: /opt/cni/bin/cleanup
+    owner: root
+    group: root
+    mode: 0750
+  when:
+    - contiv_kubectl.stat.exists
+
+- name: reset | Lay down contiv cleanup template
+  template:
+    src: ../templates/contiv-cleanup.yml.j2  # Not in role_path, so reference it relative to this tasks file
+    dest: "{{ kube_config_dir }}/contiv-cleanup.yml"  # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
+  register: contiv_cleanup_manifest
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: reset | Start contiv cleanup resources
+  kube:
+    name: "contiv-cleanup"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "ds"
+    state: latest
+    filename: "{{ kube_config_dir }}/contiv-cleanup.yml"
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+  ignore_errors: true
+
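+# numberReady only counts pods whose readinessProbe has found /tmp/cleanup.done,
+# so this waits until the cleanup script has finished on every node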
+- name: reset | Wait until contiv cleanup is done
+  command: "{{ bin_dir }}/kubectl -n kube-system get ds contiv-cleanup -o jsonpath='{.status.numberReady}'"
+  register: cleanup_done_all_nodes
+  until: cleanup_done_all_nodes.stdout|int == groups['k8s-cluster']|length
+  retries: 5
+  delay: 5
+  ignore_errors: true
+  changed_when: false
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2 b/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..99cbecb7da6baf5b4392118743457e751312074d
--- /dev/null
+++ b/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
@@ -0,0 +1,58 @@
+---
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: contiv-cleanup
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-cleanup
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-cleanup
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-cleanup
+    spec:
+      hostNetwork: true
+      hostPID: true
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      serviceAccountName: contiv-netplugin
+      containers:
+      - name: contiv-ovs-cleanup
+        image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+        command: ["/opt/cni/bin/cleanup"]
+        securityContext:
+          privileged: true
+        volumeMounts:
+         - mountPath: /etc/openvswitch
+           name: etc-openvswitch
+           readOnly: false
+         - mountPath: /var/run
+           name: var-run
+           readOnly: false
+         - mountPath: /opt/cni/bin
+           name: cni-bin-dir
+           readOnly: false
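+        # The cleanup script creates /tmp/cleanup.done once it has finished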
+        readinessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/cleanup.done
+          initialDelaySeconds: 3
+          periodSeconds: 3
+          successThreshold: 1
+      volumes:
+        - name: etc-openvswitch
+          hostPath:
+            path: /etc/openvswitch
+        - name: var-run
+          hostPath:
+            path: /var/run
+        - name: cni-bin-dir
+          hostPath:
+            path: /opt/cni/bin
diff --git a/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2 b/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..825ab3042a7c519e6010011e0a5e8a3156d6ae07
--- /dev/null
+++ b/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
@@ -0,0 +1,80 @@
+---
+# This manifest deploys the contiv-ovs pod.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: contiv-ovs
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-ovs
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-ovs
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-ovs
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      hostNetwork: true
+      hostPID: true
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      containers:
+      # Runs ovs containers on each Kubernetes node.
+      - name: contiv-ovsdb-server
+        image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+        command: ["/scripts/start-ovsdb-server.sh"]
+        securityContext:
+          privileged: false
+        # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and the image is rebuilt
+        env:
+          - name: OVSDBSERVER_EXTRA_FLAGS
+            valueFrom:
+              configMapKeyRef:
+                name: contiv-config
+                key: contiv_ovsdb_server_extra_flags
+        volumeMounts:
+          - mountPath: /etc/openvswitch
+            name: etc-openvswitch
+            readOnly: false
+          - mountPath: /var/run
+            name: var-run
+            readOnly: false
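+      # vswitchd runs privileged and mounts the host's /lib/modules (typically so the openvswitch kernel module can be loaded)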
+      - name: contiv-ovs-vswitchd
+        image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+        command: ["/scripts/start-ovs-vswitchd.sh"]
+        securityContext:
+          privileged: true
+        # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and the image is rebuilt
+        env:
+          - name: OVSVSWITCHD_EXTRA_FLAGS
+            valueFrom:
+              configMapKeyRef:
+                name: contiv-config
+                key: contiv_ovs_vswitchd_extra_flags
+        volumeMounts:
+          - mountPath: /etc/openvswitch
+            name: etc-openvswitch
+            readOnly: false
+          - mountPath: /lib/modules
+            name: lib-modules
+            readOnly: true
+          - mountPath: /var/run
+            name: var-run
+            readOnly: false
+      volumes:
+        # Used by contiv-ovs
+        - name: etc-openvswitch
+          hostPath:
+            path: /etc/openvswitch
+        - name: lib-modules
+          hostPath:
+            path: /lib/modules
+        - name: var-run
+          hostPath:
+            path: /var/run
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 0fe73408b99146999bab0ad98001e4593f7ebd55..38945c64b86b94bd89bd826b6a9e7224b299b0d5 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -1,5 +1,13 @@
 ---
 
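+# Pre-reset tasks live under the network_plugin roles, so the path is built relative to this role and normalized with the realpath filter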
+- name: reset | include network_plugin-specific pre-reset tasks, if they exist
+  include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/pre-reset.yml') | realpath }}"
+  when:
+    - kube_network_plugin in ['contiv']
+  tags:
+    - network
+
 - name: reset | stop services
   service:
     name: "{{ item }}"