diff --git a/README.md b/README.md
index 59686019fde1e5e261c514206631155452fd20b3..19b29ebe06d13adbde64d755ba02d7dcbb614da0 100644
--- a/README.md
+++ b/README.md
@@ -111,10 +111,10 @@ Supported Components
     -   [cilium](https://github.com/cilium/cilium) v1.2.0
     -   [contiv](https://github.com/contiv/install) v1.1.7
     -   [flanneld](https://github.com/coreos/flannel) v0.10.0
-    -   [weave](https://github.com/weaveworks/weave) v2.4.0
+    -   [weave](https://github.com/weaveworks/weave) v2.4.1
 -   Application
     -   [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
-    -   [cert-manager](https://github.com/jetstack/cert-manager) v0.4.1
+    -   [cert-manager](https://github.com/jetstack/cert-manager) v0.5.0
     -   [coredns](https://github.com/coredns/coredns) v1.2.2
     -   [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.19.0
 
diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md
index 709d0633faf390d82ae5bb575d361920346d95ec..e677869d6146e3f6a9a419f8b94d4e91f63ee465 100644
--- a/contrib/terraform/aws/README.md
+++ b/contrib/terraform/aws/README.md
@@ -22,8 +22,6 @@ export TF_VAR_AWS_SECRET_ACCESS_KEY ="xxx"
 export TF_VAR_AWS_SSH_KEY_NAME="yyy"
 export TF_VAR_AWS_DEFAULT_REGION="zzz"
 ```
-- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
-
 - Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as base image. If you want to change this behaviour, see note "Using other distrib than CoreOs" below.
 - Create an AWS EC2 SSH Key
 - Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
diff --git a/docs/contiv.md b/docs/contiv.md
index 1366a2dfd653c011a631ecd670717965de69c8c2..29a8ebbc70e4620d7695661996a99abc3afb6ead 100644
--- a/docs/contiv.md
+++ b/docs/contiv.md
@@ -54,16 +54,18 @@ The default configuration uses VXLAN to create an overlay. Two networks are crea
 
 You can change the default network configuration by overriding the `contiv_networks` variable.
 
-The default forward mode is set to routing:
+The default forward mode is set to routing and the default network mode is vxlan:
 
 ```yaml
 contiv_fwd_mode: routing
+contiv_net_mode: vxlan
 ```
 
 The following is an example of how you can use VLAN instead of VXLAN:
 
 ```yaml
 contiv_fwd_mode: bridge
+contiv_net_mode: vlan
 contiv_vlan_interface: eth0
 contiv_networks:
   - name: default-net
diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml
index faf65eb1ad4ff7d82bd041b72e5700989637b1fc..eff115f9fbc009c7dfce58131cdf1cc8649b4aec 100644
--- a/inventory/sample/group_vars/all/all.yml
+++ b/inventory/sample/group_vars/all/all.yml
@@ -43,6 +43,13 @@ bin_dir: /usr/local/bin
 ## The subnets of each nodes will be distributed by the datacenter router
 #peer_with_router: false
 
+## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
+## In this case, you may need to peer with an uplink
+## NB: The hostvars must contain a key "contiv" whose value is a dict containing "router_ip", "as" (defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as), "neighbor"
+#contiv_peer_with_uplink_leaf: false
+#contiv_global_as: "65002"
+#contiv_global_neighbor_as: "500"
+
 ## Upstream dns servers used by dnsmasq
 #upstream_dns_servers:
 #  - 8.8.8.8
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 5c1ca3f4ecc0424dcecd627fd88f252a76db6887..4000cffd1c56775233c42156fcbcd044ca98d4fe 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -46,9 +46,9 @@ flannel_version: "v0.10.0"
 flannel_cni_version: "v0.3.0"
 
 vault_version: 0.10.1
-weave_version: "2.4.0"
+weave_version: "2.4.1"
 pod_infra_version: 3.1
-contiv_version: 1.1.7
+contiv_version: 1.2.1
 cilium_version: "v1.2.0"
 
 # Download URLs
@@ -98,16 +98,20 @@ netcheck_agent_img_repo: "mirantis/k8s-netchecker-agent"
 netcheck_agent_tag: "{{ netcheck_version }}"
 netcheck_server_img_repo: "mirantis/k8s-netchecker-server"
 netcheck_server_tag: "{{ netcheck_version }}"
-weave_kube_image_repo: "weaveworks/weave-kube"
+weave_kube_image_repo: "docker.io/weaveworks/weave-kube"
 weave_kube_image_tag: "{{ weave_version }}"
-weave_npc_image_repo: "weaveworks/weave-npc"
+weave_npc_image_repo: "docker.io/weaveworks/weave-npc"
 weave_npc_image_tag: "{{ weave_version }}"
 contiv_image_repo: "contiv/netplugin"
 contiv_image_tag: "{{ contiv_version }}"
+contiv_init_image_repo: "contiv/netplugin-init"
+contiv_init_image_tag: "latest"
 contiv_auth_proxy_image_repo: "contiv/auth_proxy"
 contiv_auth_proxy_image_tag: "{{ contiv_version }}"
 contiv_etcd_init_image_repo: "ferest/etcd-initer"
 contiv_etcd_init_image_tag: latest
+contiv_ovs_image_repo: "contiv/ovs"
+contiv_ovs_image_tag: "latest"
 cilium_image_repo: "docker.io/cilium/cilium"
 cilium_image_tag: "{{ cilium_version }}"
 nginx_image_repo: nginx
@@ -164,7 +168,7 @@ ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/ngin
 ingress_nginx_controller_image_tag: "0.19.0"
 ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
 ingress_nginx_default_backend_image_tag: "1.4"
-cert_manager_version: "v0.4.1"
+cert_manager_version: "v0.5.0"
 cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
 cert_manager_controller_image_tag: "{{ cert_manager_version }}"
 
diff --git a/roles/etcd/tasks/install_host.yml b/roles/etcd/tasks/install_host.yml
index 0dc226e666e4af23f05e4f5ed1aec7b4c29f4b0a..fe50a7b1a2e9fbe12bcb569a8c7dff5166debdcf 100644
--- a/roles/etcd/tasks/install_host.yml
+++ b/roles/etcd/tasks/install_host.yml
@@ -1,21 +1,25 @@
 ---
-- name: install | Copy etcd binary from download dir
-  shell: |
-    rsync -piu "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64/etcd" "{{ bin_dir }}/etcd"
-    rsync -piu "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64/etcdctl" "{{ bin_dir }}/etcdctl"
+- name: install | Copy etcd and etcdctl binary from download dir
+  synchronize:
+    src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64/{{ item }}"
+    dest: "{{ bin_dir }}/{{ item }}"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
   changed_when: false
+  delegate_to: "{{ inventory_hostname }}"
+  with_items:
+    - "etcd"
+    - "etcdctl"
   when: etcd_cluster_setup
 
-- name: install | Set etcd binary permissions
+- name: install | Set etcd and etcdctl binary permissions
   file:
-    path: "{{ bin_dir }}/etcd"
+    path: "{{ bin_dir }}/{{ item }}"
     mode: "0755"
     state: file
+  with_items:
+    - "etcd"
+    - "etcdctl"
   when: etcd_cluster_setup
-
-- name: install | Set etcdctl binary permissions
-  file:
-    path: "{{ bin_dir }}/etcdctl"
-    mode: "0755"
-    state: file
-  when: etcd_cluster_setup
\ No newline at end of file
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 24614da32d7d34e48920515b6052b74802c29ad5..c3b8c26f5da4d6dd434002a7b650f120db671a25 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -68,6 +68,7 @@
     {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
     {% if tiller_enable_tls %} --tiller-tls --tiller-tls-verify --tiller-tls-cert={{ tiller_tls_cert }} --tiller-tls-key={{ tiller_tls_key }} --tls-ca-cert={{ tiller_tls_ca_cert }} {% endif %}
     {% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %}
+    --debug --dry-run
     | kubectl apply -f -
   changed_when: false
   when:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/00-namespace.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/00-namespace.yml.j2
index 7cf3a282dc113c6b615406b050116b91df9f1db5..fef90aed6cf9aff4560ec3da228f80e919947485 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/00-namespace.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/00-namespace.yml.j2
@@ -5,3 +5,4 @@ metadata:
   name: {{ cert_manager_namespace }}
   labels:
     name: {{ cert_manager_namespace }}
+    certmanager.k8s.io/disable-validation: "true"
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrole-cert-manager.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrole-cert-manager.yml.j2
index 0ce11fb9b39e44f2486f2575408a662924980088..b8b6251fa1f6b59ece24db7a9ac10a11c6028943 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrole-cert-manager.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrole-cert-manager.yml.j2
@@ -5,7 +5,7 @@ metadata:
   name: cert-manager
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 rules:
@@ -13,12 +13,7 @@ rules:
     resources: ["certificates", "issuers", "clusterissuers"]
     verbs: ["*"]
   - apiGroups: [""]
-    # TODO: remove endpoints once 0.4 is released. We include it here in case
-    # users use the 'master' version of the Helm chart with a 0.2.x release of
-    # cert-manager that still performs leader election with Endpoint resources.
-    # We advise users don't do this, but some will anyway and this will reduce
-    # friction.
-    resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"]
+    resources: ["configmaps", "secrets", "events", "services", "pods"]
     verbs: ["*"]
   - apiGroups: ["extensions"]
     resources: ["ingresses"]
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrolebinding-cert-manager.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrolebinding-cert-manager.yml.j2
index 7dd567fd988097769b9293afe9ad88bcbcd64a40..95cdeb52561a23f9cfd54d29527e71d29c329a75 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrolebinding-cert-manager.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/clusterrolebinding-cert-manager.yml.j2
@@ -5,7 +5,7 @@ metadata:
   name: cert-manager
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 roleRef:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-certificate.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-certificate.yml.j2
index a1663c64d58ec8cb48be9f7c8101c937adb339d0..2d9a5c1f991c354c67b28e8e09ee886ac6e2b0f4 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-certificate.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-certificate.yml.j2
@@ -3,9 +3,11 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: certificates.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": crd-install
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-clusterissuer.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-clusterissuer.yml.j2
index 869d4d2600a271dbfdf1f0ddc72cb64e14780e7b..53d65e4bc33e31343674d51204b36dd491cafe43 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-clusterissuer.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-clusterissuer.yml.j2
@@ -3,9 +3,11 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: clusterissuers.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": crd-install
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-issuer.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-issuer.yml.j2
index 1946b81bf1f89b366965cb8ce62cb7d2107aa811..7a19c7ede8bfeb3fa9f81148a43ad437b8dbe96c 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-issuer.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/crd-issuer.yml.j2
@@ -3,9 +3,11 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: issuers.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": crd-install
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2
index 2bcf5c701e9586d448189c22d2a02954eba9c5c3..1fedf42a295673d28e694d2f1daa0cff2ddf19f1 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/deploy-cert-manager.yml.j2
@@ -6,7 +6,7 @@ metadata:
   namespace: {{ cert_manager_namespace }}
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/sa-cert-manager.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/sa-cert-manager.yml.j2
index c5270e88baaa5e1463c00c62f0fb93f1b08a00d4..f73fd0c3445d0e75e405cab46b360c6a5010c949 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/sa-cert-manager.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/sa-cert-manager.yml.j2
@@ -6,6 +6,6 @@ metadata:
   namespace: {{ cert_manager_namespace }}
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
index 35eeeacfc87911cf0cc73bed154451c49ed3a9f1..a080aa4f063c00fdaf0c316e86714b9c8926089f 100644
--- a/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
+++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
@@ -33,6 +33,46 @@
   when: "contiv_global_config.networkInfraType != contiv_fabric_mode"
   run_once: true
 
+- name: Contiv | Set peer hostname
+  set_fact:
+    contiv_peer_hostname: >-
+      {%- if override_system_hostname|default(true) -%}
+      {{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['inventory_hostname']}) }}
+      {%- else -%}
+      {{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['ansible_fqdn']}) }}
+      {%- endif -%}
+  with_items: "{{ groups['k8s-cluster'] }}"
+  run_once: true
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+
+- name: Contiv | Get BGP configuration
+  command: |
+    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
+      bgp ls --json
+  register: bgp_config
+  run_once: true
+  changed_when: false
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+
+- name: Contiv | Configure peering with router(s)
+  command: |
+    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
+      bgp create {{ item.value }} \
+      --router-ip="{{ hostvars[item.key]['contiv']['router_ip'] }}" \
+      --as="{{ hostvars[item.key]['contiv']['as'] | default(contiv_global_as) }}" \
+      --neighbor-as="{{ hostvars[item.key]['contiv']['neighbor_as'] | default(contiv_global_neighbor_as) }}" \
+      --neighbor="{{ hostvars[item.key]['contiv']['neighbor'] }}"
+  run_once: true
+  with_dict: "{{ contiv_peer_hostname }}"
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+    - bgp_config.stdout|from_json|length == 0 or not item.value in bgp_config.stdout|from_json|map(attribute='key')|list
+
 - name: Contiv | Get existing networks
   command: |
     {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
index 5289296dc65104d528d6625aba235e2d3f99d345..1bca923294d1bc522473c21830b06b853aa89edd 100644
--- a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
@@ -9,7 +9,6 @@
     filename: "{{ contiv_config_dir }}/{{ item.item.file }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ contiv_manifests_results.results }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
 
 - import_tasks: configure.yml
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index 93da9760bccb26717ea747d34866e83992e6e2a9..a249e4164f626397286b60c9d01edbc7cf5f8e92 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -10,8 +10,15 @@
   when: kube_encrypt_secret_data
 
 - name: install | Copy kubectl binary from download dir
-  command: rsync -piu "{{ local_release_dir }}/hyperkube" "{{ bin_dir }}/kubectl"
+  synchronize:
+    src: "{{ local_release_dir }}/hyperkube"
+    dest: "{{ bin_dir }}/kubectl"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
   changed_when: false
+  delegate_to: "{{ inventory_hostname }}"
   tags:
     - hyperkube
     - kubectl
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml
index f3e1dca4043e81d196d04fadea5307e57b777460..ceeaa442b1d8e571419b8ef4095dc42307199eec 100644
--- a/roles/kubernetes/node/tasks/install.yml
+++ b/roles/kubernetes/node/tasks/install.yml
@@ -11,6 +11,7 @@
     src: "{{ local_release_dir }}/kubeadm"
     dest: "{{ bin_dir }}/kubeadm"
     compress: no
+    perms: yes
     owner: no
     group: no
   delegate_to: "{{ inventory_hostname }}"
diff --git a/roles/kubernetes/node/tasks/install_host.yml b/roles/kubernetes/node/tasks/install_host.yml
index 3ca92384805107e335e3882e1d2eeea9be3c01c5..3ec1f18005251f015d4ccc442434077c1ec85465 100644
--- a/roles/kubernetes/node/tasks/install_host.yml
+++ b/roles/kubernetes/node/tasks/install_host.yml
@@ -1,11 +1,18 @@
 ---
 
 - name: install | Copy kubelet binary from download dir
-  command: rsync -piu "{{ local_release_dir }}/hyperkube" "{{ bin_dir }}/kubelet"
-  changed_when: false
+  synchronize:
+    src: "{{ local_release_dir }}/hyperkube"
+    dest: "{{ bin_dir }}/kubelet"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
+  delegate_to: "{{ inventory_hostname }}"
   tags:
     - hyperkube
     - upgrade
+  notify: restart kubelet
 
 - name: install | Set kubelet binary permissions
   file:
@@ -15,7 +22,6 @@
   tags:
     - hyperkube
     - upgrade
-  notify: restart kubelet
 
 - name: install | Copy socat wrapper for Container Linux
   command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}"
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index e405c7a3f9e10fea1d824305bb6d699f5641cacb..d3e56393523f23ba33dedc7d7f30d33e098c1af2 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -303,6 +303,11 @@ weave_mode_seed: false
 weave_seed: uninitialized
 weave_peers: uninitialized
 
+# Contiv L3 BGP Mode
+contiv_peer_with_uplink_leaf: false
+contiv_global_as: "65002"
+contiv_global_neighbor_as: "500"
+
 ## Set no_proxy to all assigned cluster IPs and hostnames
 no_proxy: >-
   {%- if http_proxy is defined or https_proxy is defined %}
diff --git a/roles/network_plugin/contiv/defaults/main.yml b/roles/network_plugin/contiv/defaults/main.yml
index b6e237df577e125b1f1fb8df04c9dae6a697c40b..622d0fd8d335f87be19790ae8c2ba932e1c0a22f 100644
--- a/roles/network_plugin/contiv/defaults/main.yml
+++ b/roles/network_plugin/contiv/defaults/main.yml
@@ -6,8 +6,10 @@ contiv_etcd_data_dir: "/var/lib/etcd/contiv-data"
 contiv_netmaster_port: 9999
 contiv_cni_version: 0.1.0
 
+# No need to download it by default, but must be defined
 contiv_etcd_image_repo: "{{ etcd_image_repo }}"
 contiv_etcd_image_tag: "{{ etcd_image_tag }}"
+
 contiv_etcd_listen_port: 6666
 contiv_etcd_peer_port: 6667
 contiv_etcd_endpoints: |-
@@ -26,9 +28,21 @@ contiv_fwd_mode: routing
 # Fabric mode: aci, aci-opflex or default
 contiv_fabric_mode: default
 
+# Default netmode: vxlan or vlan
+contiv_net_mode: vxlan
+
 # Dataplane interface
 contiv_vlan_interface: ""
 
+# Default loglevels are INFO
+contiv_netmaster_loglevel: "WARN"
+contiv_netplugin_loglevel: "WARN"
+contiv_ovsdb_server_loglevel: "warn"
+contiv_ovs_vswitchd_loglevel: "warn"
+
+# VxLAN port
+contiv_vxlan_port: 4789
+
 # Default network configuration
 contiv_networks:
   - name: contivh1
diff --git a/roles/network_plugin/contiv/files/contiv-cleanup.sh b/roles/network_plugin/contiv/files/contiv-cleanup.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2aa1a7796d8a91aeaa70d008a7bc7e9ecc782909
--- /dev/null
+++ b/roles/network_plugin/contiv/files/contiv-cleanup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+echo "Starting cleanup"
+ovs-vsctl list-br | grep contiv | xargs -I % ovs-vsctl del-br %
+for p in $(ifconfig | grep vport | awk '{print $1}');
+do
+	ip link delete $p type veth
+done
+touch /tmp/cleanup.done
+sleep 60
diff --git a/roles/network_plugin/contiv/tasks/main.yml b/roles/network_plugin/contiv/tasks/main.yml
index bc9dcd3c0a4a86352688768514c6ba95e0295582..9f8258785fbb6342367e7a2ad347907e19a1a6e7 100644
--- a/roles/network_plugin/contiv/tasks/main.yml
+++ b/roles/network_plugin/contiv/tasks/main.yml
@@ -16,8 +16,25 @@
   with_items:
     - "{{ contiv_etcd_conf_dir }}"
     - "{{ contiv_etcd_data_dir }}"
+  when: inventory_hostname in groups['kube-master']
 
-- set_fact:
+- name: Contiv | Workaround https://github.com/contiv/netplugin/issues/1152
+  set_fact:
+    kube_apiserver_endpoint_for_contiv: |-
+      {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
+      https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
+      {%- elif loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
+      https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}
+      {%-   if loadbalancer_apiserver.port|string != "443" -%}
+      :{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
+      {%-   endif -%}
+      {%- else -%}
+      https://{{ first_kube_master }}:{{ kube_apiserver_port }}
+      {%- endif %}
+  when: inventory_hostname in groups['kube-master']
+
+- name: Contiv | Set necessary facts
+  set_fact:
     contiv_config_dir: "{{ contiv_config_dir }}"
     contiv_enable_api_proxy: "{{ contiv_enable_api_proxy }}"
     contiv_fabric_mode: "{{ contiv_fabric_mode }}"
@@ -26,22 +43,26 @@
     contiv_networks: "{{ contiv_networks }}"
     contiv_manifests:
       - {name: contiv-config, file: contiv-config.yml, type: configmap}
+      - {name: contiv-etcd, file: contiv-etcd.yml, type: daemonset}
+      - {name: contiv-etcd-proxy, file: contiv-etcd-proxy.yml, type: daemonset}
+      - {name: contiv-ovs, file: contiv-ovs.yml, type: daemonset}
       - {name: contiv-netmaster, file: contiv-netmaster-clusterrolebinding.yml, type: clusterrolebinding}
       - {name: contiv-netmaster, file: contiv-netmaster-clusterrole.yml, type: clusterrole}
       - {name: contiv-netmaster, file: contiv-netmaster-serviceaccount.yml, type: serviceaccount}
+      - {name: contiv-netmaster, file: contiv-netmaster.yml, type: daemonset}
       - {name: contiv-netplugin, file: contiv-netplugin-clusterrolebinding.yml, type: clusterrolebinding}
       - {name: contiv-netplugin, file: contiv-netplugin-clusterrole.yml, type: clusterrole}
       - {name: contiv-netplugin, file: contiv-netplugin-serviceaccount.yml, type: serviceaccount}
-      - {name: contiv-etcd, file: contiv-etcd.yml, type: daemonset}
-      - {name: contiv-etcd-proxy, file: contiv-etcd-proxy.yml, type: daemonset}
       - {name: contiv-netplugin, file: contiv-netplugin.yml, type: daemonset}
-      - {name: contiv-netmaster, file: contiv-netmaster.yml, type: daemonset}
+  when: inventory_hostname in groups['kube-master']
 
 - set_fact:
     contiv_manifests: |-
       {% set _ = contiv_manifests.append({"name": "contiv-api-proxy", "file": "contiv-api-proxy.yml", "type": "daemonset"}) %}
       {{ contiv_manifests }}
-  when: contiv_enable_api_proxy
+  when:
+    - contiv_enable_api_proxy
+    - inventory_hostname in groups['kube-master']
 
 - name: Contiv | Create /var/contiv
   file:
@@ -55,21 +76,23 @@
     mode: 0755
     owner: root
     group: root
+  when: inventory_hostname in groups['kube-master']
 
 - name: Contiv | Install all Kubernetes resources
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ contiv_config_dir }}/{{ item.file }}"
   with_items: "{{ contiv_manifests }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
-  run_once: true
   register: contiv_manifests_results
+  when: inventory_hostname in groups['kube-master']
 
 - name: Contiv | Generate contiv-api-proxy certificates
   script: generate-certificate.sh
   args:
     creates: /var/contiv/auth_proxy_key.pem
-  when: "contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
 
@@ -81,7 +104,9 @@
   with_items:
     - auth_proxy_key.pem
     - auth_proxy_cert.pem
-  when: "contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
 
@@ -92,9 +117,11 @@
   with_items:
     - auth_proxy_key.pem
     - auth_proxy_cert.pem
-  when: "inventory_hostname != groups['kube-master'][0]
-         and inventory_hostname in groups['kube-master']
-         and contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - inventory_hostname != groups['kube-master'][0]
+    - inventory_hostname in groups['kube-master']
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate
 
 - name: Contiv | Copy cni plugins from hyperkube
   command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/bash -c '/bin/cp -fa /opt/cni/bin/* /cnibindir/'"
diff --git a/roles/network_plugin/contiv/tasks/pre-reset.yml b/roles/network_plugin/contiv/tasks/pre-reset.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a811d59213f778c5283f6277003cf8f30779ef3b
--- /dev/null
+++ b/roles/network_plugin/contiv/tasks/pre-reset.yml
@@ -0,0 +1,66 @@
+---
+- name: reset | Check that kubectl is still here
+  stat:
+    path: "{{ bin_dir }}/kubectl"
+  register: contiv_kubectl
+
+- name: reset | Delete contiv netplugin and netmaster daemonsets
+  kube:
+    name: "{{ item }}"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "ds"
+    state: absent
+  with_items:
+    - contiv-netplugin
+    - contiv-netmaster
+  register: contiv_cleanup_deletion
+  tags:
+    - network
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: reset | Copy contiv temporary cleanup script
+  copy:
+    src: ../files/contiv-cleanup.sh  # Not in role_path so we must trick...
+    dest: /opt/cni/bin/cleanup
+    owner: root
+    group: root
+    mode: 0750
+  when:
+    - contiv_kubectl.stat.exists
+
+- name: reset | Lay down contiv cleanup template
+  template:
+    src: ../templates/contiv-cleanup.yml.j2  # Not in role_path so we must trick...
+    dest: "{{ kube_config_dir }}/contiv-cleanup.yml"  # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
+  register: contiv_cleanup_manifest
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: reset | Start contiv cleanup resources
+  kube:
+    name: "contiv-cleanup"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "ds"
+    state: latest
+    filename: "{{ kube_config_dir }}/contiv-cleanup.yml"
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+  ignore_errors: true
+
+- name: reset | Wait until contiv cleanup is done
+  command: "{{ bin_dir }}/kubectl -n kube-system get ds contiv-cleanup -o jsonpath='{.status.numberReady}'"
+  register: cleanup_done_all_nodes
+  until: cleanup_done_all_nodes.stdout|int == groups['k8s-cluster']|length
+  retries: 5
+  delay: 5
+  ignore_errors: true
+  changed_when: false
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/network_plugin/contiv/tasks/reset.yml b/roles/network_plugin/contiv/tasks/reset.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3cf29361096fb72eb14bb53b20fe5b15f97edc09
--- /dev/null
+++ b/roles/network_plugin/contiv/tasks/reset.yml
@@ -0,0 +1,9 @@
+---
+- name: reset | check contiv vxlan_sys network device
+  stat:
+    path: "/sys/class/net/vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
+  register: contiv_vxlan_sys
+
+- name: reset | remove the vxlan_sys network device created by contiv
+  command: "ip link del vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
+  when: contiv_vxlan_sys.stat.exists
diff --git a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
index cea0efe5118da2861e31eb7151ed2d33a4db8210..16b8a9713475e6254633581e0a5c83cf9cad3bb5 100644
--- a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
@@ -35,16 +35,19 @@ spec:
             - --listen-address=0.0.0.0:{{ contiv_api_proxy_port }}
             - --tls-key-file=/var/contiv/auth_proxy_key.pem
             - --tls-certificate=/var/contiv/auth_proxy_cert.pem
+            - --data-store-driver=$(STORE_DRIVER)
             - --data-store-address=$(CONTIV_ETCD)
             - --netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}
           env:
             - name: NO_NETMASTER_STARTUP_CHECK
               value: "0"
+            - name: STORE_DRIVER
+              value: etcd
             - name: CONTIV_ETCD
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: cluster_store
+                  key: contiv_etcd
           securityContext:
             privileged: false
           volumeMounts:
diff --git a/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2 b/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..99cbecb7da6baf5b4392118743457e751312074d
--- /dev/null
+++ b/roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
@@ -0,0 +1,57 @@
+---
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: contiv-cleanup
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-cleanup
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-cleanup
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-cleanup
+    spec:
+      hostNetwork: true
+      hostPID: true
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      serviceAccountName: contiv-netplugin
+      containers:
+      - name: contiv-ovs-cleanup
+        image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+        command: ["/opt/cni/bin/cleanup"]
+        securityContext:
+          privileged: true
+        volumeMounts:
+          - mountPath: /etc/openvswitch
+            name: etc-openvswitch
+            readOnly: false
+          - mountPath: /var/run
+            name: var-run
+            readOnly: false
+          - mountPath: /opt/cni/bin
+            name: cni-bin-dir
+            readOnly: false
+        readinessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/cleanup.done
+          initialDelaySeconds: 3
+          periodSeconds: 3
+          successThreshold: 1
+      volumes:
+        - name: etc-openvswitch
+          hostPath:
+            path: /etc/openvswitch
+        - name: var-run
+          hostPath:
+            path: /var/run
+        - name: cni-bin-dir
+          hostPath:
+            path: /opt/cni/bin
diff --git a/roles/network_plugin/contiv/templates/contiv-config.yml.j2 b/roles/network_plugin/contiv/templates/contiv-config.yml.j2
index 249d9d88ebbcbfb9478f9b0739fa80c8246ebd27..18b7748eb652d84241d18e4856523b73bcf2a7c5 100644
--- a/roles/network_plugin/contiv/templates/contiv-config.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-config.yml.j2
@@ -7,20 +7,22 @@ metadata:
   name: contiv-config
   namespace: kube-system
 data:
-  # The location of your cluster store. This is set to the
-  # avdertise-client value below from the contiv-etcd service.
-  # Change it to an external etcd/consul instance if required.
-  cluster_store: "etcd://127.0.0.1:{{ contiv_etcd_listen_port }}"
-  # The CNI network configuration to install on each node.
-  cni_config: |-
+  contiv_netmaster_loglevel: "{{ contiv_netmaster_loglevel }}"
+  contiv_netplugin_loglevel: "{{ contiv_netplugin_loglevel }}"
+  contiv_ovsdb_server_extra_flags: "--verbose={{ contiv_ovsdb_server_loglevel }}"
+  contiv_ovs_vswitchd_extra_flags: "--verbose={{ contiv_ovs_vswitchd_loglevel }}"
+  contiv_fwdmode: "{{ contiv_fwd_mode }}"
+  contiv_netmode: "{{ contiv_net_mode }}"
+  contiv_etcd: "http://127.0.0.1:{{ contiv_etcd_listen_port }}"
+  contiv_cni_config: |-
     {
       "cniVersion": "{{ contiv_cni_version }}",
       "name": "contiv-net",
       "type": "contivk8s"
     }
-  config: |-
+  contiv_k8s_config: |-
     {
-       "K8S_API_SERVER": "{{ kube_apiserver_endpoint }}",
+       "K8S_API_SERVER": "{{ kube_apiserver_endpoint_for_contiv }}",
        "K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
        "K8S_KEY": "",
        "K8S_CERT": "",
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
index 75946d82191729ac0a27d14c2407eb1fbe8b1e16..a4adedd46c71e1bf44578b0d2dfd5b749413faff 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
@@ -19,6 +19,8 @@ spec:
     spec:
       hostNetwork: true
       hostPID: true
+      nodeSelector:
+        node-role.kubernetes.io/node: "true"
       containers:
         - name: contiv-etcd-proxy
           image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
index 92b4f588d4de04ff00da773b984421beb4137be9..4c179e6c48439e9e36c9c915eea8e981b5ee1261 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
@@ -13,6 +13,7 @@ rules:
       - namespaces
       - networkpolicies
     verbs:
+      - get
       - watch
       - list
       - update
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
index 787fe5c279e74dd8dcac4a65c9fa7a831d39b22c..be0f23360f928f7b09db28698ec87aaee8e1416e 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
@@ -1,4 +1,4 @@
-# This manifest deploys the Contiv API Server on Kubernetes.
+---
 kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
@@ -31,20 +31,31 @@ spec:
       containers:
         - name: contiv-netmaster
           image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
-          args:
-            - -m
-            - -pkubernetes
           env:
-            - name: CONTIV_ETCD
+            - name: CONTIV_ROLE
+              value: netmaster
+            - name: CONTIV_NETMASTER_MODE
+              value: kubernetes
+            - name: CONTIV_NETMASTER_ETCD_ENDPOINTS
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: cluster_store
-            - name: CONTIV_CONFIG
+                  key: contiv_etcd
+            - name: CONTIV_NETMASTER_FORWARD_MODE
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: config
+                  key: contiv_fwdmode
+            - name: CONTIV_NETMASTER_NET_MODE
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_netmode
+            - name: CONTIV_NETMASTER_LOG_LEVEL
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_netmaster_loglevel
           securityContext:
             privileged: true
           volumeMounts:
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
index b7927f51c58c0400c0b10b8ce601da6d4ebd2f7a..755e9b204ab9dee39e122fdb21fe1de802962f20 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
@@ -1,3 +1,4 @@
+---
 # This manifest installs contiv-netplugin container, as well
 # as the Contiv CNI plugins and network config on
 # each master and worker node in a Kubernetes cluster.
@@ -27,73 +28,99 @@ spec:
       - key: node-role.kubernetes.io/master
         effect: NoSchedule
       serviceAccountName: contiv-netplugin
+      initContainers:
+        - name: contiv-netplugin-init
+          image: {{ contiv_init_image_repo }}:{{ contiv_init_image_tag }}
+          env:
+            - name: CONTIV_ROLE
+              value: netplugin
+            - name: CONTIV_MODE
+              value: kubernetes
+            - name: CONTIV_K8S_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_k8s_config
+            - name: CONTIV_CNI_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_cni_config
+          volumeMounts:
+            - mountPath: /var/contiv
+              name: var-contiv
+              readOnly: false
+            - mountPath: /etc/cni/net.d/
+              name: etc-cni-dir
+              readOnly: false
+        - name: contiv-cni
+          image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
+          command: ["cp", "/contiv/bin/contivk8s", "/opt/cni/bin/contivk8s"]
+          volumeMounts:
+            - mountPath: /opt/cni/bin
+              name: cni-bin-dir
+              readOnly: false
       containers:
         # Runs netplugin container on each Kubernetes node. This
         # container programs network policy and routes on each
         # host.
         - name: contiv-netplugin
           image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
-          args:
-            - -pkubernetes
-            - -x
           env:
             - name: VLAN_IF
               value: {{ contiv_vlan_interface }}
-            - name: VTEP_IP
+            - name: CONTIV_NETPLUGIN_VLAN_UPLINKS
+              value: "{{ contiv_vlan_interface }}"
+            - name: CONTIV_NETPLUGIN_VXLAN_PORT
+              value: "{{ contiv_vxlan_port }}"
+            - name: CONTIV_ROLE
+              value: netplugin
+            - name: CONTIV_NETPLUGIN_MODE
+              value: kubernetes
+            - name: CONTIV_NETPLUGIN_VTEP_IP
               valueFrom:
                  fieldRef:
                     fieldPath: status.podIP
-            - name: CONTIV_ETCD
+            - name: CONTIV_NETPLUGIN_ETCD_ENDPOINTS
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: cluster_store
-            - name: CONTIV_CNI_CONFIG
+                  key: contiv_etcd
+            - name: CONTIV_NETPLUGIN_FORWARD_MODE
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: cni_config
-            - name: CONTIV_CONFIG
+                  key: contiv_fwdmode
+            - name: CONTIV_NETPLUGIN_NET_MODE
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: config
+                  key: contiv_netmode
+            - name: CONTIV_NETPLUGIN_LOG_LEVEL
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_netplugin_loglevel
+          resources:
+            requests:
+              cpu: 250m
           securityContext:
             privileged: true
           volumeMounts:
-            - mountPath: /etc/openvswitch
-              name: etc-openvswitch
-              readOnly: false
-            - mountPath: /lib/modules
-              name: lib-modules
-              readOnly: false
             - mountPath: /var/run
               name: var-run
               readOnly: false
             - mountPath: /var/contiv
               name: var-contiv
               readOnly: false
-            - mountPath: /opt/cni/bin
-              name: cni-bin-dir
-              readOnly: false
-            - mountPath: /etc/cni/net.d/
-              name: etc-cni-dir
-              readOnly: false
       volumes:
         # Used by contiv-netplugin
-        - name: etc-openvswitch
-          hostPath:
-            path: /etc/openvswitch
-        - name: lib-modules
-          hostPath:
-            path: /lib/modules
         - name: var-run
           hostPath:
             path: /var/run
         - name: var-contiv
           hostPath:
             path: /var/contiv
-        # Used to install CNI.
         - name: cni-bin-dir
           hostPath:
             path: /opt/cni/bin
diff --git a/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2 b/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..825ab3042a7c519e6010011e0a5e8a3156d6ae07
--- /dev/null
+++ b/roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
@@ -0,0 +1,80 @@
+---
+# NOTE: extensions/v1beta1 is used below for consistency with the other DaemonSets.
+# This manifest deploys the contiv-ovs pod.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: contiv-ovs
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-ovs
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-ovs
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-ovs
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      hostNetwork: true
+      hostPID: true
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      containers:
+      # Runs ovs containers on each Kubernetes node.
+      - name: contiv-ovsdb-server
+        image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+        command: ["/scripts/start-ovsdb-server.sh"]
+        securityContext:
+          privileged: false
+        # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and image is built again
+        env:
+          - name: OVSDBSERVER_EXTRA_FLAGS
+            valueFrom:
+              configMapKeyRef:
+                name: contiv-config
+                key: contiv_ovsdb_server_extra_flags
+        volumeMounts:
+          - mountPath: /etc/openvswitch
+            name: etc-openvswitch
+            readOnly: false
+          - mountPath: /var/run
+            name: var-run
+            readOnly: false
+      - name: contiv-ovs-vswitchd
+        image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+        command: ["/scripts/start-ovs-vswitchd.sh"]
+        securityContext:
+          privileged: true
+        # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and image is built again
+        env:
+          - name: OVSVSWITCHD_EXTRA_FLAGS
+            valueFrom:
+              configMapKeyRef:
+                name: contiv-config
+                key: contiv_ovs_vswitchd_extra_flags
+        volumeMounts:
+          - mountPath: /etc/openvswitch
+            name: etc-openvswitch
+            readOnly: false
+          - mountPath: /lib/modules
+            name: lib-modules
+            readOnly: true
+          - mountPath: /var/run
+            name: var-run
+            readOnly: false
+      volumes:
+        # Used by contiv-ovs
+        - name: etc-openvswitch
+          hostPath:
+            path: /etc/openvswitch
+        - name: lib-modules
+          hostPath:
+            path: /lib/modules
+        - name: var-run
+          hostPath:
+            path: /var/run
diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2
index 09e5fbb7c7c05c936dccdc4a8faaba51f2d33801..60e7b6325b2f8b9fec7666dc377bbb80a99f26bd 100644
--- a/roles/network_plugin/weave/templates/weave-net.yml.j2
+++ b/roles/network_plugin/weave/templates/weave-net.yml.j2
@@ -42,13 +42,13 @@ items:
           - patch
           - update
       - apiGroups:
-        - policy
+          - policy
         resourceNames:
-        - privileged
+          - privileged
         resources:
-        - podsecuritypolicies
+          - podsecuritypolicies
         verbs:
-        - use
+          - use
   - apiVersion: rbac.authorization.k8s.io/v1beta1
     kind: ClusterRoleBinding
     metadata:
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 88dec8d7a5ae74e2a7b1e97208ad94cf83dc1bfc..38945c64b86b94bd89bd826b6a9e7224b299b0d5 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -1,5 +1,12 @@
 ---
 
+- name: reset | include file with pre-reset tasks specific to the network_plugin if exists
+  include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/pre-reset.yml') | realpath  }}"
+  when:
+    - kube_network_plugin in ['contiv']
+  tags:
+    - network
+
 - name: reset | stop services
   service:
     name: "{{ item }}"
@@ -150,6 +157,11 @@
     - "{{ bin_dir }}/weave"
     - /var/lib/rkt
     - /etc/vault
+    - /etc/contiv
+    - /var/contiv
+    - /run/contiv
+    - /etc/openvswitch
+    - /run/openvswitch
   ignore_errors: yes
   tags:
     - files
@@ -181,7 +193,7 @@
 - name: reset | include file with reset tasks specific to the network_plugin if exists
   include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath  }}"
   when:
-    - kube_network_plugin in ['flannel', 'cilium']
+    - kube_network_plugin in ['flannel', 'cilium', 'contiv']
   tags:
     - network
 
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index cbb59e93e3147c00aeb197444a40c446ec29b89e..dd705439e02eb12fb92ae576bbe92d679ab31758 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -100,9 +100,8 @@
     - { role: kubespray-defaults}
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
-    - { role: upgrade/post-upgrade, tags: post-upgrade }
     - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
-    - { role: kubespray-defaults}
+    - { role: upgrade/post-upgrade, tags: post-upgrade }
   environment: "{{proxy_env}}"
 
 - hosts: kube-master[0]