diff --git a/environments/dev/group_vars/k8s-cluster.yml b/environments/dev/group_vars/k8s-cluster.yml
index 609e86807f1d615d9f19f707074b868593816984..61247c5a8b003bba1d6477a1e5ef02acbc3d3182 100644
--- a/environments/dev/group_vars/k8s-cluster.yml
+++ b/environments/dev/group_vars/k8s-cluster.yml
@@ -31,6 +31,14 @@ overlay_network_subnet: 10.233.64.0/18
 # room for 4096 nodes with 254 pods per node.
 overlay_network_host_prefix: 24
 
+# With calico it is possible to distribute routes via the border routers of the datacenter.
+peer_with_router: false
+# Warning: enabling router peering will disable calico's default behavior ('node mesh');
+# the subnet of each node will instead be distributed by the datacenter router.
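+# Illustrative example (assumes per-node 'local_as' and per-group 'bgp_peers'
+# are set in the inventory, as in environments/dev/inventory):
+# peer_with_router: true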
+
 # Internal DNS configuration.
 # Kubernetes can create and mainatain its own DNS server to resolve service names
 # into appropriate IP addresses. It's highly advisable to run such DNS server,
diff --git a/environments/dev/inventory b/environments/dev/inventory
index 9955305dddb262add39223df4578aca73660ad4c..7b67a7a8f9c8adb5497b23d38817560ea80f1403 100644
--- a/environments/dev/inventory
+++ b/environments/dev/inventory
@@ -1,19 +1,36 @@
 [downloader]
-192.168.0.1
+172.16.0.1
 
 [kube-master]
 # NB : the br_addr must be in the {{ calico_pool }} subnet
 # it will assign a /24 subnet per node
-192.168.0.1 br_addr=10.233.64.1
-
-[kube-node]
-192.168.0.2 br_addr=10.233.65.1
-192.168.0.3 br_addr=10.233.66.1
-192.168.0.4 br_addr=10.233.67.1
+172.16.0.1 br_addr=10.233.64.1
 
 [etcd]
-192.168.0.1
+172.16.0.1
+
+[kube-node:children]
+usa
+france
+
+[usa]
+172.16.0.1 br_addr=10.233.64.1
+# Configure the AS number assigned to each node if BGP peering with border routers is enabled
+172.16.0.2 br_addr=10.233.65.1 # local_as=65xxx
+172.16.0.3 br_addr=10.233.66.1 # local_as=65xxx
+
+[france]
+192.168.0.1 br_addr=10.233.67.1 # local_as=65xxx
+192.168.0.2 br_addr=10.233.68.1 # local_as=65xxx
 
 [k8s-cluster:children]
 kube-node
 kube-master
+
+# If you want to configure BGP peering with border routers, you'll need to set the following vars:
+# List of routers and their AS numbers
+#[usa:vars]
+#bgp_peers=[{"router_id": "172.16.0.252", "as": "65xxx"}, {"router_id": "172.16.0.253", "as": "65xxx"}]
+#
+#[france:vars]
+#bgp_peers=[{"router_id": "192.168.0.252", "as": "65xxx"}, {"router_id": "192.168.0.253", "as": "65xxx"}]
diff --git a/environments/production/group_vars/k8s-cluster.yml b/environments/production/group_vars/k8s-cluster.yml
index 4231c4c19f1751a954835b25e7a38227dec392cc..95c89cead3d695aafce1106dc6f68b695c523991 100644
--- a/environments/production/group_vars/k8s-cluster.yml
+++ b/environments/production/group_vars/k8s-cluster.yml
@@ -31,6 +31,11 @@
 # room for 4096 nodes with 254 pods per node.
 # overlay_network_host_prefix: 24
 
+# With calico it is possible to distribute routes via the border routers of the datacenter.
+# peer_with_router: false
+# Warning: enabling router peering will disable calico's default behavior ('node mesh');
+# the subnet of each node will instead be distributed by the datacenter router.
+
 # Internal DNS configuration.
 # Kubernetes can create and mainatain its own DNS server to resolve service names
 # into appropriate IP addresses. It's highly advisable to run such DNS server,
diff --git a/roles/overlay_network/tasks/calico.yml b/roles/overlay_network/tasks/calico.yml
index 2b5e3d0402bc41a41bfe6c6a12f808f56f3d96fc..f09526fb08ecf2e6d2178f07cc4fe91ff23f5d0a 100644
--- a/roles/overlay_network/tasks/calico.yml
+++ b/roles/overlay_network/tasks/calico.yml
@@ -1,37 +1,53 @@
 ---
-- name: Install calicoctl bin
+- name: Calico | Install calicoctl bin
   copy: 
      src={{ local_release_dir }}/calico/bin/calicoctl
      dest={{ bin_dir }}
      mode=u+x
   notify: restart calico-node
 
-- name: Create calicoctl symlink (needed by kubelet)
+- name: Calico | Create calicoctl symlink (needed by kubelet)
   file: src=/usr/local/bin/calicoctl dest=/usr/bin/calicoctl state=link
 
-- name: Write calico-node systemd init file
+- name: Calico | Write calico-node systemd init file
   template: src=calico/calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
   notify: 
     - reload systemd
     - restart calico-node
 
-- name: Write network-environment
+- name: Calico | Write network-environment
   template: src=calico/network-environment.j2 dest=/etc/network-environment mode=u+x
   notify: 
     - reload systemd
     - restart calico-node
 
-- name: Enable calico-node
+- name: Calico | Enable calico-node
   service: name=calico-node enabled=yes state=started
 
-- name: Configure calico-node remove default pool
+- name: Calico | Configure calico-node remove default pool
   shell: calicoctl pool remove 192.168.0.0/16
   environment: 
      ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
   run_once: true
 
-- name: Configure calico-node desired pool
+- name: Calico | Configure calico-node desired pool
   shell: calicoctl pool add {{ overlay_network_subnet }}
   environment: 
      ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
   run_once: true
+
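+# When peering with the datacenter border routers, calico's default full node
+# mesh is turned off and each node's routes are exchanged with the configured
+# BGP peers instead (per-group 'bgp_peers' in the inventory).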
+- name: Calico | Disable node mesh
+  shell: calicoctl bgp node-mesh off
+  environment:
+     ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
+  when: peer_with_router and inventory_hostname in groups['kube-node']
+
+- name: Calico | Configure peering with router(s)
+  shell: calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}
+  environment:
+     ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
+  with_items: bgp_peers
+  when: peer_with_router and inventory_hostname in groups['kube-node']
diff --git a/roles/overlay_network/templates/calico/calico-node.service.j2 b/roles/overlay_network/templates/calico/calico-node.service.j2
index 4f51407b0ea13ed00ac8fd3da219e1d9e098e9e1..fe44d0f5edefe740e15ef33c4d3bde7c7a74f359 100644
--- a/roles/overlay_network/templates/calico/calico-node.service.j2
+++ b/roles/overlay_network/templates/calico/calico-node.service.j2
@@ -1,19 +1,25 @@
-[Unit]
-Description=calicoctl node
-After=etcd2.service
-
-[Service]
-EnvironmentFile=/etc/network-environment
-User=root
-PermissionsStartOnly=true
-ExecStartPre={{ bin_dir }}/calicoctl checksystem --fix
-{% if inventory_hostname in groups['kube-node'] %}
-ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --kubernetes
-{% else %}
-ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4}
-{% endif %}
-RemainAfterExit=yes
-Type=oneshot
-
-[Install]
-WantedBy=multi-user.target
+[Unit]
+Description=calicoctl node
+After=etcd2.service
+
+[Service]
+EnvironmentFile=/etc/network-environment
+User=root
+PermissionsStartOnly=true
+ExecStartPre={{ bin_dir }}/calicoctl checksystem --fix
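+{# On worker nodes, pass the node's own AS number when peering with the
+   datacenter routers is enabled; otherwise calico's default node mesh is used. #}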
+{% if inventory_hostname in groups['kube-node'] %}
+{%    if peer_with_router %}
+ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --as={{ local_as }} --kubernetes
+{%    else %}
+ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --kubernetes
+{%    endif %}
+{% else %}
+ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4}
+{% endif %}
+RemainAfterExit=yes
+Type=oneshot
+
+[Install]
+WantedBy=multi-user.target