diff --git a/contrib/dind/README.md b/contrib/dind/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..af0bf30bacd00d999f16a8039bc0ec8831074b2b
--- /dev/null
+++ b/contrib/dind/README.md
@@ -0,0 +1,152 @@
+# Kubespray DIND experimental setup
+
+This ansible playbook creates local docker containers
+to serve as Kubernetes "nodes", which in turn will run
+"normal" Kubernetes docker containers, a mode usually
+called DIND (Docker-IN-Docker).
+
+The playbook has two roles:
+- dind-host: creates the "nodes" as containers in localhost, with
+  appropriate settings for DIND (privileged, volume mapping for dind
+  storage, etc).
+- dind-cluster: customizes each node container to have required
+  system packages installed, and some utils (swapoff, lsattr)
+  symlinked to /bin/true to ease mimicking a real node.
+
+This playbook has been tested with Ubuntu 16.04 as host and ubuntu:16.04
+as docker images (note that dind-cluster has specific customization
+for these images).
+
+The playbook also creates a `/tmp/kubespray.dind.inventory_builder.sh`
+helper (wraps up running `contrib/inventory_builder/inventory.py` with
+node containers IPs and prefix).
+
+## Deploying
+
+See below for a complete successful run:
+
+1. Create the node containers
+
+~~~~
+# From the kubespray root dir
+cd contrib/dind
+pip install -r requirements.txt
+
+ansible-playbook -i hosts dind-cluster.yaml
+
+# Back to kubespray root
+cd ../..
+~~~~
+
+NOTE: if the playbook run fails with something like below error
+message, you may need to specifically set `ansible_python_interpreter`,
+see `./hosts` file for an example expanded localhost entry.
+
+~~~
+failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
+~~~
+
+2. Customize kubespray-dind.yaml
+
+Note that there's coupling between the node containers created above
+and `kubespray-dind.yaml` settings, in particular regarding selected `node_distro`
+(as set in `group_vars/all/all.yaml`), and docker settings.
+
+~~~
+$EDITOR contrib/dind/kubespray-dind.yaml
+~~~
+
+3. Prepare the inventory and run the playbook
+
+~~~
+INVENTORY_DIR=inventory/local-dind
+mkdir -p ${INVENTORY_DIR}
+rm -f ${INVENTORY_DIR}/hosts.ini
+CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
+
+ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
+~~~
+
+NOTE: You could also test other distros without editing files by
+passing `--extra-vars` as per below commandline,
+replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`:
+
+~~~
+cd contrib/dind
+ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO
+
+cd ../..
+CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
+ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
+~~~
+
+## Resulting deployment
+
+See below to get an idea of what a completed deployment looks like,
+from the host where you ran kubespray playbooks.
+
+### node_distro: debian
+
+Running from an Ubuntu Xenial host:
+
+~~~
+$ uname -a
+Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
+15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
+
+$ docker ps
+CONTAINER ID        IMAGE               COMMAND CREATED             STATUS              PORTS               NAMES
+1835dd183b75        debian:9.5          "sh -c 'apt-get -qy …"   43 minutes ago      Up 43 minutes                           kube-node5
+30b0af8d2924        debian:9.5          "sh -c 'apt-get -qy …"   43 minutes ago      Up 43 minutes                           kube-node4
+3e0d1510c62f        debian:9.5          "sh -c 'apt-get -qy …"   43 minutes ago      Up 43 minutes                           kube-node3
+738993566f94        debian:9.5          "sh -c 'apt-get -qy …"   44 minutes ago      Up 44 minutes                           kube-node2
+c581ef662ed2        debian:9.5          "sh -c 'apt-get -qy …"   44 minutes ago      Up 44 minutes                           kube-node1
+
+$ docker exec kube-node1 kubectl get node
+NAME         STATUS   ROLES         AGE   VERSION
+kube-node1   Ready    master,node   18m   v1.12.1
+kube-node2   Ready    master,node   17m   v1.12.1
+kube-node3   Ready    node          17m   v1.12.1
+kube-node4   Ready    node          17m   v1.12.1
+kube-node5   Ready    node          17m   v1.12.1
+
+$ docker exec kube-node1 kubectl get pod --all-namespaces
+NAMESPACE     NAME                                    READY   STATUS    RESTARTS   AGE
+default       netchecker-agent-67489                  1/1     Running   0          2m51s
+default       netchecker-agent-6qq6s                  1/1     Running   0          2m51s
+default       netchecker-agent-fsw92                  1/1     Running   0          2m51s
+default       netchecker-agent-fw6tl                  1/1     Running   0          2m51s
+default       netchecker-agent-hostnet-8f2zb          1/1     Running   0          3m
+default       netchecker-agent-hostnet-gq7ml          1/1     Running   0          3m
+default       netchecker-agent-hostnet-jfkgv          1/1     Running   0          3m
+default       netchecker-agent-hostnet-kwfwx          1/1     Running   0          3m
+default       netchecker-agent-hostnet-r46nm          1/1     Running   0          3m
+default       netchecker-agent-lxdrn                  1/1     Running   0          2m51s
+default       netchecker-server-864bd4c897-9vstl      1/1     Running   0          2m40s
+default       sh-68fcc6db45-qf55h                     1/1     Running   1          12m
+kube-system   coredns-7598f59475-6vknq                1/1     Running   0          14m
+kube-system   coredns-7598f59475-l5q5x                1/1     Running   0          14m
+kube-system   kube-apiserver-kube-node1               1/1     Running   0          17m
+kube-system   kube-apiserver-kube-node2               1/1     Running   0          18m
+kube-system   kube-controller-manager-kube-node1      1/1     Running   0          18m
+kube-system   kube-controller-manager-kube-node2      1/1     Running   0          18m
+kube-system   kube-proxy-5xx9d                        1/1     Running   0          17m
+kube-system   kube-proxy-cdqq4                        1/1     Running   0          17m
+kube-system   kube-proxy-n64ls                        1/1     Running   0          17m
+kube-system   kube-proxy-pswmj                        1/1     Running   0          18m
+kube-system   kube-proxy-x89qw                        1/1     Running   0          18m
+kube-system   kube-scheduler-kube-node1               1/1     Running   4          17m
+kube-system   kube-scheduler-kube-node2               1/1     Running   4          18m
+kube-system   kubernetes-dashboard-5db4d9f45f-548rl   1/1     Running   0          14m
+kube-system   nginx-proxy-kube-node3                  1/1     Running   4          17m
+kube-system   nginx-proxy-kube-node4                  1/1     Running   4          17m
+kube-system   nginx-proxy-kube-node5                  1/1     Running   4          17m
+kube-system   weave-net-42bfr                         2/2     Running   0          16m
+kube-system   weave-net-6gt8m                         2/2     Running   0          16m
+kube-system   weave-net-88nnc                         2/2     Running   0          16m
+kube-system   weave-net-shckr                         2/2     Running   0          16m
+kube-system   weave-net-xr46t                         2/2     Running   0          16m
+
+$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
+{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
+~~~
diff --git a/contrib/dind/dind-cluster.yaml b/contrib/dind/dind-cluster.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3fcae1eb063fede2e3264b2067006d1dbe8362e2
--- /dev/null
+++ b/contrib/dind/dind-cluster.yaml
@@ -0,0 +1,9 @@
+---
+- hosts: localhost
+  gather_facts: False
+  roles:
+    - { role: dind-host }
+
+- hosts: containers
+  roles:
+    - { role: dind-cluster }
diff --git a/contrib/dind/group_vars/all/all.yaml b/contrib/dind/group_vars/all/all.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6e6223898e625fa3c16d897e7f7412d2a02bb23e
--- /dev/null
+++ b/contrib/dind/group_vars/all/all.yaml
@@ -0,0 +1,2 @@
+# See distro.yaml for supported node_distro images
+node_distro: debian
diff --git a/contrib/dind/group_vars/all/distro.yaml b/contrib/dind/group_vars/all/distro.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..354d962978d1492bccef46ee9917343d50c20ead
--- /dev/null
+++ b/contrib/dind/group_vars/all/distro.yaml
@@ -0,0 +1,40 @@
+distro_settings:
+  debian: &DEBIAN
+    image: "debian:9.5"
+    user: "debian"
+    pid1_exe: /lib/systemd/systemd
+    init: |
+      sh -c "apt-get -qy update && apt-get -qy install systemd-sysv dbus && exec /sbin/init"
+    raw_setup: apt-get -qy update && apt-get -qy install dbus python sudo iproute2
+    raw_setup_done: test -x /usr/bin/sudo
+    agetty_svc: getty@*
+    ssh_service: ssh
+    extra_packages: []
+  ubuntu:
+    <<: *DEBIAN
+    image: "ubuntu:16.04"
+    user: "ubuntu"
+    init: |
+      /sbin/init
+  centos: &CENTOS
+    image: "centos:7"
+    user: "centos"
+    pid1_exe: /usr/lib/systemd/systemd
+    init: |
+      /sbin/init
+    raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables
+    raw_setup_done: test -x /usr/bin/sudo
+    agetty_svc: getty@* serial-getty@*
+    ssh_service: sshd
+    extra_packages: []
+  fedora:
+    <<: *CENTOS
+    image: "fedora:latest"
+    user: "fedora"
+    raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables; mkdir -p /etc/modules-load.d
+    extra_packages:
+      - hostname
+      - procps
+      - findutils
+      - kmod
+      - iputils
diff --git a/contrib/dind/hosts b/contrib/dind/hosts
new file mode 100644
index 0000000000000000000000000000000000000000..356aa2675bf9dabffe5c44a25342f2864d32912f
--- /dev/null
+++ b/contrib/dind/hosts
@@ -0,0 +1,15 @@
+[local]
+# If you created a virtualenv for ansible, you may need to specify running the
+# python binary from there instead:
+#localhost ansible_connection=local ansible_python_interpreter=/home/user/kubespray/.venv/bin/python
+localhost ansible_connection=local
+
+[containers]
+kube-node1
+kube-node2
+kube-node3
+kube-node4
+kube-node5
+
+[containers:vars]
+ansible_connection=docker
diff --git a/contrib/dind/kubespray-dind.yaml b/contrib/dind/kubespray-dind.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..628ed787b2df22be1b600ad075b3b246df221d91
--- /dev/null
+++ b/contrib/dind/kubespray-dind.yaml
@@ -0,0 +1,24 @@
+# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
+# See contrib/dind/README.md
+kube_api_anonymous_auth: true
+kubeadm_enabled: true
+
+kubelet_fail_swap_on: false
+
+# Docker nodes need to have been created with same "node_distro: debian"
+# at contrib/dind/group_vars/all/all.yaml
+bootstrap_os: debian
+
+docker_version: latest
+
+docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check=true -g /dind/docker
+
+dns_mode: coredns
+
+kube_network_plugin: weave
+
+deploy_netchecker: True
+netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
+netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
+netcheck_agent_image_tag: v1.0
+netcheck_server_image_tag: v1.0
diff --git a/contrib/dind/requirements.txt b/contrib/dind/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bdb9670965e4db736d9bdab6edce7cf0d688bea9
--- /dev/null
+++ b/contrib/dind/requirements.txt
@@ -0,0 +1 @@
+docker
diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..60e330ac28b04990d3a817be69808c147fea69e8
--- /dev/null
+++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml
@@ -0,0 +1,70 @@
+- name: set_fact distro_setup
+  set_fact:
+    distro_setup: "{{ distro_settings[node_distro] }}"
+
+- name: set_fact other distro settings
+  set_fact:
+    distro_user: "{{ distro_setup['user'] }}"
+    distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
+    distro_extra_packages: "{{ distro_setup['extra_packages'] }}"
+
+- name: Null-ify some linux tools to ease DIND
+  file:
+    src: "/bin/true"
+    dest: "{{item}}"
+    state: link
+    force: yes
+  with_items:
+    # DIND box may have swap enabled, don't bother
+    - /sbin/swapoff
+    # /etc/hosts handling would fail on trying to copy file attributes on edit,
+    # void it by successfully returning nil output
+    - /usr/bin/lsattr
+    # disable selinux-isms, especially needed if running on a non-SELinux host
+    - /usr/sbin/semodule
+
+- name: Void installing dpkg docs and man pages on Debian based distros
+  copy:
+    content: |
+      # Delete locales
+      path-exclude=/usr/share/locale/*
+      # Delete man pages
+      path-exclude=/usr/share/man/*
+      # Delete docs
+      path-exclude=/usr/share/doc/*
+      path-include=/usr/share/doc/*/copyright
+    dest:  /etc/dpkg/dpkg.cfg.d/01_nodoc
+  when:
+    - ansible_os_family == 'Debian'
+
+- name: Install system packages to better match a full-fledge node
+  package:
+    name: "{{ item }}"
+    state: present
+  with_items: "{{ distro_extra_packages }} + [ 'rsyslog', 'openssh-server' ]"
+
+- name: Start needed services
+  service:
+    name: "{{ item }}"
+    state: started
+  with_items:
+    - rsyslog
+    - "{{ distro_ssh_service }}"
+
+- name: Create distro user "{{distro_user}}"
+  user:
+    name: "{{ distro_user }}"
+    uid: 1000
+    #groups: sudo
+    append: yes
+
+- name: Allow password-less sudo to "{{ distro_user }}"
+  copy:
+    content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
+    dest: "/etc/sudoers.d/{{ distro_user }}"
+
+- name: Add my pubkey to "{{ distro_user }}" user authorized keys
+  authorized_key:
+    user: "{{ distro_user }}"
+    state: present
+    key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a9b102ff0b8b34e994c25528a54c3259f906ef7d
--- /dev/null
+++ b/contrib/dind/roles/dind-host/tasks/main.yaml
@@ -0,0 +1,86 @@
+- name: set_fact distro_setup
+  set_fact:
+    distro_setup: "{{ distro_settings[node_distro] }}"
+
+- name: set_fact other distro settings
+  set_fact:
+    distro_image: "{{ distro_setup['image'] }}"
+    distro_init: "{{ distro_setup['init'] }}"
+    distro_pid1_exe: "{{ distro_setup['pid1_exe'] }}"
+    distro_raw_setup: "{{ distro_setup['raw_setup'] }}"
+    distro_raw_setup_done: "{{ distro_setup['raw_setup_done'] }}"
+    distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"
+
+- name: Create dind node containers from "containers" inventory section
+  docker_container:
+    image: "{{ distro_image }}"
+    name: "{{ item }}"
+    state: started
+    hostname: "{{ item }}"
+    command: "{{ distro_init }}"
+    #recreate: yes
+    privileged: true
+    tmpfs:
+      - /sys/module/nf_conntrack/parameters
+    volumes:
+      - /boot:/boot
+      - /lib/modules:/lib/modules
+      - "{{ item }}:/dind/docker"
+  register: containers
+  with_items: "{{groups.containers}}"
+  tags:
+    - addresses
+
+- name: Gather list of containers IPs
+  set_fact:
+    addresses: "{{ containers.results | map(attribute='ansible_facts') | map(attribute='docker_container') | map(attribute='NetworkSettings') | map(attribute='IPAddress') | list }}"
+  tags:
+    - addresses
+
+- name: Create inventory_builder helper already set with the list of node containers' IPs
+  template:
+    src: inventory_builder.sh.j2
+    dest: /tmp/kubespray.dind.inventory_builder.sh
+    mode: 0755
+  tags:
+    - addresses
+
+- name: Install needed packages into node containers via raw, need to wait for possible systemd packages to finish installing
+  raw: |
+    # agetty processes churn a lot of cpu time failing on nonexistent ttys, early STOP them, to reap them in the below task
+    pkill -STOP agetty || true
+    {{ distro_raw_setup_done }}  && echo SKIPPED && exit 0
+    until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
+    {{ distro_raw_setup }}
+  delegate_to: "{{ item._ansible_item_label }}"
+  with_items: "{{ containers.results }}"
+  register: result
+  changed_when: result.stdout.find("SKIPPED") < 0
+
+- name: Remove gettys from node containers
+  raw: |
+    until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
+    systemctl disable {{ distro_agetty_svc }}
+    systemctl stop {{ distro_agetty_svc }}
+  delegate_to: "{{ item._ansible_item_label }}"
+  with_items: "{{ containers.results }}"
+  changed_when: false
+
+# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
+# handle manually
+- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
+  raw: |
+    echo {{ item | hash('sha1') }} > /etc/machine-id.new
+    mv -b /etc/machine-id.new /etc/machine-id
+    cmp /etc/machine-id /etc/machine-id~ || true
+    systemctl daemon-reload
+  delegate_to: "{{ item._ansible_item_label }}"
+  with_items: "{{ containers.results }}"
+
+- name: Early hack image install to adapt for DIND
+  raw: |
+    rm -fv /usr/bin/udevadm /usr/sbin/udevadm
+  delegate_to: "{{ item._ansible_item_label }}"
+  with_items: "{{ containers.results }}"
+  register: result
+  changed_when: result.stdout.find("removed") >= 0
diff --git a/contrib/dind/roles/dind-host/templates/inventory_builder.sh.j2 b/contrib/dind/roles/dind-host/templates/inventory_builder.sh.j2
new file mode 100644
index 0000000000000000000000000000000000000000..48e17583e3723179602220027af3d02f90c0cd0e
--- /dev/null
+++ b/contrib/dind/roles/dind-host/templates/inventory_builder.sh.j2
@@ -0,0 +1,3 @@
+#!/bin/bash
+# NOTE: if you change HOST_PREFIX, you also need to edit ./hosts [containers] section
+HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py {% for ip in addresses %} {{ ip }} {% endfor %}
diff --git a/contrib/dind/run-test-distros.sh b/contrib/dind/run-test-distros.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e1c2c3ef7998a37463a02c5e7fb4494dcc0078e8
--- /dev/null
+++ b/contrib/dind/run-test-distros.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Q&D test'em all: creates full DIND kubespray deploys
+# for each distro, verifying it via netchecker.
+
+pass_or_fail() {
+    local rc="$?"
+    local msg="$*"
+    local date="$(date -Isec)"
+    [ $rc -eq 0 ] && echo "PASS: [$date] $msg" || echo "FAIL: [$date] $msg"
+}
+test_distro() {
+    local distro=${1:?}
+    ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=$distro
+    pass_or_fail "$distro: dind-nodes"
+    (cd ../..
+        INVENTORY_DIR=inventory/local-dind
+        mkdir -p ${INVENTORY_DIR}
+        rm -f ${INVENTORY_DIR}/hosts.ini
+        CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
+        ansible-playbook --become -e ansible_ssh_user=$distro -i \
+            ${INVENTORY_DIR}/hosts.ini cluster.yml \
+            --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=$distro
+        pass_or_fail "$distro: kubespray"
+    )
+    docker exec kube-node1 kubectl get pod --all-namespaces
+    pass_or_fail "$distro: kube-api"
+    let n=60
+    while ((n--)); do
+        docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check | grep successfully && break
+        sleep 2
+    done
+    [ $n -ge 0 ]
+    pass_or_fail "$distro: netcheck"
+}
+
+# Get all DISTROS from distro.yaml if $* unset (shame no yaml parsing, but nuff anyway)
+DISTROS="${*:-$(egrep -o '^  \w+' group_vars/all/distro.yaml|paste -s)}"
+NODES="$(egrep ^kube-node hosts|paste -s)"
+echo "DISTROS=${DISTROS}"
+for distro in ${DISTROS}; do
+    docker rm -f ${NODES}
+    { time test_distro ${distro} ;} |& tee test-${distro}.out
+    # sleeping for the sake of the human to verify if they want
+    sleep 2m
+done
+egrep '^(PASS:|FAIL:|real)' $(ls -tr test-*out)
diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
index f7d3e6023fd683860a4d2479f216b7b4def240b5..d15787e76ebdb3a254c869a62cc4e5e44529fcad 100644
--- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
+++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
@@ -8,6 +8,7 @@
     state: present
     create: yes
     backup: yes
+    unsafe_writes: yes
     marker: "# Ansible inventory hosts {mark}"
   when: populate_inventory_to_hosts_file
 
@@ -18,6 +19,7 @@
     line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name }}"
     state: present
     backup: yes
+    unsafe_writes: yes
   when:
     - loadbalancer_apiserver is defined
     - loadbalancer_apiserver.address is defined
@@ -51,4 +53,5 @@
     regexp: "^{{ item.key }}.*$"
     state: present
     backup: yes
+    unsafe_writes: yes
   with_dict: "{{ etc_hosts_localhosts_dict_target }}"