diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c674c2e9936f2892fd1a4ddc9bf021cd89172317..5da1a97230b5335fc478bdd94535a8eb629ac30d 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -87,7 +87,7 @@ before_script:
       -e gce_credentials_file=${HOME}/.ssh/gce.json
       -e gce_project_id=${GCE_PROJECT_ID}
       -e gce_service_account_email=${GCE_ACCOUNT}
-      -e inventory_path=${PWD}/inventory/inventory.ini
+      -e inventory_path=${PWD}/inventory/sample/hosts.ini
       -e test_id=${TEST_ID}
       -e preemptible=$GCE_PREEMPTIBLE
 
@@ -104,7 +104,7 @@ before_script:
     # Create cluster
     - >
       ansible-playbook
-      -i inventory/inventory.ini
+      -i inventory/sample/hosts.ini
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -124,7 +124,7 @@ before_script:
       test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
       git checkout "${CI_BUILD_REF}";
       ansible-playbook
-      -i inventory/inventory.ini
+      -i inventory/sample/hosts.ini
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -141,20 +141,20 @@ before_script:
     # Tests Cases
     ## Test Master API
     - >
-      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+      ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
       -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
 
     ## Ping the between 2 pod
-    - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
+    - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
 
     ## Advanced DNS checks
-    - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
+    - ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
 
     ## Idempotency checks 1/5 (repeat deployment)
     - >
       if [ "${IDEMPOT_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/inventory.ini
+      -i inventory/sample/hosts.ini
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -171,7 +171,7 @@ before_script:
     - >
       if [ "${IDEMPOT_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/inventory.ini
+      -i inventory/sample/hosts.ini
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -186,7 +186,7 @@ before_script:
     - >
       if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/inventory.ini
+      -i inventory/sample/hosts.ini
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -203,7 +203,7 @@ before_script:
     - >
       if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
       ansible-playbook
-      -i inventory/inventory.ini
+      -i inventory/sample/hosts.ini
       -b --become-user=root
       --private-key=${HOME}/.ssh/id_rsa
       -u $SSH_USER
@@ -219,7 +219,7 @@ before_script:
     ## Idempotency checks 5/5 (Advanced DNS checks)
     - >
       if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
-      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
+      ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH}
       -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
       --limit "all:!fake_hosts"
       tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
@@ -227,13 +227,13 @@ before_script:
 
   after_script:
     - >
-      ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local  $LOG_LEVEL
+      ansible-playbook -i inventory/sample/hosts.ini tests/cloud_playbooks/delete-gce.yml -c local  $LOG_LEVEL
       -e @${CI_TEST_VARS}
       -e test_id=${TEST_ID}
       -e gce_project_id=${GCE_PROJECT_ID}
       -e gce_service_account_email=${GCE_ACCOUNT}
       -e gce_credentials_file=${HOME}/.ssh/gce.json
-      -e inventory_path=${PWD}/inventory/inventory.ini
+      -e inventory_path=${PWD}/inventory/sample/hosts.ini
 
 # Test matrix. Leave the comments for markup scripts.
 .coreos_calico_aio_variables: &coreos_calico_aio_variables
diff --git a/README.md b/README.md
index 7a42cec0e40ec144d2fee27723fa4c8eb3a249ed..f784fc2cfbdd9db287180547f71309d2c007e794 100644
--- a/README.md
+++ b/README.md
@@ -1,67 +1,89 @@
 ![Kubernetes Logo](https://s28.postimg.org/lf3q4ocpp/k8s.png)
 
-## Deploy a production ready kubernetes cluster
+Deploy a Production Ready Kubernetes Cluster
+============================================
 
-If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **#kubespray**.
+If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
 
-- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
-- **High available** cluster
-- **Composable** (Choice of the network plugin for instance)
-- Support most popular **Linux distributions**
-- **Continuous integration tests**
+-   Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
+-   **Highly available** cluster
+-   **Composable** (Choice of the network plugin for instance)
+-   Support most popular **Linux distributions**
+-   **Continuous integration tests**
 
+Quick Start
+-----------
 
 To deploy the cluster you can use :
 
-**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) <br>
-**vagrant** by simply running `vagrant up` (for tests purposes) <br>
-
-
-*  [Requirements](#requirements)
-*  [Kubespray vs ...](docs/comparisons.md)
-*  [Getting started](docs/getting-started.md)
-*  [Ansible inventory and tags](docs/ansible.md)
-*  [Integration with existing ansible repo](docs/integration.md)
-*  [Deployment data variables](docs/vars.md)
-*  [DNS stack](docs/dns-stack.md)
-*  [HA mode](docs/ha-mode.md)
-*  [Network plugins](#network-plugins)
-*  [Vagrant install](docs/vagrant.md)
-*  [CoreOS bootstrap](docs/coreos.md)
-*  [Debian Jessie setup](docs/debian.md)
-*  [Downloaded artifacts](docs/downloads.md)
-*  [Cloud providers](docs/cloud.md)
-*  [OpenStack](docs/openstack.md)
-*  [AWS](docs/aws.md)
-*  [Azure](docs/azure.md)
-*  [vSphere](docs/vsphere.md)
-*  [Large deployments](docs/large-deployments.md)
-*  [Upgrades basics](docs/upgrades.md)
-*  [Roadmap](docs/roadmap.md)
-
-Supported Linux distributions
-===============
-
-* **Container Linux by CoreOS**
-* **Debian** Jessie
-* **Ubuntu** 16.04
-* **CentOS/RHEL** 7
+### Ansible
+
+    # Copy ``inventory/sample`` as ``inventory/mycluster``
+    cp -rfp inventory/sample inventory/mycluster
+
+    # Update Ansible inventory file with inventory builder
+    declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
+    CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
+
+    # Review and change parameters under ``inventory/mycluster/group_vars``
+    cat inventory/mycluster/group_vars/all.yml
+    cat inventory/mycluster/group_vars/k8s-cluster.yml
+
+    # Deploy Kubespray with Ansible Playbook
+    ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
+
+### Vagrant
+
+    # Simply run `vagrant up` (for test purposes)
+    vagrant up
+
+Documents
+---------
+
+-   [Requirements](#requirements)
+-   [Kubespray vs ...](docs/comparisons.md)
+-   [Getting started](docs/getting-started.md)
+-   [Ansible inventory and tags](docs/ansible.md)
+-   [Integration with existing ansible repo](docs/integration.md)
+-   [Deployment data variables](docs/vars.md)
+-   [DNS stack](docs/dns-stack.md)
+-   [HA mode](docs/ha-mode.md)
+-   [Network plugins](#network-plugins)
+-   [Vagrant install](docs/vagrant.md)
+-   [CoreOS bootstrap](docs/coreos.md)
+-   [Debian Jessie setup](docs/debian.md)
+-   [Downloaded artifacts](docs/downloads.md)
+-   [Cloud providers](docs/cloud.md)
+-   [OpenStack](docs/openstack.md)
+-   [AWS](docs/aws.md)
+-   [Azure](docs/azure.md)
+-   [vSphere](docs/vsphere.md)
+-   [Large deployments](docs/large-deployments.md)
+-   [Upgrades basics](docs/upgrades.md)
+-   [Roadmap](docs/roadmap.md)
+
+Supported Linux Distributions
+-----------------------------
+
+-   **Container Linux by CoreOS**
+-   **Debian** Jessie
+-   **Ubuntu** 16.04
+-   **CentOS/RHEL** 7
 
 Note: Upstart/SysV init based OS types are not supported.
 
 Versions of supported components
 --------------------------------
 
-
-[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.2 <br>
-[etcd](https://github.com/coreos/etcd/releases) v3.2.4 <br>
-[flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
-[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 <br>
-[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
-[contiv](https://github.com/contiv/install/releases) v1.0.3 <br>
-[weave](http://weave.works/) v2.0.1 <br>
-[docker](https://www.docker.com/) v1.13 (see note)<br>
-[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>
+-   [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.2
+-   [etcd](https://github.com/coreos/etcd/releases) v3.2.4
+-   [flanneld](https://github.com/coreos/flannel/releases) v0.8.0
+-   [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0
+-   [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
+-   [contiv](https://github.com/contiv/install/releases) v1.0.3
+-   [weave](http://weave.works/) v2.0.1
+-   [docker](https://www.docker.com/) v1.13 (see note)
+-   [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
 
 Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
 
@@ -71,54 +93,59 @@ plugins' related OS services. Also note, only one of the supported network
 plugins can be deployed for a given single cluster.
 
 Requirements
---------------
-
-* **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
-  that will run Ansible commands**
-* **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
-* The target servers must have **access to the Internet** in order to pull docker images.
-* The target servers are configured to allow **IPv4 forwarding**.
-* **Your ssh key must be copied** to all the servers part of your inventory.
-* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
-in order to avoid any issue during deployment you should disable your firewall.
+------------
 
+-   **Ansible v2.4 (or newer) and python-netaddr are installed on the machine
+    that will run Ansible commands**
+-   **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
+-   The target servers must have **access to the Internet** in order to pull docker images.
+-   The target servers are configured to allow **IPv4 forwarding**.
+-   **Your ssh key must be copied** to all the servers part of your inventory.
+-   The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
+    In order to avoid any issue during deployment you should disable your firewall.
 
-## Network plugins
+Network Plugins
+---------------
 
 You can choose between 4 network plugins. (default: `calico`, except Vagrant uses `flannel`)
 
-* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
+-   [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
 
-* [**calico**](docs/calico.md): bgp (layer 3) networking.
+-   [calico](docs/calico.md): bgp (layer 3) networking.
 
-* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
+-   [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
 
-* [**contiv**](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
-  apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
+-   [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
+    apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
 
-* [**weave**](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
-(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
+-   [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
+    (Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
 
 The choice is defined with the variable `kube_network_plugin`. There is also an
 option to leverage built-in cloud provider networking instead.
 See also [Network checker](docs/netcheck.md).
 
-## Community docs and resources
- - [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
- - [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
- - [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
- - [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
+Community docs and resources
+----------------------------
+
+-   [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
+-   [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
+-   [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
+-   [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
+
+Tools and projects on top of Kubespray
+--------------------------------------
 
-## Tools and projects on top of Kubespray
- - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
- - [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
- - [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
+-   [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
+-   [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
+-   [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
 
-## CI Tests
+CI Tests
+--------
 
 ![Gitlab Logo](https://s27.postimg.org/wmtaig1wz/gitlabci.png)
 
-[![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines) </br>
+[![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
 
 CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
 See the [test matrix](docs/test_cases.md) for details.
diff --git a/contrib/azurerm/README.md b/contrib/azurerm/README.md
index ac2548c85027bb653e205217ebc8ec7eda268797..c15d3ecf2532fc8e1f91b4ef5f76f165356c3e83 100644
--- a/contrib/azurerm/README.md
+++ b/contrib/azurerm/README.md
@@ -59,6 +59,6 @@ It will create the file ./inventory which can then be used with kubespray, e.g.:
 
 ```shell
 $ cd kubespray-root-dir
-$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
+$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all.yml" cluster.yml
 ```
 
diff --git a/contrib/network-storage/glusterfs/README.md b/contrib/network-storage/glusterfs/README.md
index d7aea26aafde13ba89975c968e4b1a4ddb012251..6c403eb5e29e963c19ee7105255ac9d003ca8dd1 100644
--- a/contrib/network-storage/glusterfs/README.md
+++ b/contrib/network-storage/glusterfs/README.md
@@ -6,16 +6,16 @@ You can either deploy using Ansible on its own by supplying your own inventory f
 
 In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
 
-Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):
+Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings on `inventory/sample/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):
 
 ```
-ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
+ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
 ```
 
 This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
 
 ```
-ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
+ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
 ```
 
 If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
diff --git a/contrib/packaging/rpm/kubespray.spec b/contrib/packaging/rpm/kubespray.spec
index b1cf7f5dc1bbbc7d01ce3771c4a7590caefb1229..7080cf99857547e127b5a98af43f53ec6eda412b 100644
--- a/contrib/packaging/rpm/kubespray.spec
+++ b/contrib/packaging/rpm/kubespray.spec
@@ -47,10 +47,10 @@ export SKIP_PIP_INSTALL=1
 
 %files
 %doc %{_docdir}/%{name}/README.md
-%doc %{_docdir}/%{name}/inventory/inventory.example
+%doc %{_docdir}/%{name}/inventory/sample/hosts.ini
 %config %{_sysconfdir}/%{name}/ansible.cfg
-%config %{_sysconfdir}/%{name}/inventory/group_vars/all.yml
-%config %{_sysconfdir}/%{name}/inventory/group_vars/k8s-cluster.yml
+%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
+%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
 %license %{_docdir}/%{name}/LICENSE
 %{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
 %{_datarootdir}/%{name}/roles/
diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md
index d839529496eb0c5f679b5b8a9fc3797bf89fe203..032f2c93e377e1e490036bed5283a37d3f564c92 100644
--- a/contrib/terraform/openstack/README.md
+++ b/contrib/terraform/openstack/README.md
@@ -200,7 +200,7 @@ if it fails try to connect manually via SSH ... it could be something as simple
 
 ## Configure Cluster variables
 
-Edit`inventory/group_vars/all.yml`:
+Edit `inventory/sample/group_vars/all.yml`:
 - Set variable **bootstrap_os** according selected image
 ```
 # Valid bootstrap options (required): ubuntu, coreos, centos, none
@@ -218,7 +218,7 @@ bin_dir: /opt/bin
 ```
 cloud_provider: openstack
 ```
-Edit`inventory/group_vars/k8s-cluster.yml`:
+Edit `inventory/sample/group_vars/k8s-cluster.yml`:
 - Set variable **kube_network_plugin** according selected networking
 ```
 # Choose network plugin (calico, weave or flannel)
diff --git a/docs/ansible.md b/docs/ansible.md
index feb345c4e769e643556011bcd73c86b7453fa2f9..5e17147bedcb15b75ec5c7950402458977f4e5e3 100644
--- a/docs/ansible.md
+++ b/docs/ansible.md
@@ -27,7 +27,7 @@ not _kube-node_.
 
 There are also two special groups:
 
-* **calico-rr**  : explained for [advanced Calico networking cases](calico.md)
+* **calico-rr** : explained for [advanced Calico networking cases](calico.md)
 * **bastion** : configure a bastion host if your nodes are not directly reachable
 
 Below is a complete inventory example:
@@ -66,10 +66,10 @@ kube-master
 Group vars and overriding variables precedence
 ----------------------------------------------
 
-The group variables to control main deployment options are located in the directory ``inventory/group_vars``.
-Optional variables are located in the `inventory/group_vars/all.yml`.
+The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``.
+Optional variables are located in the `inventory/sample/group_vars/all.yml`.
 Mandatory variables that are common for at least one role (or a node group) can be found in the
-`inventory/group_vars/k8s-cluster.yml`.
+`inventory/sample/group_vars/k8s-cluster.yml`.
 There are also role vars for docker, rkt, kubernetes preinstall and master roles.
 According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
 those cannot be overriden from the group vars. In order to override, one should use
@@ -153,16 +153,16 @@ Example command to filter and apply only DNS configuration tasks and skip
 everything else related to host OS configuration and downloading images of containers:
 
 ```
-ansible-playbook -i inventory/inventory.ini cluster.yml  --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
+ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
 ```
 And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
 ```
-ansible-playbook -i inventory/inventory.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
+ansible-playbook -i inventory/sample/hosts.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
 ```
 And this prepares all container images localy (at the ansible runner node) without installing
 or upgrading related stuff or trying to upload container to K8s cluster nodes:
 ```
-ansible-playbook -i inventory/inventory.ini cluster.yml \
+ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
     -e download_run_once=true -e download_localhost=true \
     --tags download --skip-tags upload,upgrade
 ```
diff --git a/docs/getting-started.md b/docs/getting-started.md
index cb809a13b4e6dd8f8b5a2547071824cde9036ec7..961d1a9cfd821aed2fde568c5f4ff69134023ce5 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -6,7 +6,7 @@ Building your own inventory
 
 Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
 an example inventory located
-[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/inventory.example).
+[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/sample/hosts.ini).
 
 You can use an
 [inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
@@ -19,9 +19,9 @@ certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` hel
 Example inventory generator usage:
 
 ```
-cp -r inventory my_inventory
+cp -r inventory/sample inventory/mycluster
 declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
-CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS[@]}
+CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
 ```
 
 Starting custom deployment
@@ -33,7 +33,7 @@ and start the deployment:
 **IMPORTANT: Edit my_inventory/groups_vars/*.yaml to override data vars**
 
 ```
-ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
+ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
   --private-key=~/.ssh/private_key
 ```
 
@@ -47,7 +47,7 @@ You may want to add **worker** nodes to your existing cluster. This can be done
 - Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
 - Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
 ```
-ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \
+ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
   --private-key=~/.ssh/private_key
 ```
 
diff --git a/docs/upgrades.md b/docs/upgrades.md
index 6f1d913174a27c6ccc8374346fe7e4339adccd3f..6297976ddb5194fff154a81f270af8b9c8441a11 100644
--- a/docs/upgrades.md
+++ b/docs/upgrades.md
@@ -24,13 +24,13 @@ If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
 deploy the following way:
 
 ```
-ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.3
+ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.3
 ```
 
 And then repeat with v1.4.6 as kube_version:
 
 ```
-ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
+ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.6
 ```
 
 #### Graceful upgrade
@@ -44,7 +44,7 @@ deployed.
 ```
 git fetch origin
 git checkout origin/master
-ansible-playbook upgrade-cluster.yml -b -i inventory/inventory.cfg -e kube_version=v1.6.0
+ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.6.0
 ```
 
 After a successul upgrade, the Server Version should be updated:
diff --git a/docs/vsphere.md b/docs/vsphere.md
index 8f91cf078c57f04660e653294be37895b1ad503e..f61c93edd94cf9988e497c29b8715df62e2776ec 100644
--- a/docs/vsphere.md
+++ b/docs/vsphere.md
@@ -16,7 +16,7 @@ After this step you should have:
 
 ## Kubespray configuration
 
-Fist you must define the cloud provider in `inventory/group_vars/all.yml` and set it to `vsphere`.
+First you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`.
 ```yml
 cloud_provider: vsphere
 ```
@@ -58,7 +58,7 @@ vsphere_resource_pool: "K8s-Pool"
 Once the configuration is set, you can execute the playbook again to apply the new configuration
 ```
 cd kubespray
-ansible-playbook -i inventory/inventory.cfg -b -v cluster.yml
+ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml
 ```
 
 You'll find some usefull examples [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere) to test your configuration.
diff --git a/docs/weave.md b/docs/weave.md
index be68a4efd9364cd46f66326b34af5598ddb66b0e..9fcb14f80a2893cadef4319efe6b6d0f30886124 100644
--- a/docs/weave.md
+++ b/docs/weave.md
@@ -12,7 +12,7 @@ Weave encryption is supported for all communication
 * To use Weave encryption, specify a strong password (if no password, no encrytion)
 
 ```
-# In file ./inventory/group_vars/k8s-cluster.yml
+# In file ./inventory/sample/group_vars/k8s-cluster.yml
 weave_password: EnterPasswordHere
 ```
 
@@ -77,14 +77,14 @@ The seed mode also allows multi-clouds and hybrid on-premise/cloud clusters depl
 * Switch from consensus mode to seed mode
 
 ```
-# In file ./inventory/group_vars/k8s-cluster.yml
+# In file ./inventory/sample/group_vars/k8s-cluster.yml
 weave_mode_seed: true
 ```
 
 These two variables are only used when `weave_mode_seed` is set to `true` (**/!\ do not manually change these values**)
 
 ```
-# In file ./inventory/group_vars/k8s-cluster.yml
+# In file ./inventory/sample/group_vars/k8s-cluster.yml
 weave_seed: uninitialized
 weave_peers: uninitialized
 ```
diff --git a/inventory/local/group_vars b/inventory/local/group_vars
new file mode 120000
index 0000000000000000000000000000000000000000..a30ba6832d07a2546ccbd2ae40aa9012864cc9e1
--- /dev/null
+++ b/inventory/local/group_vars
@@ -0,0 +1 @@
+../sample/group_vars
\ No newline at end of file
diff --git a/inventory/local-tests.cfg b/inventory/local/hosts.ini
similarity index 100%
rename from inventory/local-tests.cfg
rename to inventory/local/hosts.ini
diff --git a/inventory/group_vars/all.yml b/inventory/sample/group_vars/all.yml
similarity index 95%
rename from inventory/group_vars/all.yml
rename to inventory/sample/group_vars/all.yml
index 214026fe61b2ca2673e6b965f7140e0b5462cefe..29b14903d0adbe4aae91dcc39a8b3f27d9ea6a0a 100644
--- a/inventory/group_vars/all.yml
+++ b/inventory/sample/group_vars/all.yml
@@ -96,8 +96,8 @@ bin_dir: /usr/local/bin
 
 ## Uncomment to enable experimental kubeadm deployment mode
 #kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6  chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_first: "{{ lookup('password', inventory_dir + '/credentials/kubeadm_token_first length=6  chars=ascii_lowercase,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', inventory_dir + '/credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
 #kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
 #
 ## Set these proxy values in order to update package manager and docker daemon to use proxies
diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml
similarity index 98%
rename from inventory/group_vars/k8s-cluster.yml
rename to inventory/sample/group_vars/k8s-cluster.yml
index a6f6a119505d8becccdef19fb410d3f9cc0338dc..f3b4ec730362024d59102f1e6691c269889b81da 100644
--- a/inventory/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster.yml
@@ -37,7 +37,7 @@ kube_log_level: 2
 
 # Users to create for basic auth in Kubernetes API via HTTP
 # Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
 kube_users:
   kube:
     pass: "{{kube_api_pwd}}"
diff --git a/inventory/inventory.example b/inventory/sample/hosts.ini
similarity index 100%
rename from inventory/inventory.example
rename to inventory/sample/hosts.ini
diff --git a/roles/network_plugin/weave/tasks/seed.yml b/roles/network_plugin/weave/tasks/seed.yml
index 1bca07cd3765ac313e10a96fe012e282ccfb1e90..2765267e58fbbfe68c1df885666de8ebff9f8b0e 100644
--- a/roles/network_plugin/weave/tasks/seed.yml
+++ b/roles/network_plugin/weave/tasks/seed.yml
@@ -33,7 +33,7 @@
 
 - name: Weave seed | Save seed
   lineinfile:
-    dest: "./inventory/group_vars/k8s-cluster.yml"
+    dest: "{{ inventory_dir }}/group_vars/k8s-cluster.yml"
     state: present
     regexp: '^weave_seed:'
     line: 'weave_seed: {{ seed }}'
@@ -45,7 +45,7 @@
 
 - name: Weave seed | Save peers
   lineinfile:
-    dest: "./inventory/group_vars/k8s-cluster.yml"
+    dest: "{{ inventory_dir }}/group_vars/k8s-cluster.yml"
     state: present
     regexp: '^weave_peers:'
     line: 'weave_peers: {{ peers }}'
@@ -53,4 +53,4 @@
   delegate_to: 127.0.0.1
   run_once: true
   tags:
-    - confweave
\ No newline at end of file
+    - confweave
diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml
index 4bbb66b11a7ee342cecd89c0b6b518203320b916..00cd4b7d09670b845fc4feb3debb2442364fea69 100644
--- a/roles/vault/defaults/main.yml
+++ b/roles/vault/defaults/main.yml
@@ -115,7 +115,7 @@ vault_pki_mounts:
     roles:
       - name: vault
         group: vault
-        password: "{{ lookup('password', 'credentials/vault/vault length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/vault length=15') }}"
         policy_rules: default
         role_options: default
   etcd:
@@ -127,7 +127,7 @@ vault_pki_mounts:
     roles:
       - name: etcd
         group: etcd
-        password: "{{ lookup('password', 'credentials/vault/etcd length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/etcd length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
@@ -142,7 +142,7 @@ vault_pki_mounts:
     roles:
       - name: kube-master
         group: kube-master
-        password: "{{ lookup('password', 'credentials/vault/kube-master length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-master length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
@@ -150,7 +150,7 @@ vault_pki_mounts:
           organization: "system:masters"
       - name: kube-node
         group: k8s-cluster
-        password: "{{ lookup('password', 'credentials/vault/kube-node length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-node length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
@@ -158,7 +158,7 @@ vault_pki_mounts:
           organization: "system:nodes"
       - name: kube-proxy
         group: k8s-cluster
-        password: "{{ lookup('password', 'credentials/vault/kube-proxy length=15') }}"
+        password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
diff --git a/setup.cfg b/setup.cfg
index e9ed882ece8958cf827ea46422f7536f67a87967..2327160ad35455e4d9362deab56f0c3b1405ce33 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -32,12 +32,12 @@ data_files =
         LICENSE
         README.md
     /usr/share/doc/kubespray/inventory/ =
-        inventory/inventory.example
+        inventory/sample/hosts.ini
     /etc/kubespray/ =
         ansible.cfg
-    /etc/kubespray/inventory/group_vars/ =
-        inventory/group_vars/all.yml
-        inventory/group_vars/k8s-cluster.yml
+    /etc/kubespray/inventory/sample/group_vars/ =
+        inventory/sample/group_vars/all.yml
+        inventory/sample/group_vars/k8s-cluster.yml
 
 [wheel]
 universal = 1
diff --git a/tests/support/aws.groovy b/tests/support/aws.groovy
index e49b3517b4fe185ea6e7a1b4054a50f02be20fb2..a5ce89b8f0973440cefb4059b5f18ad509ffc4c7 100644
--- a/tests/support/aws.groovy
+++ b/tests/support/aws.groovy
@@ -1,5 +1,5 @@
 def run(username, credentialsId, ami, network_plugin, aws_access, aws_secret) {
-      def inventory_path = pwd() + "/inventory/inventory-test.ini"
+      def inventory_path = pwd() + "/inventory/sample/hosts.ini"
       dir('tests') {
           wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) {
               try {
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index 504023b595934abf76710e88cc36fa20b18ab04f..de5e3a84a46f90349cea7a62dd83ff8d7b41f14c 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -6,7 +6,7 @@
     uri:
       url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port }}/api/v1"
       user: kube
-      password: "{{ lookup('password', '../../credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+      password: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
       validate_certs: no
       status_code: 200,401
     when: not kubeadm_enabled|default(false)