diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml
deleted file mode 100644
index 05d775f903e612a6c81ddd8d476db74374b52557..0000000000000000000000000000000000000000
--- a/inventory/sample/group_vars/all.yml
+++ /dev/null
@@ -1,155 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-#Directory where etcd data stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node.  This is used in flannel to allow other flannel nodes to see
-## this node for example.  The access_ip is really useful AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### ETCD: disable peer client cert authentication.
-# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
-#etcd_peer_client_auth: true
-
-## External LB example config
-## apiserver_loadbalancer_domain_name: "elb.some.domain"
-#loadbalancer_apiserver:
-#  address: 1.2.3.4
-#  port: 1234
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules.  For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers.  These may not be loaded by preinstall kubernetes
-## processes.  For example, ceph and rbd backed volumes.  Set to true to allow kubelet to load kernel
-## modules.
-#kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distributed routes with border routers of the datacenter.
-## Warning : enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each nodes will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-#  - 8.8.8.8
-#  - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When azure is used, you need to also set the following variables.
-## see docs/azure.md for details on how to get these values
-#azure_tenant_id:
-#azure_subscription_id:
-#azure_aad_client_id:
-#azure_aad_client_secret:
-#azure_resource_group:
-#azure_location:
-#azure_subnet_name:
-#azure_security_group_name:
-#azure_vnet_name:
-#azure_vnet_resource_group:
-#azure_route_table_name:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-## To enable automatic floating ip provisioning, specify a subnet.
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-## Override default LBaaS behavior
-#openstack_lbaas_use_octavia: False
-#openstack_lbaas_method: "ROUND_ROBIN"
-#openstack_lbaas_provider: "haproxy"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## When Oracle Cloud Infrastructure is used, set these variables
-#oci_private_key:
-#oci_region_id:
-#oci_tenancy_id:
-#oci_user_id:
-#oci_user_fingerprint:
-#oci_compartment_id:
-#oci_vnc_id:
-#oci_subnet1_id:
-#oci_subnet2_id:
-## Overide these default behaviors if you wish
-#oci_security_list_management: All
-# If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
-#oci_use_instance_principals: false
-#oci_cloud_controller_version: 0.5.0
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hour
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
-## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
-#etcd_memory_limit: "512M"
-
-## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
-## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
-## etcd documentation for more information.
-#etcd_quota_backend_bytes: "2G"
-
-# The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
-#kube_read_only_port: 10255
-
-# Does coreos need auto upgrade, default is true
-#coreos_auto_upgrade: true
-
-# Set true to download and cache container
-#download_container: true
-
diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b066134d92c4121016033fdf31046b553c40be2a
--- /dev/null
+++ b/inventory/sample/group_vars/all/all.yml
@@ -0,0 +1,86 @@
+## Valid bootstrap options (required): ubuntu, coreos, centos, none
+## If the OS is not listed here, it means it doesn't require extra/bootstrap steps.
+## For example, python is not available on 'coreos' so it must be installed before
+## anything else. On the contrary, Debian already has all its dependencies fulfilled, so bootstrap_os should be set to `none`.
+bootstrap_os: none
+
+## Directory where etcd data is stored
+etcd_data_dir: /var/lib/etcd
+
+## Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node.  This is used in flannel to allow other flannel nodes to see
+## this node for example.  The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+
+## External LB example config
+## apiserver_loadbalancer_domain_name: "elb.some.domain"
+#loadbalancer_apiserver:
+#  address: 1.2.3.4
+#  port: 1234
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (6443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules.  For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers.  These may not be loaded by preinstall kubernetes
+## processes.  For example, ceph and rbd backed volumes.  Set to true to allow kubelet to load kernel
+## modules.
+#kubelet_load_modules: false
+
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
+
+## With calico it is possible to distribute routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router
+#peer_with_router: false
+
+## Upstream dns servers used by dnsmasq
+#upstream_dns_servers:
+#  - 8.8.8.8
+#  - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
+
+
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+#http_proxy: ""
+#https_proxy: ""
+
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+#no_proxy: ""
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts or whether a
+## cluster of Hashicorp's Vault is started to issue certificates (using etcd
+## as a backend). Options are "script" or "vault"
+#cert_management: script
+
+## Set to true to allow pre-checks to fail and continue deployment
+#ignore_assert_errors: false
+
+## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
+#kube_read_only_port: 10255
+
+## Set true to download and cache container
+#download_container: true
diff --git a/inventory/sample/group_vars/all/azure.yml b/inventory/sample/group_vars/all/azure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..78d49c9b4d4857ed09c8e3b23bcc58d7337fcc7f
--- /dev/null
+++ b/inventory/sample/group_vars/all/azure.yml
@@ -0,0 +1,14 @@
+## When azure is used, you need to also set the following variables.
+## see docs/azure.md for details on how to get these values
+
+#azure_tenant_id:
+#azure_subscription_id:
+#azure_aad_client_id:
+#azure_aad_client_secret:
+#azure_resource_group:
+#azure_location:
+#azure_subnet_name:
+#azure_security_group_name:
+#azure_vnet_name:
+#azure_vnet_resource_group:
+#azure_route_table_name:
diff --git a/inventory/sample/group_vars/all/coreos.yml b/inventory/sample/group_vars/all/coreos.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a48f24ebbc3639f5d906e45393adb1bf170a655b
--- /dev/null
+++ b/inventory/sample/group_vars/all/coreos.yml
@@ -0,0 +1,2 @@
+## Whether CoreOS needs auto upgrade; default is true
+#coreos_auto_upgrade: true
diff --git a/inventory/sample/group_vars/all/docker.yml b/inventory/sample/group_vars/all/docker.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3fb169e33e85167356ba734816cfd9a644c937f4
--- /dev/null
+++ b/inventory/sample/group_vars/all/docker.yml
@@ -0,0 +1,35 @@
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+
+#docker_storage_options: -s overlay2
+
+## Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+
+#docker_dns_servers_strict: false
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## Used to set docker daemon iptables options to true
+#docker_iptables_enabled: "true"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+docker_options: >-
+  --insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
+  {%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
+  --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
+  --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
+  --userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
+  {%- endif -%}
+
+docker_bin_dir: "/usr/bin"
+
+## If non-empty will override default system MountFlags value.
+## This option takes a mount propagation flag: shared, slave
+## or private, which control whether mounts in the file system
+## namespace set up for docker will receive or propagate mounts
+## and unmounts. Leave empty for system default
+docker_mount_flags:
diff --git a/inventory/sample/group_vars/all/oci.yml b/inventory/sample/group_vars/all/oci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fd83080dd80293b194bfebb1899089077c45a7d2
--- /dev/null
+++ b/inventory/sample/group_vars/all/oci.yml
@@ -0,0 +1,15 @@
+## When Oracle Cloud Infrastructure is used, set these variables
+#oci_private_key:
+#oci_region_id:
+#oci_tenancy_id:
+#oci_user_id:
+#oci_user_fingerprint:
+#oci_compartment_id:
+#oci_vnc_id:
+#oci_subnet1_id:
+#oci_subnet2_id:
+## Override these default behaviors if you wish
+#oci_security_list_management: All
+# If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
+#oci_use_instance_principals: false
+#oci_cloud_controller_version: 0.5.0
diff --git a/inventory/sample/group_vars/all/openstack.yml b/inventory/sample/group_vars/all/openstack.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6347d0522f229bcb8e4be257f35be5b2e5b4f584
--- /dev/null
+++ b/inventory/sample/group_vars/all/openstack.yml
@@ -0,0 +1,15 @@
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
+#openstack_blockstorage_version: "v1/v2/auto (default)"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
+#openstack_lbaas_enabled: True
+#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+## To enable automatic floating ip provisioning, specify a subnet.
+#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+## Override default LBaaS behavior
+#openstack_lbaas_use_octavia: False
+#openstack_lbaas_method: "ROUND_ROBIN"
+#openstack_lbaas_provider: "haproxy"
+#openstack_lbaas_create_monitor: "yes"
+#openstack_lbaas_monitor_delay: "1m"
+#openstack_lbaas_monitor_timeout: "30s"
+#openstack_lbaas_monitor_max_retries: "3"
diff --git a/inventory/sample/group_vars/etcd.yml b/inventory/sample/group_vars/etcd.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6f5347cb956c08b07f9aca0b312fa817a1a97f98
--- /dev/null
+++ b/inventory/sample/group_vars/etcd.yml
@@ -0,0 +1,18 @@
+## Etcd auto compaction retention for mvcc key value store in hour
+#etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
+#etcd_metrics: basic
+
+## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
+## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
+#etcd_memory_limit: "512M"
+
+## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
+## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
+## etcd documentation for more information.
+#etcd_quota_backend_bytes: "2G"
+
+### ETCD: disable peer client cert authentication.
+# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
+#etcd_peer_client_auth: true
diff --git a/inventory/sample/group_vars/k8s-cluster/addons.yml b/inventory/sample/group_vars/k8s-cluster/addons.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7c9057e715e2fb8047a48259eebbaa7142734a08
--- /dev/null
+++ b/inventory/sample/group_vars/k8s-cluster/addons.yml
@@ -0,0 +1,54 @@
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Registry deployment
+registry_enabled: false
+# registry_namespace: "{{ system_namespace }}"
+# registry_storage_class: ""
+# registry_disk_size: "10Gi"
+
+# Local volume provisioner deployment
+local_volume_provisioner_enabled: false
+# local_volume_provisioner_namespace: "{{ system_namespace }}"
+# local_volume_provisioner_base_dir: /mnt/disks
+# local_volume_provisioner_mount_dir: /mnt/disks
+# local_volume_provisioner_storage_class: local-storage
+
+# CephFS provisioner deployment
+cephfs_provisioner_enabled: false
+# cephfs_provisioner_namespace: "cephfs-provisioner"
+# cephfs_provisioner_cluster: ceph
+# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
+# cephfs_provisioner_admin_id: admin
+# cephfs_provisioner_secret: secret
+# cephfs_provisioner_storage_class: cephfs
+# cephfs_provisioner_reclaim_policy: Delete
+# cephfs_provisioner_claim_root: /volumes
+# cephfs_provisioner_deterministic_names: true
+
+# Nginx ingress controller deployment
+ingress_nginx_enabled: false
+# ingress_nginx_host_network: false
+# ingress_nginx_nodeselector:
+#   node-role.kubernetes.io/master: "true"
+# ingress_nginx_namespace: "ingress-nginx"
+# ingress_nginx_insecure_port: 80
+# ingress_nginx_secure_port: 443
+# ingress_nginx_configmap:
+#   map-hash-bucket-size: "128"
+#   ssl-protocols: "SSLv2"
+# ingress_nginx_configmap_tcp_services:
+#   9000: "default/example-go:8080"
+# ingress_nginx_configmap_udp_services:
+#   53: "kube-system/kube-dns:53"
+
+# Cert manager deployment
+cert_manager_enabled: false
+# cert_manager_namespace: "cert-manager"
diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
similarity index 68%
rename from inventory/sample/group_vars/k8s-cluster.yml
rename to inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
index 749d71c42b471f37fde68c64f4ec5db803704f48..c11829d5246103253247210a08972b9a9e608d1f 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
@@ -135,38 +135,11 @@ skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipad
 dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
 dns_domain: "{{ cluster_name }}"
 
-# Container runtime
-# docker for docker and crio for cri-o.
+## Container runtime
+## docker for docker and crio for cri-o.
 container_manager: docker
 
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## Used to set docker daemon iptables options to true
-#docker_iptables_enabled: "true"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: >-
-  --insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
-  {%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
-  --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
-  --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
-  --userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
-  {%- endif -%}
-docker_bin_dir: "/usr/bin"
-
-## If non-empty will override default system MounFlags value.
-## This option takes a mount propagation flag: shared, slave
-## or private, which control whether mounts in the file system
-## namespace set up for docker will receive or propagate mounts
-## and unmounts. Leave empty for system default
-docker_mount_flags:
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
+## Settings for containerized control plane (etcd/kubelet/secrets)
 etcd_deployment_type: docker
 kubelet_deployment_type: host
 vault_deployment_type: docker
@@ -181,64 +154,6 @@ kubernetes_audit: false
 # pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
 podsecuritypolicy_enabled: false
 
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Registry deployment
-registry_enabled: false
-# registry_namespace: "{{ system_namespace }}"
-# registry_storage_class: ""
-# registry_disk_size: "10Gi"
-
-# Local volume provisioner deployment
-local_volume_provisioner_enabled: false
-# local_volume_provisioner_namespace: "{{ system_namespace }}"
-# local_volume_provisioner_base_dir: /mnt/disks
-# local_volume_provisioner_mount_dir: /mnt/disks
-# local_volume_provisioner_storage_class: local-storage
-
-# CephFS provisioner deployment
-cephfs_provisioner_enabled: false
-# cephfs_provisioner_namespace: "cephfs-provisioner"
-# cephfs_provisioner_cluster: ceph
-# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
-# cephfs_provisioner_admin_id: admin
-# cephfs_provisioner_secret: secret
-# cephfs_provisioner_storage_class: cephfs
-# cephfs_provisioner_reclaim_policy: Delete
-# cephfs_provisioner_claim_root: /volumes
-# cephfs_provisioner_deterministic_names: true
-
-# Nginx ingress controller deployment
-ingress_nginx_enabled: false
-# ingress_nginx_host_network: false
-# ingress_nginx_nodeselector:
-#   node-role.kubernetes.io/master: "true"
-# ingress_nginx_namespace: "ingress-nginx"
-# ingress_nginx_insecure_port: 80
-# ingress_nginx_secure_port: 443
-# ingress_nginx_configmap:
-#   map-hash-bucket-size: "128"
-#   ssl-protocols: "SSLv2"
-# ingress_nginx_configmap_tcp_services:
-#   9000: "default/example-go:8080"
-# ingress_nginx_configmap_udp_services:
-#   53: "kube-system/kube-dns:53"
-
-# Cert manager deployment
-cert_manager_enabled: false
-# cert_manager_namespace: "cert-manager"
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
-
 # Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
 # kubeconfig_localhost: false
 # Download kubectl onto the host that runs Ansible in {{ bin_dir }}
@@ -264,3 +179,6 @@ persistent_volumes_enabled: false
 ## See https://github.com/kubernetes-incubator/kubespray/issues/2141
 ## Set this variable to true to get rid of this issue
 volume_cross_zone_attachment: false
+
+# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
+persistent_volumes_enabled: false
diff --git a/inventory/sample/hosts.ini b/inventory/sample/hosts.ini
index 3c324bed3e0ce02779b7c2d2d8e24df396d5e7d7..80c854d0a1773ee0ed790561c181d970151769dc 100644
--- a/inventory/sample/hosts.ini
+++ b/inventory/sample/hosts.ini
@@ -27,6 +27,6 @@
 # node5
 # node6
 
-# [k8s-cluster:children]
-# kube-master
-# kube-node
+[k8s-cluster:children]
+kube-master
+kube-node