diff --git a/docs/local-storage-provisioner.md b/docs/local-storage-provisioner.md
new file mode 100644
index 0000000000000000000000000000000000000000..9895cc473d23bd2c557fb5dbe26d705107ee463b
--- /dev/null
+++ b/docs/local-storage-provisioner.md
@@ -0,0 +1,67 @@
+# Local Storage Provisioner
+
+The local storage provisioner is NOT a dynamic storage provisioner as you would
+expect from a cloud provider. Instead, it simply creates PersistentVolumes for
+all manually created volumes located under the directory configured by
+`local_volume_base_dir`. The default path is `/mnt/disks`, and the rest of this
+doc uses that path as an example.
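+
+Once `local_volumes_enabled` is set and volumes exist under that directory, the
+provisioner should create one PersistentVolume per volume, carrying the
+`local-storage` storage class configured by this addon. A minimal sketch to
+verify the result:
+
+  ```
+  # After enabling local_volumes_enabled and creating volumes on the nodes:
+  kubectl get pv
+  ```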
+
+## Examples of creating local storage volumes
+
+### tmpfs method
+
+  ```
+  for vol in vol1 vol2 vol3; do
+    mkdir /mnt/disks/$vol
+    mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
+  done
+  ```
+
+The tmpfs method is not recommended for production because the mount is not
+persistent and data will be deleted on reboot.
+
+### Mount physical disks
+
+  ```
+  mkdir /mnt/disks/ssd1
+  mount /dev/vdb1 /mnt/disks/ssd1
+  ```
+
+Physical disks are recommended for production environments because they offer
+complete isolation in terms of I/O and capacity.
+
+### File-backed sparse file method
+
+  ```
+  truncate /mnt/disks/disk5 --size 2G
+  mkfs.ext4 /mnt/disks/disk5
+  mkdir /mnt/disks/vol5
+  mount /mnt/disks/disk5 /mnt/disks/vol5
+  ```
+
+If you are in a development environment with only one disk, this is the best
+way to enforce a size limit on each persistent volume.
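+
+To confirm that the size limit is enforced, check the mounted filesystem; a
+rough sketch (the reported size will be slightly below 2G because of
+filesystem overhead):
+
+  ```
+  df -h /mnt/disks/vol5
+  ```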
+
+### Simple directories
+
+  ```
+  for vol in vol6 vol7 vol8; do
+    mkdir /mnt/disks/$vol
+  done
+  ```
+
+This is also acceptable in a development environment, but there is no capacity
+management.
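+
+In this case the volumes simply share the capacity of the parent filesystem,
+which you can see by checking any of the directories:
+
+  ```
+  df -h /mnt/disks/vol6
+  ```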
+
+## Usage notes
+
+The volume provisioner cannot calculate the size of volumes created while it
+is running, so you should delete the provisioner DaemonSet pod on the relevant
+host after creating volumes. The recreated pod will read the sizes correctly.
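+
+A sketch of how to do that, assuming the default `kube-system` system
+namespace and that `NODE` holds the name of the relevant host (the pod name
+below is just a placeholder):
+
+  ```
+  kubectl -n kube-system get pods -o wide | grep local-volume-provisioner | grep $NODE
+  kubectl -n kube-system delete pod local-volume-provisioner-xxxxx
+  ```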
+
+Make sure that any mounts persist across reboots, for example via /etc/fstab
+or systemd mount units (on CoreOS/Container Linux). Pods with persistent
+volume claims will not be able to start if the mounts become unavailable.
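+
+For example, to persist the physical disk mount shown above via /etc/fstab
+(adjust the device name and filesystem type to your environment):
+
+  ```
+  echo '/dev/vdb1 /mnt/disks/ssd1 ext4 defaults 0 2' >> /etc/fstab
+  ```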
+
+## Further reading
+
+Refer to the upstream docs here: https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume
diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml
index 6473c8d32d66103ef84af29bebf69ce30d8c4885..09f736af006c490ca9f6bc4bb56787880eaf750a 100644
--- a/inventory/group_vars/k8s-cluster.yml
+++ b/inventory/group_vars/k8s-cluster.yml
@@ -151,9 +151,12 @@ efk_enabled: false
 # Helm deployment
 helm_enabled: false
 
-# Istio depoyment
+# Istio deployment
 istio_enabled: false
 
+# Local volume provisioner deployment
+local_volumes_enabled: false
+
 # Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
 # kubeconfig_localhost: false
 # Download kubectl onto the host that runs Ansible in GITDIR/artifacts
diff --git a/roles/kubernetes-apps/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/local_volume_provisioner/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b29c1584944062d2d65ded7c9053bbaf08aef0f2
--- /dev/null
+++ b/roles/kubernetes-apps/local_volume_provisioner/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+local_volume_provisioner_bootstrap_image_repo: quay.io/external_storage/local-volume-provisioner-bootstrap
+local_volume_provisioner_bootstrap_image_tag: v1.0.0
+
+local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
+local_volume_provisioner_image_tag: v1.0.0
diff --git a/roles/kubernetes-apps/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/local_volume_provisioner/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4e590d96485d17f1b651ccef0b702dc2f5fe3660
--- /dev/null
+++ b/roles/kubernetes-apps/local_volume_provisioner/tasks/main.yml
@@ -0,0 +1,42 @@
+---
+- name: Local Volume Provisioner | Ensure base dir is created on all hosts
+  file:
+    path: "{{ local_volume_base_dir }}"
+    state: directory
+    owner: root
+    group: root
+    mode: 0700
+  delegate_to: "{{ item }}"
+  with_items: "{{ groups['k8s-cluster'] }}"
+  failed_when: false
+
+- name: Local Volume Provisioner | Create addon dir
+  file:
+    path: "{{ kube_config_dir }}/addons/local_volume_provisioner"
+    owner: root
+    group: root
+    mode: 0755
+    recurse: true
+
+- name: Local Volume Provisioner | Create manifests
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}"
+  with_items:
+    - {name: local-storage-provisioner-pv-binding, file: provisioner-admin-account.yml, type: clusterrolebinding}
+    - {name: local-volume-config, file: volume-config.yml, type: configmap}
+    - {name: local-volume-provisioner, file: provisioner-ds.yml, type: daemonset}
+  register: local_volume_manifests
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Local Volume Provisioner | Apply manifests
+  kube:
+    name: "{{ item.item.name }}"
+    namespace: "{{ system_namespace }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}"
+    state: "latest"
+  with_items: "{{ local_volume_manifests.results }}"
+  when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/local_volume_provisioner/templates/provisioner-admin-account.yml.j2 b/roles/kubernetes-apps/local_volume_provisioner/templates/provisioner-admin-account.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..5c5c2eb51c3cfcc32ef9ff4dc4147d0ef3413eab
--- /dev/null
+++ b/roles/kubernetes-apps/local_volume_provisioner/templates/provisioner-admin-account.yml.j2
@@ -0,0 +1,34 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: local-storage-admin
+  namespace: "{{ system_namespace }}"
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: local-storage-provisioner-pv-binding
+subjects:
+- kind: ServiceAccount
+  name: local-storage-admin
+  namespace: "{{ system_namespace }}"
+roleRef:
+  kind: ClusterRole
+  name: system:persistent-volume-provisioner
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: local-storage-provisioner-node-binding
+subjects:
+- kind: ServiceAccount
+  name: local-storage-admin
+  namespace: "{{ system_namespace }}"
+roleRef:
+  kind: ClusterRole
+  name: system:node
+  apiGroup: rbac.authorization.k8s.io
+
diff --git a/roles/kubernetes-apps/local_volume_provisioner/templates/provisioner-ds.yml.j2 b/roles/kubernetes-apps/local_volume_provisioner/templates/provisioner-ds.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..302b17a62020f7ab71bc4d5ce3474e5b17270e37
--- /dev/null
+++ b/roles/kubernetes-apps/local_volume_provisioner/templates/provisioner-ds.yml.j2
@@ -0,0 +1,42 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: local-volume-provisioner
+  namespace: "{{ system_namespace }}"
+spec:
+  template:
+    metadata:
+      labels:
+        app: local-volume-provisioner
+    spec:
+      containers:
+      - name: provisioner
+        image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }}
+        imagePullPolicy: {{ k8s_image_pull_policy }}
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - name: discovery-vol
+          mountPath: "/local-disks"
+        - name: local-volume-config
+          mountPath: /etc/provisioner/config/
+        env:
+        - name: MY_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: MY_NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+      volumes:
+      - name: discovery-vol
+        hostPath:
+          path: "{{ local_volume_base_dir }}"
+      - name: local-volume-config
+        configMap:
+          defaultMode: 420
+          name: local-volume-config
+      serviceAccountName: local-storage-admin
diff --git a/roles/kubernetes-apps/local_volume_provisioner/templates/volume-config.yml.j2 b/roles/kubernetes-apps/local_volume_provisioner/templates/volume-config.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..97a61fe5cbc0587a7caa9645acf5625f99db6926
--- /dev/null
+++ b/roles/kubernetes-apps/local_volume_provisioner/templates/volume-config.yml.j2
@@ -0,0 +1,12 @@
+# This ConfigMap configures local volume discovery for the local volume provisioner.
+# It is a map from storage class to its mount configuration.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-volume-config
+  namespace: {{ system_namespace }}
+data:
+  storageClassMap: |
+    local-storage:
+      hostDir: "{{ local_volume_base_dir }}"
+      mountDir: "/mnt/local-storage/"
diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml
index 13aa11e75309c772c57d4c15808c8e97cf40f2b7..1cd093b33cc5dcf03e23dcfa790deb2e43aa1635 100644
--- a/roles/kubernetes-apps/meta/main.yml
+++ b/roles/kubernetes-apps/meta/main.yml
@@ -20,6 +20,14 @@ dependencies:
     tags:
       - apps
       - helm
+  - role: kubernetes-apps/local_volume_provisioner
+    when: local_volumes_enabled
+    tags:
+      - apps
+      - local_volume_provisioner
+      - storage
+  # istio role should be last because it takes a long time to initialize and
+  # will cause timeouts trying to start other addons.
   - role: kubernetes-apps/istio
     when: istio_enabled
     tags:
diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2
index 94c7f79a5a2fef6090489f7f29d9b2dd0a40c26a..1f5212dca3efb9f8f4300d0bb120c4dcfd8cc3b6 100644
--- a/roles/kubernetes/node/templates/kubelet-container.j2
+++ b/roles/kubernetes/node/templates/kubelet-container.j2
@@ -26,6 +26,7 @@
   -v /var/run:/var/run:rw \
   -v {{kube_config_dir}}:{{kube_config_dir}}:ro \
   -v /etc/os-release:/etc/os-release:ro \
+  -v {{ local_volume_base_dir }}:{{ local_volume_base_dir }}:shared \
   {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
   ./hyperkube kubelet \
   "$@"
diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
index db7a4845ce40b892af67b0df6ac603698ca5a6d4..f602319f2f57b4aa3a47005149249f4e4c47b4d6 100644
--- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
@@ -32,6 +32,7 @@ ExecStart=/usr/bin/rkt run \
         --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
         --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
         --volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \
+        --volume local-volume-base-dir,kind=host,source={{ local_volume_base_dir }},readOnly=false,recursive=true \
         --mount volume=etc-cni,target=/etc/cni \
         --mount volume=opt-cni,target=/opt/cni \
         --mount volume=var-lib-cni,target=/var/lib/cni \
@@ -49,6 +50,7 @@ ExecStart=/usr/bin/rkt run \
         --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
         --mount volume=var-log,target=/var/log \
         --mount volume=hosts,target=/etc/hosts \
+        --mount volume=local-volume-base-dir,target={{ local_volume_base_dir }} \
         --stage1-from-dir=stage1-fly.aci \
 {% if kube_hyperkube_image_repo == "docker" %}
         --insecure-options=image \
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 3e69c6ddc12e0dd20ef7438505851d7f927a9247..17d769ab8efb072630d8834863c6bbc5bddc241c 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -136,10 +136,16 @@ kubectl_localhost: false
 
 # K8s image pull policy (imagePullPolicy)
 k8s_image_pull_policy: IfNotPresent
+
+# Addons which can be enabled
 efk_enabled: false
 helm_enabled: false
 istio_enabled: false
 enable_network_policy: false
+local_volumes_enabled: false
+
+# Base path for local volume provisioner addon
+local_volume_base_dir: /mnt/disks
 
 ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
 # openstack_blockstorage_version: "v1/v2/auto (default)"
@@ -160,7 +166,7 @@ rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
 
 ## List of key=value pairs that describe feature gates for
 ## the k8s cluster.
-kube_feature_gates: ['Initializers=true']
+kube_feature_gates: ['Initializers=true', 'PersistentLocalVolumes={{ local_volumes_enabled|string }}']
 
 # Vault data dirs.
 vault_base_dir: /etc/vault
diff --git a/tests/files/centos7-flannel-addons.yml b/tests/files/centos7-flannel-addons.yml
index 70da8d13e61a4bae0e065a979c44944814bd6ffa..8824df4a13fa1c58a07f7daf774f8576d1933a1d 100644
--- a/tests/files/centos7-flannel-addons.yml
+++ b/tests/files/centos7-flannel-addons.yml
@@ -9,6 +9,7 @@ kube_network_plugin: flannel
 helm_enabled: true
 istio_enabled: true
 efk_enabled: true
+local_volumes_enabled: true
 deploy_netchecker: true
 kubedns_min_replicas: 1
 cloud_provider: gce