diff --git a/docs/vsphere-csi.md b/docs/vsphere-csi.md
new file mode 100644
index 0000000000000000000000000000000000000000..15f3edf8ca71dc1191fc12f8402b0c5ec7d7e8f9
--- /dev/null
+++ b/docs/vsphere-csi.md
@@ -0,0 +1,90 @@
+# vSphere CSI Driver
+
+The vSphere CSI driver allows you to provision volumes on top of a vSphere deployment. The historic Kubernetes in-tree vSphere cloud provider is deprecated and will be removed in a future release.
+
+To enable the vSphere CSI driver, uncomment the `vsphere_csi_enabled` option in `group_vars/all/vsphere.yml` and set it to `true`.
+
+To set the number of replicas for the vSphere CSI controller, change the `vsphere_csi_controller_replicas` option in `group_vars/all/vsphere.yml`.
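+
+For example, a minimal sketch of these two options in `group_vars/all/vsphere.yml` (the replica count shown here is only illustrative):
+
+```yml
+vsphere_csi_enabled: true
+vsphere_csi_controller_replicas: 2
+```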
+
+You need to fill in the vSphere credentials used to deploy the machines that will host Kubernetes.
+
+| Variable                                    | Required | Type    | Choices                    | Default                   | Comment                                                        |
+|---------------------------------------------|----------|---------|----------------------------|---------------------------|----------------------------------------------------------------|
+| external_vsphere_vcenter_ip                 | TRUE     | string  |                            |                           | IP/URL of the vCenter                                          |
+| external_vsphere_vcenter_port               | TRUE     | string  |                            | "443"                     | Port of the vCenter API                                        |
+| external_vsphere_insecure                   | TRUE     | string  | "true", "false"            | "true"                    | set to "true" if the host above uses a self-signed cert        |
+| external_vsphere_user                       | TRUE     | string  |                            |                           | User name for vCenter with required privileges                 |
+| external_vsphere_password                   | TRUE     | string  |                            |                           | Password for vCenter                                           |
+| external_vsphere_datacenter                 | TRUE     | string  |                            |                           | Datacenter name to use                                         |
+| external_vsphere_kubernetes_cluster_id      | TRUE     | string  |                            | "kubernetes-cluster-id"   | Kubernetes cluster ID to use                                   |
+| external_vsphere_cloud_controller_image_tag | TRUE     | string  |                            | "latest"                  | Cloud controller image tag to use                              |
+| vsphere_syncer_image_tag                    | TRUE     | string  |                            | "v1.0.2"                  | Syncer image tag to use                                        |
+| vsphere_csi_attacher_image_tag              | TRUE     | string  |                            | "v1.1.1"                  | CSI attacher image tag to use                                  |
+| vsphere_csi_controller                      | TRUE     | string  |                            | "v1.0.2"                  | CSI controller image tag to use                                |
+| vsphere_csi_controller_replicas             | TRUE     | integer |                            | 1                         | Number of pods Kubernetes should deploy for the CSI controller |
+| vsphere_csi_liveness_probe_image_tag        | TRUE     | string  |                            | "v1.1.0"                  | CSI liveness probe image tag to use                            |
+| vsphere_csi_provisioner_image_tag           | TRUE     | string  |                            | "v1.2.2"                  | CSI provisioner image tag to use                               |
+| vsphere_csi_node_driver_registrar_image_tag | TRUE     | string  |                            | "v1.1.0"                  | CSI node driver registrar image tag to use                     |
+| vsphere_csi_driver_image_tag                | TRUE     | string  |                            | "v1.0.2"                  | CSI driver image tag to use                                    |
+
+## Usage example
+
+To test the dynamic provisioning using vSphere CSI driver, make sure to create a [storage policy](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#create-a-storage-policy) and [storage class](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#create-a-storageclass), then apply the following manifest:
+
+```yml
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: csi-pvc-vsphere
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: Space-Efficient
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx
+spec:
+  containers:
+  - image: nginx
+    imagePullPolicy: IfNotPresent
+    name: nginx
+    ports:
+    - containerPort: 80
+      protocol: TCP
+    volumeMounts:
+      - mountPath: /var/lib/www/html
+        name: csi-data-vsphere
+  volumes:
+  - name: csi-data-vsphere
+    persistentVolumeClaim:
+      claimName: csi-pvc-vsphere
+      readOnly: false
+```
+
+Save this manifest as `nginx.yml` and apply it to your cluster: `kubectl apply -f nginx.yml`
+
+You should see the PVC provisioned and bound:
+
+```ShellSession
+$ kubectl get pvc
+NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS      AGE
+csi-pvc-vsphere   Bound    pvc-dc7b1d21-ee41-45e1-98d9-e877cc1533ac   1Gi        RWO            Space-Efficient   10s
+```
+
+And the volume should be mounted into the Nginx Pod (wait until the Pod is Running):
+
+```ShellSession
+$ kubectl exec -it nginx -- df -h | grep /var/lib/www/html
+/dev/sdb         976M  2.6M  907M   1% /var/lib/www/html
+```
+
+## More info
+
+For further information about the vSphere CSI Driver, you can refer to the official [vSphere Cloud Provider documentation](https://cloud-provider-vsphere.sigs.k8s.io/container_storage_interface.html).
diff --git a/docs/vsphere.md b/docs/vsphere.md
index 55984c7f589f2d5779bd910fa3a51a0869a772bd..5881a38b33259f9c5fda53a8b5fd853fa81b9d3c 100644
--- a/docs/vsphere.md
+++ b/docs/vsphere.md
@@ -1,13 +1,75 @@
-# vSphere cloud provider
+# vSphere
 
-Kubespray can be deployed with vSphere as Cloud provider. This feature supports
+Kubespray can be deployed with vSphere as the cloud provider. This feature supports:
 
 - Volumes
 - Persistent Volumes
-- Storage Classes and provisioning of volumes.
-- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes.
+- Storage Classes and provisioning of volumes
+- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes
 
-## Prerequisites
+## Out-of-tree vSphere cloud provider
+
+### Prerequisites
+
+You first need to configure your vSphere environment by following the [official documentation](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#prerequisites).
+
+After this step you should have:
+
+- vSphere upgraded to 6.7 U3 or later
+- VM hardware upgraded to version 15 or higher
+- UUID activated for each VM where Kubernetes will be deployed (see the `govc` sketch below)
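+
+If the `disk.EnableUUID` parameter is not already set on your VMs, it can be enabled with [govc](https://github.com/vmware/govmomi/tree/master/govc). A minimal sketch, assuming `govc` is installed, the `GOVC_*` environment variables point at your vCenter, and the VM path is purely illustrative:
+
+```ShellSession
+govc vm.change -vm '/DATACENTER_name/vm/k8s-node-1' -e="disk.enableUUID=1"
+```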
+
+### Kubespray configuration
+
+First, in `inventory/sample/group_vars/all/all.yml`, you must set `cloud_provider` to `external` and `external_cloud_provider` to `vsphere`.
+
+```yml
+cloud_provider: "external"
+external_cloud_provider: "vsphere"
+```
+
+Then, in `inventory/sample/group_vars/all/vsphere.yml`, you need to declare your vCenter credentials and enable the vSphere CSI driver, following the description below.
+
+| Variable                               | Required | Type    | Choices                    | Default                   | Comment                                                  |
+|----------------------------------------|----------|---------|----------------------------|---------------------------|----------------------------------------------------------|
+| external_vsphere_vcenter_ip            | TRUE     | string  |                            |                           | IP/URL of the vCenter                                   |
+| external_vsphere_vcenter_port          | TRUE     | string  |                            | "443"                     | Port of the vCenter API                                 |
+| external_vsphere_insecure              | TRUE     | string  | "true", "false"            | "true"                    | set to "true" if the host above uses a self-signed cert |
+| external_vsphere_user                  | TRUE     | string  |                            |                           | User name for vCenter with required privileges          |
+| external_vsphere_password              | TRUE     | string  |                            |                           | Password for vCenter                                    |
+| external_vsphere_datacenter            | TRUE     | string  |                            |                           | Datacenter name to use                                  |
+| external_vsphere_kubernetes_cluster_id | TRUE     | string  |                            | "kubernetes-cluster-id"   | Kubernetes cluster ID to use                            |
+| vsphere_csi_enabled                    | TRUE     | boolean |                            | false                     | Enable vSphere CSI                                      |
+
+Example configuration:
+
+```yml
+external_vsphere_vcenter_ip: "myvcenter.domain.com"
+external_vsphere_vcenter_port: "443"
+external_vsphere_insecure: "true"
+external_vsphere_user: "administrator@vsphere.local"
+external_vsphere_password: "K8s_admin"
+external_vsphere_datacenter: "DATACENTER_name"
+external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
+vsphere_csi_enabled: true
+```
+
+For a more fine-grained CSI setup, refer to the [vsphere-csi](vsphere-csi.md) documentation.
+
+### Deployment
+
+Once the configuration is set, you can execute the playbook again to apply the new configuration:
+
+```ShellSession
+cd kubespray
+ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml
+```
+
+You'll find some useful examples [here](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#sample-manifests-to-test-csi-driver-functionality) to test your configuration.
+
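+Once the playbook has run, you can check that the components deployed by this configuration are up in the `kube-system` namespace (the CSI resources are only present if `vsphere_csi_enabled` is set to `true`; the resource names below match the manifests shipped by Kubespray):
+
+```ShellSession
+kubectl -n kube-system get daemonset vsphere-cloud-controller-manager
+kubectl -n kube-system get statefulset vsphere-csi-controller
+kubectl -n kube-system get daemonset vsphere-csi-node
+```
+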
+## In-tree vSphere cloud provider ([deprecated](https://cloud-provider-vsphere.sigs.k8s.io/concepts/in_tree_vs_out_of_tree.html))
+
+### Prerequisites (deprecated)
 
 You need at first to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).
 
@@ -18,7 +80,7 @@ After this step you should have:
 
 If you intend to leverage the [zone and region node labeling](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domain-beta-kubernetes-io-region), create a tag category for both the zone and region in vCenter.  The tags can then be applied at the host, cluster, datacenter, or folder level, and the cloud provider will walk the hierarchy to extract and apply the labels to the Kubernetes nodes.
 
-## Kubespray configuration
+### Kubespray configuration (deprecated)
 
 First you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`.
 
@@ -26,7 +88,7 @@ First you must define the cloud provider in `inventory/sample/group_vars/all.yml
 cloud_provider: vsphere
 ```
 
-Then, in the same file, you need to declare your vCenter credential following the description below.
+Then, in the same file, you need to declare your vCenter credentials following the description below.
 
 | Variable                     | Required | Type    | Choices                    | Default | Comment                                                                                                                                                                                       |
 |------------------------------|----------|---------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
@@ -45,7 +107,7 @@ Then, in the same file, you need to declare your vCenter credential following th
 | vsphere_zone_category        | FALSE    | string  |                            |         | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/zone` label on nodes (Optional, only used for Kubernetes >= 1.12.0)                                                                                                                                                 |
 | vsphere_region_category      | FALSE    | string  |                            |         | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/region` label on nodes (Optional, only used for Kubernetes >= 1.12.0)                                                                                                                                                 |
 
-Example configuration
+Example configuration:
 
 ```yml
 vsphere_vcenter_ip: "myvcenter.domain.com"
@@ -60,9 +122,9 @@ vsphere_scsi_controller_type: "pvscsi"
 vsphere_resource_pool: "K8s-Pool"
 ```
 
-## Deployment
+### Deployment (deprecated)
 
-Once the configuration is set, you can execute the playbook again to apply the new configuration
+Once the configuration is set, you can execute the playbook again to apply the new configuration:
 
 ```ShellSession
 cd kubespray
diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml
index eb15b315c9376dc5f02b631788d3133aad6d3eb0..9e1ba0c6fd8dc58736e2976dbe795af05ad8f37f 100644
--- a/inventory/sample/group_vars/all/all.yml
+++ b/inventory/sample/group_vars/all/all.yml
@@ -54,8 +54,8 @@ loadbalancer_apiserver_healthcheck_port: 8081
 # cloud_provider:
 
 ## When cloud_provider is set to 'external', you can set the cloud controller to deploy
-## Supported cloud controllers are: 'openstack'
-## When openstack is used make sure to source in the openstack credentials
+## Supported cloud controllers are: 'openstack' and 'vsphere'
+## When openstack or vsphere is used, make sure to source in the required fields
 # external_cloud_provider:
 
 ## Set these proxy values in order to update package manager and docker daemon to use proxies
diff --git a/inventory/sample/group_vars/all/vsphere.yml b/inventory/sample/group_vars/all/vsphere.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4ceae9fed4bc06cad1456712e241a700dec543de
--- /dev/null
+++ b/inventory/sample/group_vars/all/vsphere.yml
@@ -0,0 +1,20 @@
+## Values for the external vSphere Cloud Provider
+# external_vsphere_vcenter_ip: "myvcenter.domain.com"
+# external_vsphere_vcenter_port: "443"
+# external_vsphere_insecure: "true"
+# external_vsphere_user: "administrator@vsphere.local"
+# external_vsphere_password: "K8s_admin"
+# external_vsphere_datacenter: "DATACENTER_name"
+# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
+
+## Tags for the external vSphere Cloud Provider images
+# external_vsphere_cloud_controller_image_tag: "latest"
+# vsphere_syncer_image_tag: "v1.0.2"
+# vsphere_csi_attacher_image_tag: "v1.1.1"
+# vsphere_csi_controller: "v1.0.2"
+# vsphere_csi_liveness_probe_image_tag: "v1.1.0"
+# vsphere_csi_provisioner_image_tag: "v1.2.2"
+
+## To use vSphere CSI plugin to provision volumes set this value to true
+# vsphere_csi_enabled: true
+# vsphere_csi_controller_replicas: 1
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b5ce7f743443db1fcabeb65f39e07036e3317649
--- /dev/null
+++ b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+external_vsphere_vcenter_port: "443"
+external_vsphere_insecure: "true"
+external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
+
+vsphere_syncer_image_tag: "v1.0.2"
+vsphere_csi_attacher_image_tag: "v1.1.1"
+vsphere_csi_controller: "v1.0.2"
+vsphere_csi_liveness_probe_image_tag: "v1.1.0"
+vsphere_csi_provisioner_image_tag: "v1.2.2"
+vsphere_csi_node_driver_registrar_image_tag: "v1.1.0"
+vsphere_csi_driver_image_tag: "v1.0.2"
+
+vsphere_csi_controller_replicas: 1
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8047d468dfc9e42be759b92448ed5155f8689f84
--- /dev/null
+++ b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+- include_tasks: vsphere-credentials-check.yml
+  tags: vsphere-csi-driver
+
+- name: vSphere CSI Driver | Generate CSI cloud-config
+  template:
+    src: "{{ item }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item }}"
+    mode: 0640
+  with_items:
+    - vsphere-csi-cloud-config
+  when: inventory_hostname == groups['kube-master'][0]
+  tags: vsphere-csi-driver
+
+- name: vSphere CSI Driver | Generate Manifests
+  template:
+    src: "{{ item }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item }}"
+  with_items:
+    - vsphere-csi-controller-rbac.yml
+    - vsphere-csi-controller-ss.yml
+    - vsphere-csi-node.yml
+  register: vsphere_csi_manifests
+  when: inventory_hostname == groups['kube-master'][0]
+  tags: vsphere-csi-driver
+
+- name: vSphere CSI Driver | Create a CSI secret
+  command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system"
+  when: inventory_hostname == groups['kube-master'][0]
+  tags: vsphere-csi-driver
+
+- name: vSphere CSI Driver | Apply Manifests
+  kube:
+    kubectl: "{{ bin_dir }}/kubectl"
+    filename: "{{ kube_config_dir }}/{{ item.item }}"
+    state: "latest"
+  with_items:
+    - "{{ vsphere_csi_manifests.results }}"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+    - not item is skipped
+  loop_control:
+    label: "{{ item.item }}"
+  tags: vsphere-csi-driver
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/tasks/vsphere-credentials-check.yml b/roles/kubernetes-apps/csi_driver/vsphere/tasks/vsphere-credentials-check.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3504f60c9c5151b990cac8a226bae6d69252ce58
--- /dev/null
+++ b/roles/kubernetes-apps/csi_driver/vsphere/tasks/vsphere-credentials-check.yml
@@ -0,0 +1,38 @@
+---
+- name: External vSphere Cloud Provider | check external_vsphere_vcenter_ip value
+  fail:
+    msg: "external_vsphere_vcenter_ip is missing"
+  when: external_vsphere_vcenter_ip is not defined or not external_vsphere_vcenter_ip
+
+- name: External vSphere Cloud Provider | check external_vsphere_vcenter_port value
+  fail:
+    msg: "external_vsphere_vcenter_port is missing"
+  when: external_vsphere_vcenter_port is not defined or not external_vsphere_vcenter_port
+
+- name: External vSphere Cloud Provider | check external_vsphere_insecure value
+  fail:
+    msg: "external_vsphere_insecure is missing"
+  when: external_vsphere_insecure is not defined or not external_vsphere_insecure
+
+- name: External vSphere Cloud Provider | check external_vsphere_user value
+  fail:
+    msg: "external_vsphere_user is missing"
+  when: external_vsphere_user is not defined or not external_vsphere_user
+
+- name: External vSphere Cloud Provider | check external_vsphere_password value
+  fail:
+    msg: "external_vsphere_password is missing"
+  when:
+    - external_vsphere_password is not defined or not external_vsphere_password
+
+- name: External vSphere Cloud Provider | check external_vsphere_datacenter value
+  fail:
+    msg: "external_vsphere_datacenter is missing"
+  when:
+    - external_vsphere_datacenter is not defined or not external_vsphere_datacenter
+
+- name: External vSphere Cloud Provider | check external_vsphere_kubernetes_cluster_id value
+  fail:
+    msg: "external_vsphere_kubernetes_cluster_id is missing"
+  when:
+    - external_vsphere_kubernetes_cluster_id is not defined or not external_vsphere_kubernetes_cluster_id
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-cloud-config.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-cloud-config.j2
new file mode 100644
index 0000000000000000000000000000000000000000..ee5033a21279a736bd2895053f81e00a6ddf9bb6
--- /dev/null
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-cloud-config.j2
@@ -0,0 +1,9 @@
+[Global]
+cluster-id = "{{ external_vsphere_kubernetes_cluster_id }}"
+
+[VirtualCenter "{{ external_vsphere_vcenter_ip }}"]
+insecure-flag = "{{ external_vsphere_insecure }}"
+user = "{{ external_vsphere_user }}"
+password = "{{ external_vsphere_password }}"
+port = "{{ external_vsphere_vcenter_port }}"
+datacenters = "{{ external_vsphere_datacenter }}"
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..a5decf3b277dc72351199f93adee61735cfe6eab
--- /dev/null
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2
@@ -0,0 +1,42 @@
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: vsphere-csi-controller
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: vsphere-csi-controller-role
+rules:
+  - apiGroups: [""]
+    resources: ["nodes", "persistentvolumeclaims", "pods"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "update", "delete"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["get", "list", "watch", "create", "update", "patch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["csinodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: vsphere-csi-controller-binding
+subjects:
+  - kind: ServiceAccount
+    name: vsphere-csi-controller
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: vsphere-csi-controller-role
+  apiGroup: rbac.authorization.k8s.io
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-ss.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-ss.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..a88a23bd507c434d528e81ca3a127357227faed6
--- /dev/null
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-ss.yml.j2
@@ -0,0 +1,128 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+  name: vsphere-csi-controller
+  namespace: kube-system
+spec:
+  serviceName: vsphere-csi-controller
+  replicas: {{ vsphere_csi_controller_replicas }}
+  updateStrategy:
+    type: "RollingUpdate"
+  selector:
+    matchLabels:
+      app: vsphere-csi-controller
+  template:
+    metadata:
+      labels:
+        app: vsphere-csi-controller
+        role: vsphere-csi
+    spec:
+      serviceAccountName: vsphere-csi-controller
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+      tolerations:
+        - operator: "Exists"
+          key: node-role.kubernetes.io/master
+          effect: NoSchedule
+      dnsPolicy: "Default"
+      containers:
+        - name: csi-attacher
+          image: {{ quay_image_repo }}/k8scsi/csi-attacher:{{ vsphere_csi_attacher_image_tag }}
+          args:
+            - "--v=4"
+            - "--timeout=300s"
+            - "--csi-address=$(ADDRESS)"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - mountPath: /csi
+              name: socket-dir
+        - name: vsphere-csi-controller
+          image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/driver:{{ vsphere_csi_controller }}
+          lifecycle:
+            preStop:
+              exec:
+                command: ["/bin/sh", "-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"]
+          args:
+            - "--v=4"
+          imagePullPolicy: "Always"
+          env:
+            - name: CSI_ENDPOINT
+              value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
+            - name: X_CSI_MODE
+              value: "controller"
+            - name: VSPHERE_CSI_CONFIG
+              value: "/etc/cloud/csi-vsphere.conf"
+          volumeMounts:
+            - mountPath: /etc/cloud
+              name: vsphere-config-volume
+              readOnly: true
+            - mountPath: /var/lib/csi/sockets/pluginproxy/
+              name: socket-dir
+          ports:
+            - name: healthz
+              containerPort: 9808
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: healthz
+            initialDelaySeconds: 10
+            timeoutSeconds: 3
+            periodSeconds: 5
+            failureThreshold: 3
+        - name: liveness-probe
+          image: {{ quay_image_repo }}/k8scsi/livenessprobe:{{ vsphere_csi_liveness_probe_image_tag }}
+          args:
+            - "--csi-address=$(ADDRESS)"
+          env:
+            - name: ADDRESS
+              value: /var/lib/csi/sockets/pluginproxy/csi.sock
+          volumeMounts:
+            - mountPath: /var/lib/csi/sockets/pluginproxy/
+              name: socket-dir
+        - name: vsphere-syncer
+          image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/syncer:{{ vsphere_syncer_image_tag }}
+          args:
+            - "--v=2"
+          imagePullPolicy: "Always"
+          env:
+            - name: FULL_SYNC_INTERVAL_MINUTES
+              value: "30"
+            - name: VSPHERE_CSI_CONFIG
+              value: "/etc/cloud/csi-vsphere.conf"
+          volumeMounts:
+            - mountPath: /etc/cloud
+              name: vsphere-config-volume
+              readOnly: true
+        - name: csi-provisioner
+          image: {{ quay_image_repo }}/k8scsi/csi-provisioner:{{ vsphere_csi_provisioner_image_tag }}
+          args:
+            - "--v=4"
+            - "--timeout=300s"
+            - "--csi-address=$(ADDRESS)"
+            - "--feature-gates=Topology=true"
+            - "--strict-topology"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - mountPath: /csi
+              name: socket-dir
+      volumes:
+        - name: vsphere-config-volume
+          secret:
+            secretName: vsphere-config-secret
+        - name: socket-dir
+          hostPath:
+            path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com
+            type: DirectoryOrCreate
+---
+apiVersion: storage.k8s.io/v1beta1
+kind: CSIDriver
+metadata:
+  name: csi.vsphere.vmware.com
+spec:
+  attachRequired: true
+  podInfoOnMount: false
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..24c39abfeaaa0947f30c3b6fd402ffce8ae4d390
--- /dev/null
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-node.yml.j2
@@ -0,0 +1,121 @@
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: vsphere-csi-node
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: vsphere-csi-node
+  updateStrategy:
+    type: "RollingUpdate"
+  template:
+    metadata:
+      labels:
+        app: vsphere-csi-node
+        role: vsphere-csi
+    spec:
+      dnsPolicy: "Default"
+      containers:
+        - name: node-driver-registrar
+          image: {{ quay_image_repo }}/k8scsi/csi-node-driver-registrar:{{ vsphere_csi_node_driver_registrar_image_tag }}
+          lifecycle:
+            preStop:
+              exec:
+                command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com-reg.sock"]
+          args:
+            - "--v=5"
+            - "--csi-address=$(ADDRESS)"
+            - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+            - name: DRIVER_REG_SOCK_PATH
+              value: /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com/csi.sock
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: plugin-dir
+              mountPath: /csi
+            - name: registration-dir
+              mountPath: /registration
+        - name: vsphere-csi-node
+          image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/driver:{{ vsphere_csi_driver_image_tag }}
+          imagePullPolicy: "Always"
+          env:
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: CSI_ENDPOINT
+              value: unix:///csi/csi.sock
+            - name: X_CSI_MODE
+              value: "node"
+            - name: X_CSI_SPEC_REQ_VALIDATION
+              value: "false"
+            # needed only for topology aware setups
+            #- name: VSPHERE_CSI_CONFIG
+            #  value: "/etc/cloud/csi-vsphere.conf" # here csi-vsphere.conf is the name of the file used for creating secret using "--from-file" flag
+          args:
+            - "--v=4"
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
+            allowPrivilegeEscalation: true
+          volumeMounts:
+            # needed only for topology aware setups
+            #- name: vsphere-config-volume
+            #  mountPath: /etc/cloud
+            #  readOnly: true
+            - name: plugin-dir
+              mountPath: /csi
+            - name: pods-mount-dir
+              mountPath: /var/lib/kubelet
+              # needed so that any mounts setup inside this container are
+              # propagated back to the host machine.
+              mountPropagation: "Bidirectional"
+            - name: device-dir
+              mountPath: /dev
+          ports:
+            - name: healthz
+              containerPort: 9808
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: healthz
+            initialDelaySeconds: 10
+            timeoutSeconds: 3
+            periodSeconds: 5
+            failureThreshold: 3
+        - name: liveness-probe
+          image: {{ quay_image_repo }}/k8scsi/livenessprobe:{{ vsphere_csi_liveness_probe_image_tag }}
+          args:
+            - "--csi-address=$(ADDRESS)"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - name: plugin-dir
+              mountPath: /csi
+      volumes:
+        # needed only for topology aware setups
+        #- name: vsphere-config-volume
+        #  secret:
+        #    secretName: vsphere-config-secret
+        - name: registration-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins_registry
+            type: DirectoryOrCreate
+        - name: plugin-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com
+            type: DirectoryOrCreate
+        - name: pods-mount-dir
+          hostPath:
+            path: /var/lib/kubelet
+            type: Directory
+        - name: device-dir
+          hostPath:
+            path: /dev
diff --git a/roles/kubernetes-apps/external_cloud_controller/meta/main.yml b/roles/kubernetes-apps/external_cloud_controller/meta/main.yml
index 45eb9eb4c5ee8b4cc37a9712227070047440399c..b7d1cc6986ccfcf2130f09a68b341a0a1f79ee6d 100644
--- a/roles/kubernetes-apps/external_cloud_controller/meta/main.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/meta/main.yml
@@ -10,3 +10,13 @@ dependencies:
     tags:
       - external-cloud-controller
       - external-openstack
+  - role: kubernetes-apps/external_cloud_controller/vsphere
+    when:
+      - cloud_provider is defined
+      - cloud_provider == "external"
+      - external_cloud_provider is defined
+      - external_cloud_provider == "vsphere"
+      - inventory_hostname == groups['kube-master'][0]
+    tags:
+      - external-cloud-controller
+      - external-vsphere
diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml b/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c4422f31bd5dc6d536185a3c0772cbee59262919
--- /dev/null
+++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+external_vsphere_vcenter_port: "443"
+external_vsphere_insecure: "true"
+
+external_vsphere_cloud_controller_image_tag: "latest"
diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml b/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6b49cfdf8c8ce02c4531e9896f053159d6584573
--- /dev/null
+++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml
@@ -0,0 +1,45 @@
+---
+- include_tasks: vsphere-credentials-check.yml
+  tags: external-vsphere
+
+- name: External vSphere Cloud Controller | Generate CPI cloud-config
+  template:
+    src: "{{ item }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item }}"
+    mode: 0640
+  with_items:
+    - external-vsphere-cpi-cloud-config
+  when: inventory_hostname == groups['kube-master'][0]
+  tags: external-vsphere
+
+- name: External vSphere Cloud Controller | Generate Manifests
+  template:
+    src: "{{ item }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item }}"
+  with_items:
+    - external-vsphere-cpi-cloud-config-secret.yml
+    - external-vsphere-cloud-controller-manager-roles.yml
+    - external-vsphere-cloud-controller-manager-role-bindings.yml
+    - external-vsphere-cloud-controller-manager-ds.yml
+  register: external_vsphere_manifests
+  when: inventory_hostname == groups['kube-master'][0]
+  tags: external-vsphere
+
+- name: External vSphere Cloud Provider Interface | Create a CPI configMap
+  command: "{{ bin_dir }}/kubectl create configmap cloud-config --from-file=vsphere.conf={{ kube_config_dir }}/external-vsphere-cpi-cloud-config -n kube-system"
+  when: inventory_hostname == groups['kube-master'][0]
+  tags: external-vsphere
+
+- name: External vSphere Cloud Controller | Apply Manifests
+  kube:
+    kubectl: "{{ bin_dir }}/kubectl"
+    filename: "{{ kube_config_dir }}/{{ item.item }}"
+    state: "latest"
+  with_items:
+    - "{{ external_vsphere_manifests.results }}"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+    - not item is skipped
+  loop_control:
+    label: "{{ item.item }}"
+  tags: external-vsphere
diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/vsphere-credentials-check.yml b/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/vsphere-credentials-check.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b6c12b8bfce85251ebf24ec27d6494f48d0fd365
--- /dev/null
+++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/vsphere-credentials-check.yml
@@ -0,0 +1,32 @@
+---
+- name: External vSphere Cloud Provider | check external_vsphere_vcenter_ip value
+  fail:
+    msg: "external_vsphere_vcenter_ip is missing"
+  when: external_vsphere_vcenter_ip is not defined or not external_vsphere_vcenter_ip
+
+- name: External vSphere Cloud Provider | check external_vsphere_vcenter_port value
+  fail:
+    msg: "external_vsphere_vcenter_port is missing"
+  when: external_vsphere_vcenter_port is not defined or not external_vsphere_vcenter_port
+
+- name: External vSphere Cloud Provider | check external_vsphere_insecure value
+  fail:
+    msg: "external_vsphere_insecure is missing"
+  when: external_vsphere_insecure is not defined or not external_vsphere_insecure
+
+- name: External vSphere Cloud Provider | check external_vsphere_user value
+  fail:
+    msg: "external_vsphere_user is missing"
+  when: external_vsphere_user is not defined or not external_vsphere_user
+
+- name: External vSphere Cloud Provider | check external_vsphere_password value
+  fail:
+    msg: "external_vsphere_password is missing"
+  when:
+    - external_vsphere_password is not defined or not external_vsphere_password
+
+- name: External vSphere Cloud Provider | check external_vsphere_datacenter value
+  fail:
+    msg: "external_vsphere_datacenter is missing"
+  when:
+    - external_vsphere_datacenter is not defined or not external_vsphere_datacenter
diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-ds.yml.j2 b/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-ds.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..81120cec31db9f46d685202a9e3af9587c787bfe
--- /dev/null
+++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-ds.yml.j2
@@ -0,0 +1,71 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cloud-controller-manager
+  namespace: kube-system
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: vsphere-cloud-controller-manager
+  namespace: kube-system
+  labels:
+    k8s-app: vsphere-cloud-controller-manager
+spec:
+  selector:
+    matchLabels:
+      k8s-app: vsphere-cloud-controller-manager
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: vsphere-cloud-controller-manager
+    spec:
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+      securityContext:
+        runAsUser: 0
+      tolerations:
+      - key: node.cloudprovider.kubernetes.io/uninitialized
+        value: "true"
+        effect: NoSchedule
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      serviceAccountName: cloud-controller-manager
+      containers:
+        - name: vsphere-cloud-controller-manager
+          image: {{ gcr_image_repo }}/cloud-provider-vsphere/cpi/release/manager:{{ external_vsphere_cloud_controller_image_tag }}
+          args:
+            - --v=2
+            - --cloud-provider=vsphere
+            - --cloud-config=/etc/cloud/vsphere.conf
+          volumeMounts:
+            - mountPath: /etc/cloud
+              name: vsphere-config-volume
+              readOnly: true
+          resources:
+            requests:
+              cpu: 200m
+      hostNetwork: true
+      volumes:
+      - name: vsphere-config-volume
+        configMap:
+          name: cloud-config
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    component: cloud-controller-manager
+  name: vsphere-cloud-controller-manager
+  namespace: kube-system
+spec:
+  type: NodePort
+  ports:
+    - port: 43001
+      protocol: TCP
+      targetPort: 43001
+  selector:
+    component: cloud-controller-manager
diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-role-bindings.yml.j2 b/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-role-bindings.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..9f6107d33505b2ee265872712d2e14fb7346fe38
--- /dev/null
+++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-role-bindings.yml.j2
@@ -0,0 +1,35 @@
+apiVersion: v1
+items:
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: RoleBinding
+  metadata:
+    name: servicecatalog.k8s.io:apiserver-authentication-reader
+    namespace: kube-system
+  roleRef:
+    apiGroup: rbac.authorization.k8s.io
+    kind: Role
+    name: extension-apiserver-authentication-reader
+  subjects:
+  - apiGroup: ""
+    kind: ServiceAccount
+    name: cloud-controller-manager
+    namespace: kube-system
+  - apiGroup: ""
+    kind: User
+    name: cloud-controller-manager
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: system:cloud-controller-manager
+  roleRef:
+    apiGroup: rbac.authorization.k8s.io
+    kind: ClusterRole
+    name: system:cloud-controller-manager
+  subjects:
+  - kind: ServiceAccount
+    name: cloud-controller-manager
+    namespace: kube-system
+  - kind: User
+    name: cloud-controller-manager
+kind: List
+metadata: {}
diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-roles.yml.j2 b/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-roles.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..2ad7dc1322471831db935da96d55aa9796749548
--- /dev/null
+++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cloud-controller-manager-roles.yml.j2
@@ -0,0 +1,75 @@
+apiVersion: v1
+items:
+- apiVersion: rbac.authorization.k8s.io/v1
+  kind: ClusterRole
+  metadata:
+    name: system:cloud-controller-manager
+  rules:
+  - apiGroups:
+    - ""
+    resources:
+    - events
+    verbs:
+    - create
+    - patch
+    - update
+  - apiGroups:
+    - ""
+    resources:
+    - nodes
+    verbs:
+    - '*'
+  - apiGroups:
+    - ""
+    resources:
+    - nodes/status
+    verbs:
+    - patch
+  - apiGroups:
+    - ""
+    resources:
+    - services
+    verbs:
+    - list
+    - patch
+    - update
+    - watch
+  - apiGroups:
+    - ""
+    resources:
+    - serviceaccounts
+    verbs:
+    - create
+    - get
+    - list
+    - watch
+    - update
+  - apiGroups:
+    - ""
+    resources:
+    - persistentvolumes
+    verbs:
+    - get
+    - list
+    - update
+    - watch
+  - apiGroups:
+    - ""
+    resources:
+    - endpoints
+    verbs:
+    - create
+    - get
+    - list
+    - watch
+    - update
+  - apiGroups:
+    - ""
+    resources:
+    - secrets
+    verbs:
+    - get
+    - list
+    - watch
+kind: List
+metadata: {}
diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config-secret.yml.j2 b/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config-secret.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..5364f4263b481be5c634a6bc7355bcd3500a0273
--- /dev/null
+++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config-secret.yml.j2
@@ -0,0 +1,11 @@
+# This YAML file contains secret objects,
+# which are necessary to run external vsphere cloud controller.
+
+apiVersion: v1
+kind: Secret
+metadata:
+  name: cpi-global-secret
+  namespace: kube-system
+stringData:
+  {{ external_vsphere_vcenter_ip }}.username: "{{ external_vsphere_user }}"
+  {{ external_vsphere_vcenter_ip }}.password: "{{ external_vsphere_password }}"
diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config.j2 b/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config.j2
new file mode 100644
index 0000000000000000000000000000000000000000..a32d876c5ef3494a987836d13e2f95135235d8f2
--- /dev/null
+++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config.j2
@@ -0,0 +1,8 @@
+[Global]
+port = "{{ external_vsphere_vcenter_port }}"
+insecure-flag = "{{ external_vsphere_insecure }}"
+secret-name = "cpi-global-secret"
+secret-namespace = "kube-system"
+
+[VirtualCenter "{{ external_vsphere_vcenter_ip }}"]
+datacenters = "{{ external_vsphere_datacenter }}"
diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml
index 34fd366a33b06d1f74b712c29f8a795bcd45f3b5..c57eba5708cf71c584ec94b1846eaeea00315aff 100644
--- a/roles/kubernetes-apps/meta/main.yml
+++ b/roles/kubernetes-apps/meta/main.yml
@@ -61,6 +61,14 @@ dependencies:
       - gcp-pd-csi-driver
       - csi-driver
 
+  - role: kubernetes-apps/csi_driver/vsphere
+    when:
+      - vsphere_csi_enabled
+    tags:
+      - apps
+      - vsphere-csi-driver
+      - csi-driver
+
   - role: kubernetes-apps/persistent_volumes
     when:
       - persistent_volumes_enabled
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index c6e21a9799b9e76891740c0eeafbedf45fe1eea1..053be50d82680bc4f1add92e0bb27177079d393d 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -310,6 +310,7 @@ cinder_csi_enabled: false
 aws_ebs_csi_enabled: false
 azure_csi_enabled: false
 gcp_pd_csi_enabled: false
+vsphere_csi_enabled: false
 persistent_volumes_enabled: false
 cephfs_provisioner_enabled: false
 rbd_provisioner_enabled: false