diff --git a/docs/getting-started.md b/docs/getting-started.md
index 2402ac54fe4fa455a831afbc630d42f40d8f3101..90022996455028b4cd4975bd89a984eb399f4e47 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -51,11 +51,26 @@ Remove nodes
 
 You may want to remove **worker** nodes to your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained, then stop some kubernetes services and delete some certificates, and finally execute the kubectl command to delete these nodes. This can be combined with the add node function, This is generally helpful when doing something like autoscaling your clusters. Of course if a node is not working, you can remove the node and install it again.
 
-- Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
-- Run the ansible-playbook command, substituting `remove-node.yml`:
+Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
+
+    ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
+        --private-key=~/.ssh/private_key
+
+
+We support two ways to select the nodes:
+
+- Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node you want to delete.
+```
+ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
+  --private-key=~/.ssh/private_key \
+  --extra-vars "node=nodename,nodename2"
+```
+or
+- Use `--limit nodename,nodename2` to select the node
 ```
 ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
-  --private-key=~/.ssh/private_key
+  --private-key=~/.ssh/private_key \
+  --limit nodename,nodename2
 ```
 
 Connecting to Kubernetes
diff --git a/remove-node.yml b/remove-node.yml
index e39432f02d3962f9fa1c3835bccc8d070a2edf7a..0fae1a99425d97405abf9e38565f172cca2b4f90 100644
--- a/remove-node.yml
+++ b/remove-node.yml
@@ -5,7 +5,7 @@
     ansible_ssh_pipelining: true
   gather_facts: true
 
-- hosts: etcd:k8s-cluster:vault:calico-rr
+- hosts: "{{ node | default('etcd:k8s-cluster:vault:calico-rr') }}"
   vars_prompt:
     name: "delete_nodes_confirmation"
     prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
@@ -22,7 +22,7 @@
   roles:
     - { role: remove-node/pre-remove, tags: pre-remove }
 
-- hosts: kube-node
+- hosts: "{{ node | default('kube-node') }}"
   roles:
     - { role: kubespray-defaults }
     - { role: reset, tags: reset }
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index 30c75d1b412aa824976ca4cd5bf1bd7d23d00256..8017e85e26715e6717fe96a9eed7fd831700dd02 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -3,7 +3,7 @@
 - name: Delete node
   command: kubectl delete node {{ item }}
   with_items:
-    - "{{ groups['kube-node'] }}"
+    - "{{ node.split(',') if node is defined else groups['kube-node'] }}"
   delegate_to: "{{ groups['kube-master']|first }}"
   run_once: true
   ignore_errors: yes
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index 836309bbffc36c4fba77aa9a7e9afe38e6e6bda3..5db5fa13a89da55c3837a86f2759c9205a474039 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -9,7 +9,7 @@
       --timeout {{ drain_timeout }}
       --delete-local-data {{ item }}
   with_items:
-    - "{{ groups['kube-node'] }}"
+    - "{{ node.split(',') if node is defined else groups['kube-node'] }}"
   failed_when: false
   delegate_to: "{{ groups['kube-master']|first }}"
   run_once: true