diff --git a/cluster.yml b/cluster.yml
index 6c6dd36c0d2d59daf005a85d3c90db655c28f18b..5573c69752c458ddf6b9e9502e06705029616f04 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -40,7 +40,11 @@
     - { role: kubespray-defaults }
     - { role: bootstrap-os, tags: bootstrap-os}
 
+- name: Gather facts
+  import_playbook: facts.yml
+
 - hosts: k8s-cluster:etcd
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -50,6 +54,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: etcd
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -61,6 +66,7 @@
       when: not etcd_kubeadm_enabled| default(false)
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -72,6 +78,7 @@
       when: not etcd_kubeadm_enabled| default(false)
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -79,6 +86,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -87,6 +95,7 @@
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -95,12 +104,14 @@
     - { role: kubernetes/node-label, tags: node-label }
 
 - hosts: calico-rr
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
     - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
 
 - hosts: kube-master[0]
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -108,6 +119,7 @@
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
 - hosts: kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -118,6 +130,7 @@
     - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
 
 - hosts: kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -125,6 +138,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
diff --git a/docs/nodes.md b/docs/nodes.md
index 8c79c974e74f7419dad2cbbb70a4a2594e8998ea..6eb987428c06b6746862cd8bbdbb916d02d46c34 100644
--- a/docs/nodes.md
+++ b/docs/nodes.md
@@ -12,6 +12,8 @@ This should be the easiest.
 
 You can use `--limit=node1` to limit Kubespray to avoid disturbing other nodes in the cluster.
 
+Before using `--limit`, run the `facts.yml` playbook without the limit to refresh the facts cache for all nodes.
+
 ### 3) Drain the node that will be removed
 
 ```sh
diff --git a/facts.yml b/facts.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9296da46cfe39362d83958d38c78701adcb8c4f1
--- /dev/null
+++ b/facts.yml
@@ -0,0 +1,19 @@
+---
+- name: Gather facts
+  hosts: k8s-cluster:etcd:calico-rr
+  gather_facts: False
+  tasks:
+    - name: Gather minimal facts
+      setup:
+        gather_subset: '!all'
+
+    - name: Gather necessary facts
+      setup:
+        gather_subset: '!all,!min,network,hardware'
+        filter: "{{ item }}"
+      loop:
+        - ansible_distribution_major_version
+        - ansible_default_ipv4
+        - ansible_all_ipv4_addresses
+        - ansible_memtotal_mb
+        - ansible_swaptotal_mb
diff --git a/remove-node.yml b/remove-node.yml
index c2f33b3b116b5dd86a6e9c9f8e22df852fd1ebcd..db6f90529bfe3703b74040fdb08d2f955920e3bd 100644
--- a/remove-node.yml
+++ b/remove-node.yml
@@ -33,15 +33,18 @@
     - { role: kubespray-defaults }
     - { role: remove-node/pre-remove, tags: pre-remove }
 
+- name: Gather facts
+  import_playbook: facts.yml
+
 - hosts: "{{ node | default('kube-node') }}"
-  gather_facts: yes
+  gather_facts: False
   roles:
     - { role: kubespray-defaults }
     - { role: reset, tags: reset, when: reset_nodes|default(True) }
 
 # Currently cannot remove first master or etcd
 - hosts: "{{ node | default('kube-master[1:]:etcd[:1]') }}"
-  gather_facts: yes
+  gather_facts: False
   roles:
     - { role: kubespray-defaults }
     - { role: remove-node/post-remove, tags: post-remove }
diff --git a/reset.yml b/reset.yml
index 14cbf94008f26bf059005b73b68090fd7180c56b..21c92f7898c7d3e9eba068463ca8f07f7e10efb1 100644
--- a/reset.yml
+++ b/reset.yml
@@ -19,10 +19,11 @@
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
-- hosts: all
-  gather_facts: true
+- name: Gather facts
+  import_playbook: facts.yml
 
 - hosts: etcd:k8s-cluster:calico-rr
+  gather_facts: False
   vars_prompt:
     name: "reset_confirmation"
     prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
diff --git a/scale.yml b/scale.yml
index 08b9a0290fc685314d9663ed1cc6abe46065f019..1dc7eb60cd94293395a9b0c5e39edce56d6b6c3e 100644
--- a/scale.yml
+++ b/scale.yml
@@ -41,8 +41,12 @@
     - { role: kubespray-defaults }
     - { role: bootstrap-os, tags: bootstrap-os}
 
+- name: Gather facts
+  import_playbook: facts.yml
+
 - name: Generate the etcd certificates beforehand
   hosts: etcd
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -58,6 +62,7 @@
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes
   hosts: kube-node
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index a61c3f452c88dd176115fa505374cb51a565d3c9..83c1502c333908fc228b00d9c9dbaf2d4d2185d6 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -44,8 +44,12 @@
     - { role: kubespray-defaults }
     - { role: bootstrap-os, tags: bootstrap-os}
 
+- name: Gather facts
+  import_playbook: facts.yml
+
 - name: Download images to ansible host cache via first kube-master node
   hosts: kube-master[0]
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost"}
@@ -55,6 +59,7 @@
 
 - name: Prepare nodes for upgrade
   hosts: k8s-cluster:etcd:calico-rr
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -64,6 +69,7 @@
 
 - name: Upgrade container engine on non-cluster nodes
   hosts: etcd:calico-rr:!k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
@@ -72,6 +78,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: etcd
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -83,6 +90,7 @@
       when: not etcd_kubeadm_enabled | default(false)
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -94,6 +102,7 @@
       when: not etcd_kubeadm_enabled | default(false)
 
 - name: Handle upgrades to master components first to maintain backwards compat.
   hosts: kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
@@ -112,6 +121,7 @@
 
 - name: Upgrade calico and external cloud provider on all masters and nodes
   hosts: kube-master:kube-node
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
@@ -123,6 +133,7 @@
 
 - name: Finally handle worker upgrades, based on given batch size
   hosts: kube-node:!kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
@@ -136,6 +147,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: kube-master[0]
+  gather_facts: False
   any_errors_fatal: true
   roles:
     - { role: kubespray-defaults }
@@ -143,6 +155,7 @@
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
 - hosts: calico-rr
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -150,6 +163,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -157,6 +171,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }