From 27a268df33858f43026c8939d5544b3bc2aca2c7 Mon Sep 17 00:00:00 2001
From: Lovro Seder <vrovro@gmail.com>
Date: Sat, 18 Apr 2020 01:23:36 +0200
Subject: [PATCH] Gather just the necessary facts (#5955)

* Gather just the necessary facts

* Move fact gathering to a separate playbook.
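
The new facts.yml play gathers the minimal fact subset plus only the
network and hardware facts the roles actually use (for example
ansible_default_ipv4 and ansible_memtotal_mb), so every subsequent play
can run with `gather_facts: False` instead of a full setup pass.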
---
 cluster.yml         | 14 ++++++++++++++
 docs/nodes.md       |  8 ++++++++
 facts.yml           | 21 +++++++++++++++++++++
 remove-node.yml     |  7 +++++--
 reset.yml           |  5 +++--
 scale.yml           |  5 +++++
 upgrade-cluster.yml | 15 +++++++++++++++
 7 files changed, 71 insertions(+), 4 deletions(-)
 create mode 100644 facts.yml

diff --git a/cluster.yml b/cluster.yml
index 6c6dd36c0..5573c6975 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -40,7 +40,11 @@
     - { role: kubespray-defaults }
     - { role: bootstrap-os, tags: bootstrap-os}
 
+- name: Gather facts
+  import_playbook: facts.yml
+
 - hosts: k8s-cluster:etcd
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -50,6 +54,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: etcd
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -61,6 +66,7 @@
       when: not etcd_kubeadm_enabled | default(false)
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -72,6 +78,7 @@
       when: not etcd_kubeadm_enabled | default(false)
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -79,6 +86,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -87,6 +95,7 @@
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -95,12 +104,14 @@
     - { role: kubernetes/node-label, tags: node-label }
 
 - hosts: calico-rr
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
     - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
 
 - hosts: kube-master[0]
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -108,6 +119,7 @@
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
 - hosts: kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -118,6 +130,7 @@
     - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
 
 - hosts: kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -125,6 +138,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
diff --git a/docs/nodes.md b/docs/nodes.md
index 8c79c974e..6eb987428 100644
--- a/docs/nodes.md
+++ b/docs/nodes.md
@@ -12,6 +12,14 @@ This should be the easiest.
 
 You can use `--limit=node1` to limit Kubespray to avoid disturbing other nodes in the cluster.
 
+Before using `--limit`, run the `facts.yml` playbook without the limit to refresh the facts cache for all nodes.
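+
+For example (substitute your own inventory path):
+
+```sh
+ansible-playbook -i inventory/mycluster/hosts.yaml facts.yml
+```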
+
 ### 3) Drain the node that will be removed
 
 ```sh
diff --git a/facts.yml b/facts.yml
new file mode 100644
index 000000000..9296da46c
--- /dev/null
+++ b/facts.yml
@@ -0,0 +1,21 @@
+---
+- name: Gather facts
+  hosts: k8s-cluster:etcd:calico-rr
+  gather_facts: False
+  tasks:
+    - name: Gather minimal facts
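+      # '!all' turns off every optional subset, so only Ansible's minimal facts are collected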
+      setup:
+        gather_subset: '!all'
+
+    - name: Gather necessary facts
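+      # Collect network and hardware facts, filtered down to the specific variables in the loop below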
+      setup:
+        gather_subset: '!all,!min,network,hardware'
+        filter: "{{ item }}"
+      loop:
+        - ansible_distribution_major_version
+        - ansible_default_ipv4
+        - ansible_all_ipv4_addresses
+        - ansible_memtotal_mb
+        - ansible_swaptotal_mb
diff --git a/remove-node.yml b/remove-node.yml
index c2f33b3b1..db6f90529 100644
--- a/remove-node.yml
+++ b/remove-node.yml
@@ -33,15 +33,18 @@
     - { role: kubespray-defaults }
     - { role: remove-node/pre-remove, tags: pre-remove }
 
+- name: Gather facts
+  import_playbook: facts.yml
+
 - hosts: "{{ node | default('kube-node') }}"
-  gather_facts: yes
+  gather_facts: False
   roles:
     - { role: kubespray-defaults }
     - { role: reset, tags: reset, when: reset_nodes|default(True) }
 
 # Currently cannot remove first master or etcd
 - hosts: "{{ node | default('kube-master[1:]:etcd[:1]') }}"
-  gather_facts: yes
+  gather_facts: False
   roles:
     - { role: kubespray-defaults }
     - { role: remove-node/post-remove, tags: post-remove }
diff --git a/reset.yml b/reset.yml
index 14cbf9400..21c92f789 100644
--- a/reset.yml
+++ b/reset.yml
@@ -19,10 +19,11 @@
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
-- hosts: all
-  gather_facts: true
+- name: Gather facts
+  import_playbook: facts.yml
 
 - hosts: etcd:k8s-cluster:calico-rr
+  gather_facts: False
   vars_prompt:
     name: "reset_confirmation"
     prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
diff --git a/scale.yml b/scale.yml
index 08b9a0290..1dc7eb60c 100644
--- a/scale.yml
+++ b/scale.yml
@@ -41,8 +41,12 @@
     - { role: kubespray-defaults }
     - { role: bootstrap-os, tags: bootstrap-os}
 
+- name: Gather facts
+  import_playbook: facts.yml
+
 - name: Generate the etcd certificates beforehand
   hosts: etcd
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -58,6 +62,7 @@
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes
   hosts: kube-node
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index a61c3f452..83c1502c3 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -44,8 +44,12 @@
     - { role: kubespray-defaults }
     - { role: bootstrap-os, tags: bootstrap-os}
 
+- name: Gather facts
+  import_playbook: facts.yml
+
 - name: Download images to ansible host cache via first kube-master node
   hosts: kube-master[0]
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost"}
@@ -55,6 +59,7 @@
 
 - name: Prepare nodes for upgrade
   hosts: k8s-cluster:etcd:calico-rr
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -64,6 +69,7 @@
 
 - name: Upgrade container engine on non-cluster nodes
   hosts: etcd:calico-rr:!k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
@@ -72,6 +78,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: etcd
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -83,6 +90,7 @@
       when: not etcd_kubeadm_enabled | default(false)
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -94,6 +102,7 @@
       when: not etcd_kubeadm_enabled | default(false)
 
 - name: Handle upgrades to master components first to maintain backwards compat.
   hosts: kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
@@ -112,6 +121,7 @@
 
 - name: Upgrade calico and external cloud provider on all masters and nodes
   hosts: kube-master:kube-node
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
@@ -123,6 +133,7 @@
 
 - name: Finally handle worker upgrades, based on given batch size
   hosts: kube-node:!kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
@@ -136,6 +147,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: kube-master[0]
+  gather_facts: False
   any_errors_fatal: true
   roles:
     - { role: kubespray-defaults }
@@ -143,6 +155,7 @@
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
 - hosts: calico-rr
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -150,6 +163,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: kube-master
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
@@ -157,6 +171,7 @@
   environment: "{{ proxy_env }}"
 
 - hosts: k8s-cluster
+  gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults }
-- 
GitLab