From 2ec1c93897dadf2141c8a40595a6216d68b2516d Mon Sep 17 00:00:00 2001
From: Max Gautier <mg@max.gautier.name>
Date: Sat, 21 Sep 2024 14:09:09 +0200
Subject: [PATCH] Test group membership with group_names

Testing for group membership with group names makes Kubespray more
tolerant towards the structure of the inventory.
Where 'inventory_hostname in groups["some_group"]' would fail if
"some_group" is not defined, '"some_group" in group_names' would not.
---
 roles/download/tasks/main.yml                      |  2 +-
 roles/etcd/tasks/check_certs.yml                   |  8 ++++----
 roles/etcd/tasks/gen_certs_script.yml              | 12 ++++++------
 roles/etcd/tasks/main.yml                          |  6 +++---
 .../csi_driver/cinder/tasks/main.yml               |  2 +-
 .../control-plane/tasks/kubeadm-setup.yml          |  2 +-
 .../control-plane/tasks/kubeadm-upgrade.yml        |  2 +-
 .../kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml |  2 +-
 roles/kubernetes/node/tasks/install.yml            |  2 +-
 roles/kubernetes/preinstall/handlers/main.yml      | 14 +++++++-------
 .../preinstall/tasks/0040-verify-settings.yml      |  6 +++---
 .../preinstall/tasks/0050-create_directories.yml   | 14 +++++++-------
 roles/kubernetes/tokens/tasks/gen_tokens.yml       |  2 +-
 roles/kubespray-defaults/defaults/main/main.yml    |  2 +-
 roles/network_plugin/calico/tasks/install.yml      | 12 ++++++------
 .../calico/tasks/peer_with_calico_rr.yml           |  2 +-
 .../calico/tasks/peer_with_router.yml              |  8 ++++----
 roles/network_plugin/cilium/tasks/install.yml      |  2 +-
 .../network_plugin/kube-router/tasks/annotate.yml  |  6 +++---
 roles/remove-node/post-remove/tasks/main.yml       |  2 +-
 roles/remove-node/remove-etcd-node/tasks/main.yml  | 10 +++++-----
 roles/reset/tasks/main.yml                         |  2 +-
 22 files changed, 60 insertions(+), 60 deletions(-)

diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index 3309ab88e..93f8bb55a 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -11,7 +11,7 @@
   include_tasks: prep_kubeadm_images.yml
   when:
     - not skip_downloads | default(false)
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
   tags:
     - download
     - upload
diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml
index 440685aa7..51ce00b21 100644
--- a/roles/etcd/tasks/check_certs.yml
+++ b/roles/etcd/tasks/check_certs.yml
@@ -21,7 +21,7 @@
     get_checksum: true
     get_mime: false
   register: etcd_member_certs
-  when: inventory_hostname in groups['etcd']
+  when: ('etcd' in group_names)
   with_items:
     - ca.pem
     - member-{{ inventory_hostname }}.pem
@@ -33,7 +33,7 @@
   stat:
     path: "{{ etcd_cert_dir }}/{{ item }}"
   register: etcd_node_certs
-  when: inventory_hostname in groups['k8s_cluster']
+  when: ('k8s_cluster' in group_names)
   with_items:
     - ca.pem
     - node-{{ inventory_hostname }}.pem
@@ -99,7 +99,7 @@
   set_fact:
     etcd_member_requires_sync: true
   when:
-    - inventory_hostname in groups['etcd']
+    - ('etcd' in group_names)
     - (not etcd_member_certs.results[0].stat.exists | default(false)) or
       (not etcd_member_certs.results[1].stat.exists | default(false)) or
       (not etcd_member_certs.results[2].stat.exists | default(false)) or
@@ -115,7 +115,7 @@
   set_fact:
     kubernetes_host_requires_sync: true
   when:
-    - inventory_hostname in groups['k8s_cluster'] and
+    - ('k8s_cluster' in group_names) and
       inventory_hostname not in groups['etcd']
     - (not etcd_node_certs.results[0].stat.exists | default(false)) or
       (not etcd_node_certs.results[1].stat.exists | default(false)) or
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 934b5eb37..325d537b5 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -79,7 +79,7 @@
         {% endfor %}]"
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
-    - inventory_hostname in groups['etcd']
+    - ('etcd' in group_names)
     - sync_certs | default(false)
     - inventory_hostname != groups['etcd'][0]
   notify: Set etcd_secret_changed
@@ -93,7 +93,7 @@
     mode: "0640"
   with_items: "{{ etcd_master_certs.results }}"
   when:
-    - inventory_hostname in groups['etcd']
+    - ('etcd' in group_names)
     - sync_certs | default(false)
     - inventory_hostname != groups['etcd'][0]
   loop_control:
@@ -110,7 +110,7 @@
         {% endfor %}]"
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
-    - inventory_hostname in groups['etcd']
+    - ('etcd' in group_names)
     - inventory_hostname != groups['etcd'][0]
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
@@ -125,7 +125,7 @@
     mode: "0640"
   with_items: "{{ etcd_master_node_certs.results }}"
   when:
-    - inventory_hostname in groups['etcd']
+    - ('etcd' in group_names)
     - inventory_hostname != groups['etcd'][0]
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
@@ -135,7 +135,7 @@
 - name: Gen_certs | Generate etcd certs
   include_tasks: gen_nodes_certs_script.yml
   when:
-    - inventory_hostname in groups['kube_control_plane'] and
+    - ('kube_control_plane' in group_names) and
         sync_certs | default(false) and inventory_hostname not in groups['etcd']
 
 - name: Gen_certs | Generate etcd certs on nodes if needed
@@ -143,7 +143,7 @@
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
-    - inventory_hostname in groups['k8s_cluster'] and
+    - ('k8s_cluster' in group_names) and
         sync_certs | default(false) and inventory_hostname not in groups['etcd']
 
 - name: Gen_certs | check certificate permissions
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index b6e365884..5687264a5 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -25,7 +25,7 @@
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
   tags:
     - etcd-secrets
 
@@ -37,7 +37,7 @@
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
   tags:
     - master    # master tag is deprecated and replaced by control-plane
     - control-plane
@@ -49,7 +49,7 @@
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
   tags:
     - master    # master tag is deprecated and replaced by control-plane
     - control-plane
diff --git a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
index f2d1026e8..fe5ae0b63 100644
--- a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
@@ -9,7 +9,7 @@
   loop_control:
     loop_var: delegate_host_to_write_cacert
   when:
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
     - cinder_cacert is defined
     - cinder_cacert | length > 0
 
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index 62fa2be5b..53babe9b1 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -243,5 +243,5 @@
   delegate_to: "{{ first_kube_control_plane }}"
   with_items:
     - "node-role.kubernetes.io/control-plane:NoSchedule-"
-  when: inventory_hostname in groups['kube_node']
+  when: ('kube_node' in group_names)
   failed_when: false
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
index 9609c2f3d..99d351e17 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
@@ -3,7 +3,7 @@
   uri:
     url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
     validate_certs: false
-  when: inventory_hostname in groups['kube_control_plane']
+  when: ('kube_control_plane' in group_names)
   register: _result
   retries: 60
   delay: 5
diff --git a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
index 13420c0b9..3d0f4b3c0 100644
--- a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
+++ b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
@@ -51,7 +51,7 @@
   register: "etcd_client_cert_serial_result"
   changed_when: false
   when:
-    - inventory_hostname in groups['k8s_cluster'] | union(groups['calico_rr'] | default([])) | unique | sort
+    - group_names | intersect(['k8s_cluster', 'calico_rr']) | length > 0
   tags:
     - network
 
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml
index a89ba94ef..cfa90ed17 100644
--- a/roles/kubernetes/node/tasks/install.yml
+++ b/roles/kubernetes/node/tasks/install.yml
@@ -8,7 +8,7 @@
   tags:
     - kubeadm
   when:
-    - not inventory_hostname in groups['kube_control_plane']
+    - not ('kube_control_plane' in group_names)
 
 - name: Install | Copy kubelet binary from download dir
   copy:
diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml
index cc69fe42c..34502d565 100644
--- a/roles/kubernetes/preinstall/handlers/main.yml
+++ b/roles/kubernetes/preinstall/handlers/main.yml
@@ -35,7 +35,7 @@
     get_checksum: false
     get_mime: false
   register: kube_apiserver_set
-  when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
+  when: ('kube_control_plane' in group_names) and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
   listen: Preinstall | propagate resolvconf to k8s components
 
 # FIXME(mattymo): Also restart for kubeadm mode
@@ -46,7 +46,7 @@
     get_checksum: false
     get_mime: false
   register: kube_controller_set
-  when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
+  when: ('kube_control_plane' in group_names) and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
   listen: Preinstall | propagate resolvconf to k8s components
 
 - name: Preinstall | restart kube-controller-manager docker
@@ -55,7 +55,7 @@
     executable: /bin/bash
   when:
     - container_manager == "docker"
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
     - kube_controller_set.stat.exists
@@ -71,7 +71,7 @@
   until: preinstall_restart_controller_manager.rc == 0
   when:
     - container_manager in ['crio', 'containerd']
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
     - kube_controller_set.stat.exists
@@ -83,7 +83,7 @@
     executable: /bin/bash
   when:
     - container_manager == "docker"
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
     - kube_apiserver_set.stat.exists
@@ -99,7 +99,7 @@
   delay: 1
   when:
     - container_manager in ['crio', 'containerd']
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
     - kube_apiserver_set.stat.exists
@@ -116,7 +116,7 @@
   delay: 1
   when:
     - dns_late
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
     - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos
diff --git a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
index 51dae8f9d..867cfb2ed 100644
--- a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
@@ -65,14 +65,14 @@
     that: ansible_memtotal_mb >= minimal_master_memory_mb
   when:
     - not ignore_assert_errors
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
 
 - name: Stop if memory is too small for nodes
   assert:
     that: ansible_memtotal_mb >= minimal_node_memory_mb
   when:
     - not ignore_assert_errors
-    - inventory_hostname in groups['kube_node']
+    - ('kube_node' in group_names)
 
 # This command will fail if cgroups are not enabled on the node.
 # For reference: https://kubernetes.io/docs/concepts/architecture/cgroups/#check-cgroup-version
@@ -92,7 +92,7 @@
     msg: "Do not schedule more pods on a node than inet addresses are available."
   when:
     - not ignore_assert_errors
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
     - kube_network_node_prefix is defined
     - kube_network_plugin != 'calico'
 
diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
index 80d873f89..7c4072c95 100644
--- a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
+++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
@@ -5,7 +5,7 @@
     state: directory
     owner: "{{ kube_owner }}"
     mode: "0755"
-  when: inventory_hostname in groups['k8s_cluster']
+  when: ('k8s_cluster' in group_names)
   become: true
   tags:
     - kubelet
@@ -30,7 +30,7 @@
     state: directory
     owner: root
     mode: "0755"
-  when: inventory_hostname in groups['k8s_cluster']
+  when: ('k8s_cluster' in group_names)
   become: true
   tags:
     - kubelet
@@ -55,7 +55,7 @@
     get_mime: false
   register: kube_cert_compat_dir_check
   when:
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
     - kube_cert_dir != kube_cert_compat_dir
 
 - name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498)
@@ -65,7 +65,7 @@
     state: link
     mode: "0755"
   when:
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
     - kube_cert_dir != kube_cert_compat_dir
     - not kube_cert_compat_dir_check.stat.exists
 
@@ -80,7 +80,7 @@
     - "/opt/cni/bin"
   when:
     - kube_network_plugin in ["calico", "weave", "flannel", "cilium", "kube-ovn", "kube-router", "macvlan"]
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
   tags:
     - network
     - cilium
@@ -100,7 +100,7 @@
     - "/var/lib/calico"
   when:
     - kube_network_plugin == "calico"
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
   tags:
     - network
     - calico
@@ -115,7 +115,7 @@
     mode: "{{ local_volume_provisioner_directory_mode }}"
   with_items: "{{ local_volume_provisioner_storage_classes.keys() | list }}"
   when:
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
     - local_volume_provisioner_enabled
   tags:
     - persistent_volumes
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index c85886a80..67b45f9ae 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -57,7 +57,7 @@
   args:
     executable: /bin/bash
   when:
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - sync_tokens | default(false)
     - inventory_hostname != groups['kube_control_plane'][0]
     - tokens_data.stdout
diff --git a/roles/kubespray-defaults/defaults/main/main.yml b/roles/kubespray-defaults/defaults/main/main.yml
index e2d0ec22e..71c88d817 100644
--- a/roles/kubespray-defaults/defaults/main/main.yml
+++ b/roles/kubespray-defaults/defaults/main/main.yml
@@ -273,7 +273,7 @@ kubelet_shutdown_grace_period: 60s
 kubelet_shutdown_grace_period_critical_pods: 20s
 
 # Whether to deploy the container engine
-deploy_container_engine: "{{ inventory_hostname in groups['k8s_cluster'] or etcd_deployment_type == 'docker' }}"
+deploy_container_engine: "{{ 'k8s_cluster' in group_names or etcd_deployment_type == 'docker' }}"
 
 # Container for runtime
 container_manager: containerd
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index 1d3b02339..2d80b110c 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -121,7 +121,7 @@
 
 - name: Calico | kdd specific configuration
   when:
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - calico_datastore == "kdd"
   block:
     - name: Calico | Check if extra directory is needed
@@ -321,7 +321,7 @@
     nodeToNodeMeshEnabled: "false"
   when:
     - peer_with_router | default(false) or peer_with_calico_rr | default(false)
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
   run_once: true
 
 - name: Calico | Configure Calico BGP
@@ -382,7 +382,7 @@
     - {name: kubernetes-services-endpoint, file: kubernetes-services-endpoint.yml, type: cm }
   register: calico_node_manifests
   when:
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - rbac_enabled or item.type not in rbac_resources
 
 - name: Calico | Create calico manifests for typha
@@ -394,7 +394,7 @@
     - {name: calico, file: calico-typha.yml, type: typha}
   register: calico_node_typha_manifest
   when:
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - typha_enabled
 
 - name: Calico | get calico apiserver caBundle
@@ -421,7 +421,7 @@
     - {name: calico, file: calico-apiserver.yml, type: calico-apiserver}
   register: calico_apiserver_manifest
   when:
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - calico_apiserver_enabled
 
 - name: Start Calico resources
@@ -473,7 +473,7 @@
   with_items:
     - {name: calico, file: calico-ipamconfig.yml, type: ipam}
   when:
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - calico_datastore == "kdd"
 
 - name: Calico | Create ipamconfig resources
diff --git a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
index 9d216bd20..53b49c1c4 100644
--- a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
+++ b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
@@ -32,7 +32,7 @@
   when:
     - calico_rr_id is defined
     - calico_group_id is defined
-    - inventory_hostname in groups['calico_rr']
+    - ('calico_rr' in group_names)
 
 - name: Calico | Configure peering with route reflectors at global scope
   command:
diff --git a/roles/network_plugin/calico/tasks/peer_with_router.yml b/roles/network_plugin/calico/tasks/peer_with_router.yml
index 5c25f09ca..e9b4feced 100644
--- a/roles/network_plugin/calico/tasks/peer_with_router.yml
+++ b/roles/network_plugin/calico/tasks/peer_with_router.yml
@@ -28,7 +28,7 @@
     cmd: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }}"
   register: output_get_node
   when:
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
     - local_as is defined
     - groups['calico_rr'] | default([]) | length == 0
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
@@ -50,7 +50,7 @@
   until: output.rc == 0
   delay: "{{ retry_stagger | random + 3 }}"
   when:
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
     - local_as is defined
     - groups['calico_rr'] | default([]) | length == 0
     - output_get_node.rc == 0
@@ -77,7 +77,7 @@
   until: output.rc == 0
   delay: "{{ retry_stagger | random + 3 }}"
   when:
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
     - local_as is defined
     - groups['calico_rr'] | default([]) | length == 0
     - output_get_node.rc != 0
@@ -110,4 +110,4 @@
     - "{{ peers | default([]) | selectattr('scope', 'undefined') | list | union(peers | default([]) | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'node') | list ) }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
-    - inventory_hostname in groups['k8s_cluster']
+    - ('k8s_cluster' in group_names)
diff --git a/roles/network_plugin/cilium/tasks/install.yml b/roles/network_plugin/cilium/tasks/install.yml
index 7da39644b..e6e7e31b4 100644
--- a/roles/network_plugin/cilium/tasks/install.yml
+++ b/roles/network_plugin/cilium/tasks/install.yml
@@ -59,7 +59,7 @@
     - {name: cilium, file: sa.yml, type: sa}
   register: cilium_node_manifests
   when:
-    - inventory_hostname in groups['kube_control_plane']
+    - ('kube_control_plane' in group_names)
     - item.when | default(True) | bool
 
 - name: Cilium | Create Cilium Hubble manifests
diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml
index 67d57a2d3..9cb7f6e7c 100644
--- a/roles/network_plugin/kube-router/tasks/annotate.yml
+++ b/roles/network_plugin/kube-router/tasks/annotate.yml
@@ -4,18 +4,18 @@
   with_items:
   - "{{ kube_router_annotations_master }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane']
+  when: kube_router_annotations_master is defined and 'kube_control_plane' in group_names
 
 - name: Kube-router | Add annotations on kube_node
   command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_node }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node']
+  when: kube_router_annotations_node is defined and 'kube_node' in group_names
 
 - name: Kube-router | Add common annotations on all servers
   command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_all }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s_cluster']
+  when: kube_router_annotations_all is defined and 'k8s_cluster' in group_names
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index 473e49f55..46c50c1c3 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -5,7 +5,7 @@
   when:
     - groups['kube_control_plane'] | length > 0
     # ignore servers that are not nodes
-    - inventory_hostname in groups['k8s_cluster'] and kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines
+    - ('k8s_cluster' in group_names) and kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines
   retries: "{{ delete_node_retries }}"
   # Sometimes the api-server can have a short window of indisponibility when we delete a control plane node
   delay: "{{ delete_node_delay_seconds }}"
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index f0f0e17c9..eea48169a 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -6,7 +6,7 @@
   register: remove_node_ip
   when:
     - groups['kube_control_plane'] | length > 0
-    - inventory_hostname in groups['etcd']
+    - ('etcd' in group_names)
     - ip is not defined
     - access_ip is not defined
   delegate_to: "{{ groups['etcd'] | first }}"
@@ -16,14 +16,14 @@
   set_fact:
     node_ip: "{{ ip | default(access_ip | default(remove_node_ip.stdout)) | trim }}"
   when:
-    - inventory_hostname in groups['etcd']
+    - ('etcd' in group_names)
 
 - name: Make sure node_ip is set
   assert:
     that: node_ip is defined and node_ip | length > 0
     msg: "Etcd node ip is not set !"
   when:
-    - inventory_hostname in groups['etcd']
+    - ('etcd' in group_names)
 
 - name: Lookup etcd member id
   shell: "set -o pipefail && {{ bin_dir }}/etcdctl member list | grep -w {{ node_ip }} | cut -d, -f1"
@@ -42,7 +42,7 @@
     ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}"
     ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379"
   delegate_to: "{{ groups['etcd'] | first }}"
-  when: inventory_hostname in groups['etcd']
+  when: ('etcd' in group_names)
 
 - name: Remove etcd member from cluster
   command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
@@ -54,5 +54,5 @@
     ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379"
   delegate_to: "{{ groups['etcd'] | first }}"
   when:
-    - inventory_hostname in groups['etcd']
+    - ('etcd' in group_names)
     - etcd_member_id.stdout | length > 0
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 97bdbf597..b68a1777b 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -211,7 +211,7 @@
   command: "ipvsadm -C"
   ignore_errors: true  # noqa ignore-errors
   when:
-    - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster']
+    - kube_proxy_mode == 'ipvs' and 'k8s_cluster' in group_names
 
 - name: Reset | check kube-ipvs0 network device
   stat:
-- 
GitLab