From 486b223e01960e66dc2f2177d739f8b7b8799485 Mon Sep 17 00:00:00 2001
From: Kenichi Omichi <ken1ohmichi@gmail.com>
Date: Tue, 23 Mar 2021 17:26:05 -0700
Subject: [PATCH] Replace kube-master with kube_control_plane (#7256)

This replaces kube-master with kube_control_plane because of [1]:

  The Kubernetes project is moving away from wording that is
  considered offensive. A new working group WG Naming was created
  to track this work, and the word "master" was declared as offensive.
  A proposal was formalized for replacing the word "master" with
  "control plane". This means it should be removed from source code,
  documentation, and user-facing configuration from Kubernetes and
  its sub-projects.

NOTE: The new group is named kube_control_plane rather than
      kube-control-plane because Ansible expects group names to be
      valid identifiers and warns about names containing hyphens.
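
For illustration, a minimal inventory sketch (hypothetical, not part
of this patch): with Ansible's default TRANSFORM_INVALID_GROUP_CHARS
behavior, a hyphenated header such as [kube-control-plane] would be
flagged as an invalid group name, while the underscore form parses
cleanly.

  # Hypothetical INI inventory snippet, illustration only.
  # [kube-control-plane] would draw an "invalid group name" warning
  # from Ansible 2.8+; the underscore form below is a valid Python
  # identifier and is accepted silently.
  [kube_control_plane]
  node1
  node2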

[1]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md#motivation
---
 .gitlab-ci.yml                                |  2 +-
 .gitlab-ci/packet.yml                         |  4 +--
 Vagrantfile                                   |  4 +--
 cluster.yml                                   | 17 +++++++---
 .../aws_inventory/kubespray-aws-inventory.py  |  4 +--
 .../generate-inventory/templates/inventory.j2 |  6 ++--
 .../templates/inventory.j2                    |  6 ++--
 .../generate-templates/templates/masters.json |  2 +-
 contrib/inventory_builder/inventory.py        | 18 +++++-----
 .../inventory_builder/tests/test_inventory.py |  4 +--
 .../network-storage/glusterfs/glusterfs.yml   |  2 +-
 .../glusterfs/inventory.example               |  4 +--
 .../kubernetes-pv/ansible/tasks/main.yaml     |  4 +--
 .../heketi/heketi-tear-down.yml               |  2 +-
 contrib/network-storage/heketi/heketi.yml     |  2 +-
 .../heketi/inventory.yml.sample               |  2 +-
 contrib/terraform/aws/README.md               |  2 +-
 .../terraform/aws/create-infrastructure.tf    |  2 +-
 contrib/terraform/aws/modules/iam/main.tf     | 10 +++---
 contrib/terraform/aws/modules/iam/outputs.tf  |  4 +--
 contrib/terraform/aws/templates/inventory.tpl |  4 +--
 .../exoscale/templates/inventory.tpl          |  6 ++--
 contrib/terraform/gcp/generate-inventory.sh   |  6 ++--
 .../openstack/modules/compute/main.tf         |  8 ++---
 contrib/terraform/packet/kubespray.tf         |  4 +--
 .../terraform/upcloud/templates/inventory.tpl |  4 +--
 .../terraform/vsphere/templates/inventory.tpl |  4 +--
 .../vault-secrets/tasks/gen_certs_vault.yml   | 30 ++++++++--------
 .../tasks/sync_kube_master_certs.yml          |  8 ++---
 .../tasks/sync_kube_node_certs.yml            |  2 +-
 contrib/vault/roles/vault/defaults/main.yml   |  8 ++---
 .../roles/vault/tasks/bootstrap/main.yml      |  2 +-
 .../tasks/bootstrap/sync_vault_certs.yml      |  2 +-
 .../vault/roles/vault/tasks/cluster/main.yml  |  2 +-
 docs/ansible.md                               | 12 +++----
 docs/aws.md                                   |  6 ++--
 docs/calico.md                                |  6 ++--
 docs/downloads.md                             |  2 +-
 docs/getting-started.md                       | 10 +++---
 docs/ha-mode.md                               | 16 ++++-----
 docs/integration.md                           |  2 +-
 docs/large-deployments.md                     |  2 +-
 docs/nodes.md                                 | 12 +++----
 docs/recover-control-plane.md                 |  8 ++---
 docs/test_cases.md                            |  4 +--
 docs/upgrades.md                              |  2 +-
 docs/vars.md                                  |  2 +-
 .../migrate_openstack_provider.yml            |  4 +--
 extra_playbooks/upgrade-only-k8s.yml          |  4 +--
 inventory/local/hosts.ini                     |  4 +--
 inventory/sample/inventory.ini                |  4 +--
 recover-control-plane.yml                     | 13 +++++--
 remove-node.yml                               | 13 +++++--
 reset.yml                                     |  9 +++++
 .../containerd/molecule/default/molecule.yml  |  2 +-
 .../cri-o/molecule/default/molecule.yml       |  8 ++---
 .../molecule/default/molecule.yml             |  4 +--
 roles/download/defaults/main.yml              | 24 ++++++-------
 roles/download/tasks/main.yml                 |  2 +-
 .../ansible/tasks/cleanup_dns.yml             |  6 ++--
 .../kubernetes-apps/ansible/tasks/coredns.yml |  4 +--
 .../ansible/tasks/dashboard.yml               |  4 +--
 roles/kubernetes-apps/ansible/tasks/main.yml  | 10 +++---
 .../ansible/tasks/netchecker.yml              |  4 +--
 .../ansible/tasks/nodelocaldns.yml            |  4 +--
 .../cloud_controller/oci/tasks/main.yml       |  8 ++---
 .../cluster_roles/tasks/main.yml              | 26 +++++++-------
 .../cluster_roles/tasks/oci.yml               |  4 +--
 .../nvidia_gpu/tasks/main.yml                 |  4 +--
 .../container_runtimes/crun/tasks/main.yaml   |  4 +--
 .../kata_containers/tasks/main.yaml           |  4 +--
 .../csi_driver/aws_ebs/tasks/main.yml         |  4 +--
 .../csi_driver/azuredisk/tasks/main.yml       |  8 ++---
 .../csi_driver/cinder/tasks/main.yml          |  8 ++---
 .../csi_driver/csi_crd/tasks/main.yml         |  4 +--
 .../csi_driver/gcp_pd/tasks/main.yml          |  8 ++---
 .../csi_driver/vsphere/tasks/main.yml         | 10 +++---
 .../external_cloud_controller/meta/main.yml   |  4 +--
 .../openstack/tasks/main.yml                  |  8 ++---
 .../vsphere/tasks/main.yml                    | 10 +++---
 .../cephfs_provisioner/tasks/main.yml         | 12 +++----
 .../local_path_provisioner/tasks/main.yml     |  6 ++--
 .../local_volume_provisioner/tasks/main.yml   |  4 +--
 .../external_provisioner/meta/main.yml        |  2 +-
 .../rbd_provisioner/tasks/main.yml            | 12 +++----
 .../alb_ingress_controller/tasks/main.yml     |  4 +--
 .../ambassador/tasks/main.yml                 | 10 +++---
 .../cert_manager/tasks/main.yml               | 16 ++++-----
 .../ingress_nginx/tasks/main.yml              |  6 ++--
 roles/kubernetes-apps/meta/main.yml           | 18 +++++-----
 roles/kubernetes-apps/metallb/tasks/main.yml  | 14 ++++----
 .../metrics_server/tasks/main.yml             | 10 +++---
 .../network_plugin/canal/tasks/main.yml       |  2 +-
 .../network_plugin/cilium/tasks/main.yml      |  4 +--
 .../network_plugin/flannel/tasks/main.yml     |  2 +-
 .../network_plugin/kube-ovn/tasks/main.yml    |  2 +-
 .../network_plugin/kube-router/tasks/main.yml |  4 +--
 .../network_plugin/multus/tasks/main.yml      |  2 +-
 .../network_plugin/ovn4nfv/tasks/main.yml     |  2 +-
 .../network_plugin/weave/tasks/main.yml       |  4 +--
 .../aws-ebs-csi/tasks/main.yml                |  4 +--
 .../azuredisk-csi/tasks/main.yml              |  4 +--
 .../cinder-csi/tasks/main.yml                 |  4 +--
 .../gcp-pd-csi/tasks/main.yml                 |  4 +--
 .../openstack/tasks/main.yml                  |  4 +--
 .../policy_controller/calico/tasks/main.yml   |  4 +--
 roles/kubernetes-apps/registry/tasks/main.yml |  8 ++---
 .../snapshots/cinder-csi/tasks/main.yml       |  4 +--
 .../snapshot-controller/tasks/main.yml        |  4 +--
 .../control-plane/tasks/encrypt-at-rest.yml   |  2 +-
 .../control-plane/tasks/kubeadm-secondary.yml | 12 +++----
 .../control-plane/tasks/kubeadm-setup.yml     | 28 +++++++--------
 .../control-plane/tasks/kubeadm-upgrade.yml   |  6 ++--
 .../templates/k8s-certs-renew.timer.j2        |  2 +-
 .../templates/kubeadm-config.v1beta2.yaml.j2  |  2 +-
 .../kubeadm/tasks/kubeadm_etcd_node.yml       |  2 +-
 roles/kubernetes/kubeadm/tasks/main.yml       | 12 +++----
 roles/kubernetes/node-label/tasks/main.yml    |  4 +--
 roles/kubernetes/node/tasks/install.yml       |  2 +-
 .../templates/loadbalancer/haproxy.cfg.j2     |  2 +-
 .../node/templates/loadbalancer/nginx.conf.j2 |  2 +-
 roles/kubernetes/preinstall/handlers/main.yml | 12 +++----
 .../preinstall/tasks/0020-verify-settings.yml | 12 +++----
 .../kubernetes/tokens/tasks/check-tokens.yml  |  4 +--
 roles/kubernetes/tokens/tasks/gen_tokens.yml  | 16 ++++-----
 roles/kubespray-defaults/defaults/main.yaml   |  8 ++---
 roles/kubespray-defaults/tasks/no_proxy.yml   |  2 +-
 roles/network_plugin/calico/tasks/check.yml   |  2 +-
 roles/network_plugin/calico/tasks/install.yml | 34 +++++++++----------
 roles/network_plugin/calico/tasks/pre.yml     |  2 +-
 .../calico/templates/calicoctl.kdd.sh.j2      |  2 +-
 roles/network_plugin/canal/tasks/main.yml     |  2 +-
 roles/network_plugin/cilium/tasks/install.yml |  2 +-
 roles/network_plugin/cilium/tasks/main.yml    |  2 +-
 roles/network_plugin/flannel/tasks/main.yml   |  2 +-
 roles/network_plugin/kube-ovn/tasks/main.yml  |  4 +--
 .../kube-router/tasks/annotate.yml            | 10 +++---
 .../network_plugin/kube-router/tasks/main.yml |  2 +-
 roles/network_plugin/macvlan/tasks/main.yml   |  2 +-
 roles/network_plugin/ovn4nfv/tasks/main.yml   |  4 +--
 .../control-plane/tasks/main.yml              | 14 ++++----
 roles/remove-node/post-remove/tasks/main.yml  |  2 +-
 roles/remove-node/pre-remove/tasks/main.yml   |  4 +--
 roles/upgrade/post-upgrade/tasks/main.yml     |  2 +-
 roles/upgrade/pre-upgrade/tasks/main.yml      | 10 +++---
 scale.yml                                     | 15 ++++++--
 .../roles/packet-ci/templates/inventory.j2    | 23 +++++++++++++
 tests/scripts/testcases_run.sh                |  2 +-
 tests/templates/inventory-aws.j2              |  6 ++--
 tests/templates/inventory-do.j2               | 10 +++---
 tests/templates/inventory-gce.j2              | 12 +++----
 tests/testcases/010_check-apiserver.yml       |  2 +-
 tests/testcases/015_check-nodes-ready.yml     |  2 +-
 tests/testcases/020_check-pods-running.yml    |  2 +-
 tests/testcases/030_check-network.yml         |  2 +-
 tests/testcases/040_check-network-adv.yml     | 22 ++++++------
 tests/testcases/100_check-k8s-conformance.yml |  2 +-
 .../roles/cluster-dump/tasks/main.yml         |  6 ++--
 upgrade-cluster.yml                           | 23 +++++++++----
 159 files changed, 564 insertions(+), 485 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 8161944e7..412f63d3c 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -30,7 +30,7 @@ variables:
   MITOGEN_ENABLE: "false"
   ANSIBLE_LOG_LEVEL: "-vv"
   RECOVER_CONTROL_PLANE_TEST: "false"
-  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
 
 before_script:
   - ./tests/scripts/rebase.sh
diff --git a/.gitlab-ci/packet.yml b/.gitlab-ci/packet.yml
index e5e9e4c27..a34976360 100644
--- a/.gitlab-ci/packet.yml
+++ b/.gitlab-ci/packet.yml
@@ -223,7 +223,7 @@ packet_ubuntu18-calico-ha-recover:
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
 
 packet_ubuntu18-calico-ha-recover-noquorum:
   stage: deploy-part3
@@ -231,4 +231,4 @@ packet_ubuntu18-calico-ha-recover-noquorum:
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
diff --git a/Vagrantfile b/Vagrantfile
index 1c43f280b..5ee9e4637 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -253,9 +253,9 @@ Vagrant.configure("2") do |config|
           #ansible.tags = ['download']
           ansible.groups = {
             "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
-            "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
+            "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
             "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
-            "k8s-cluster:children" => ["kube-master", "kube-node"],
+            "k8s-cluster:children" => ["kube_control_plane", "kube-node"],
           }
         end
       end
diff --git a/cluster.yml b/cluster.yml
index cf6942a6e..6a169e9b0 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventory which contains kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
@@ -66,7 +75,7 @@
     - { role: kubespray-defaults }
     - { role: kubernetes/node, tags: node }
 
-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -94,7 +103,7 @@
     - { role: kubespray-defaults }
     - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -102,7 +111,7 @@
     - { role: kubespray-defaults }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -114,7 +123,7 @@
     - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
     - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
 
-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
diff --git a/contrib/aws_inventory/kubespray-aws-inventory.py b/contrib/aws_inventory/kubespray-aws-inventory.py
index 91a848b62..46ad6a063 100755
--- a/contrib/aws_inventory/kubespray-aws-inventory.py
+++ b/contrib/aws_inventory/kubespray-aws-inventory.py
@@ -35,7 +35,7 @@ class SearchEC2Tags(object):
     hosts['_meta'] = { 'hostvars': {} }
 
     ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
-    for group in ["kube-master", "kube-node", "etcd"]:
+    for group in ["kube_control_plane", "kube-node", "etcd"]:
       hosts[group] = []
       tag_key = "kubespray-role"
       tag_value = ["*"+group+"*"]
@@ -70,7 +70,7 @@ class SearchEC2Tags(object):
         hosts[group].append(dns_name)
         hosts['_meta']['hostvars'][dns_name] = ansible_host
         
-    hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
+    hosts['k8s-cluster'] = {'children':['kube_control_plane', 'kube-node']}
     print(json.dumps(hosts, sort_keys=True, indent=2))
 
 SearchEC2Tags()
diff --git a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory/templates/inventory.j2
index cd93a2bb6..8a13cc635 100644
--- a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2
+++ b/contrib/azurerm/roles/generate-inventory/templates/inventory.j2
@@ -7,9 +7,9 @@
 {% endif %}
 {% endfor %}
 
-[kube-master]
+[kube_control_plane]
 {% for vm in vm_list %}
-{% if 'kube-master' in vm.tags.roles %}
+{% if 'kube_control_plane' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}
@@ -30,4 +30,4 @@
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
diff --git a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
index 21f7bbf1c..61183cd1d 100644
--- a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
+++ b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
@@ -7,9 +7,9 @@
 {% endif %}
 {% endfor %}
 
-[kube-master]
+[kube_control_plane]
 {% for vm in vm_roles_list %}
-{% if 'kube-master' in vm.tags.roles %}
+{% if 'kube_control_plane' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}
@@ -30,5 +30,5 @@
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 
diff --git a/contrib/azurerm/roles/generate-templates/templates/masters.json b/contrib/azurerm/roles/generate-templates/templates/masters.json
index 69a42cb68..b299383a6 100644
--- a/contrib/azurerm/roles/generate-templates/templates/masters.json
+++ b/contrib/azurerm/roles/generate-templates/templates/masters.json
@@ -144,7 +144,7 @@
         "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
       ],
       "tags": {
-        "roles": "kube-master,etcd"
+        "roles": "kube_control_plane,etcd"
       },
       "apiVersion": "{{apiVersion}}",
       "properties": {
diff --git a/contrib/inventory_builder/inventory.py b/contrib/inventory_builder/inventory.py
index 66d618474..814085a73 100644
--- a/contrib/inventory_builder/inventory.py
+++ b/contrib/inventory_builder/inventory.py
@@ -44,7 +44,7 @@ import re
 import subprocess
 import sys
 
-ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
+ROLES = ['all', 'kube_control_plane', 'kube-node', 'etcd', 'k8s-cluster',
          'calico-rr']
 PROTECTED_NAMES = ROLES
 AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
@@ -299,21 +299,23 @@ class KubesprayInventory(object):
 
     def set_kube_control_plane(self, hosts):
         for host in hosts:
-            self.add_host_to_group('kube-master', host)
+            self.add_host_to_group('kube_control_plane', host)
 
     def set_all(self, hosts):
         for host, opts in hosts.items():
             self.add_host_to_group('all', host, opts)
 
     def set_k8s_cluster(self):
-        k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
+        k8s_cluster = {'children': {'kube_control_plane': None,
+                                    'kube-node': None}}
         self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
 
     def set_calico_rr(self, hosts):
         for host in hosts:
-            if host in self.yaml_config['all']['children']['kube-master']:
+            if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa
                 self.debug("Not adding {0} to calico-rr group because it "
-                           "conflicts with kube-master group".format(host))
+                           "conflicts with kube_control_plane "
+                           "group".format(host))
                 continue
             if host in self.yaml_config['all']['children']['kube-node']:
                 self.debug("Not adding {0} to calico-rr group because it "
@@ -330,10 +332,10 @@ class KubesprayInventory(object):
                                "group.".format(host))
                     continue
             if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
-                if host in self.yaml_config['all']['children']['kube-master']['hosts']:  # noqa
+                if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
                     self.debug("Not adding {0} to kube-node group because of "
-                               "scale deployment and host is in kube-master "
-                               "group.".format(host))
+                               "scale deployment and host is in "
+                               "kube_control_plane group.".format(host))
                     continue
             self.add_host_to_group('kube-node', host)
 
diff --git a/contrib/inventory_builder/tests/test_inventory.py b/contrib/inventory_builder/tests/test_inventory.py
index afcbe7500..c76990240 100644
--- a/contrib/inventory_builder/tests/test_inventory.py
+++ b/contrib/inventory_builder/tests/test_inventory.py
@@ -223,7 +223,7 @@ class TestInventory(unittest.TestCase):
             None)
 
     def test_set_kube_control_plane(self):
-        group = 'kube-master'
+        group = 'kube_control_plane'
         host = 'node1'
 
         self.inv.set_kube_control_plane([host])
@@ -242,7 +242,7 @@ class TestInventory(unittest.TestCase):
 
     def test_set_k8s_cluster(self):
         group = 'k8s-cluster'
-        expected_hosts = ['kube-node', 'kube-master']
+        expected_hosts = ['kube-node', 'kube_control_plane']
 
         self.inv.set_k8s_cluster()
         for host in expected_hosts:
diff --git a/contrib/network-storage/glusterfs/glusterfs.yml b/contrib/network-storage/glusterfs/glusterfs.yml
index e5b6f1301..8146dfc06 100644
--- a/contrib/network-storage/glusterfs/glusterfs.yml
+++ b/contrib/network-storage/glusterfs/glusterfs.yml
@@ -19,6 +19,6 @@
   roles:
     - { role: glusterfs/client }
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   roles:
     - { role: kubernetes-pv }
diff --git a/contrib/network-storage/glusterfs/inventory.example b/contrib/network-storage/glusterfs/inventory.example
index 15fbad0a8..dc77b4b0a 100644
--- a/contrib/network-storage/glusterfs/inventory.example
+++ b/contrib/network-storage/glusterfs/inventory.example
@@ -14,7 +14,7 @@
 # gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc  ip=10.3.0.8 
 # gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc  ip=10.3.0.9 
 
-# [kube-master]
+# [kube_control_plane]
 # node1
 # node2
 
@@ -32,7 +32,7 @@
 
 # [k8s-cluster:children]
 # kube-node
-# kube-master
+# kube_control_plane
 
 # [gfs-cluster]
 # gfs_node1
diff --git a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
index baf8356b6..5ed8f6944 100644
--- a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
+++ b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
@@ -8,7 +8,7 @@
     - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
     - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
   register: gluster_pv
-  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
+  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
 
 - name: Kubernetes Apps | Set GlusterFS endpoint and PV
   kube:
@@ -19,4 +19,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ gluster_pv.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
+  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
diff --git a/contrib/network-storage/heketi/heketi-tear-down.yml b/contrib/network-storage/heketi/heketi-tear-down.yml
index 92b9f92d6..9e2d1f45a 100644
--- a/contrib/network-storage/heketi/heketi-tear-down.yml
+++ b/contrib/network-storage/heketi/heketi-tear-down.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   roles:
     - { role: tear-down }
 
diff --git a/contrib/network-storage/heketi/heketi.yml b/contrib/network-storage/heketi/heketi.yml
index 3ec719e95..2309267b1 100644
--- a/contrib/network-storage/heketi/heketi.yml
+++ b/contrib/network-storage/heketi/heketi.yml
@@ -3,7 +3,7 @@
   roles:
     - { role: prepare }
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   tags:
     - "provision"
   roles:
diff --git a/contrib/network-storage/heketi/inventory.yml.sample b/contrib/network-storage/heketi/inventory.yml.sample
index 7d488d1ba..46adbed44 100644
--- a/contrib/network-storage/heketi/inventory.yml.sample
+++ b/contrib/network-storage/heketi/inventory.yml.sample
@@ -7,7 +7,7 @@ all:
             vars:
                 kubelet_fail_swap_on: false
             children:
-                kube-master:
+                kube_control_plane:
                     hosts:
                         node1:
                 etcd:
diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md
index ea58de93e..993d2bb84 100644
--- a/contrib/terraform/aws/README.md
+++ b/contrib/terraform/aws/README.md
@@ -122,7 +122,7 @@ You can use the following set of commands to get the kubeconfig file from your n
 
 ```commandline
 # Get the controller's IP address.
-CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube-master\]" -A 1 | tail -n 1)
+CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
 CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)
 
 # Get the hostname of the load balancer.
diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf
index 72aa27c79..1c7f036b7 100644
--- a/contrib/terraform/aws/create-infrastructure.tf
+++ b/contrib/terraform/aws/create-infrastructure.tf
@@ -84,7 +84,7 @@ resource "aws_instance" "k8s-master" {
 
   vpc_security_group_ids = module.aws-vpc.aws_security_group
 
-  iam_instance_profile = module.aws-iam.kube-master-profile
+  iam_instance_profile = module.aws-iam.kube_control_plane-profile
   key_name             = var.AWS_SSH_KEY_NAME
 
   tags = merge(var.default_tags, map(
diff --git a/contrib/terraform/aws/modules/iam/main.tf b/contrib/terraform/aws/modules/iam/main.tf
index 1dc3c3a6a..a35afc7e5 100644
--- a/contrib/terraform/aws/modules/iam/main.tf
+++ b/contrib/terraform/aws/modules/iam/main.tf
@@ -1,6 +1,6 @@
 #Add AWS Roles for Kubernetes
 
-resource "aws_iam_role" "kube-master" {
+resource "aws_iam_role" "kube_control_plane" {
   name = "kubernetes-${var.aws_cluster_name}-master"
 
   assume_role_policy = <<EOF
@@ -40,9 +40,9 @@ EOF
 
 #Add AWS Policies for Kubernetes
 
-resource "aws_iam_role_policy" "kube-master" {
+resource "aws_iam_role_policy" "kube_control_plane" {
   name = "kubernetes-${var.aws_cluster_name}-master"
-  role = aws_iam_role.kube-master.id
+  role = aws_iam_role.kube_control_plane.id
 
   policy = <<EOF
 {
@@ -130,9 +130,9 @@ EOF
 
 #Create AWS Instance Profiles
 
-resource "aws_iam_instance_profile" "kube-master" {
+resource "aws_iam_instance_profile" "kube_control_plane" {
   name = "kube_${var.aws_cluster_name}_master_profile"
-  role = aws_iam_role.kube-master.name
+  role = aws_iam_role.kube_control_plane.name
 }
 
 resource "aws_iam_instance_profile" "kube-worker" {
diff --git a/contrib/terraform/aws/modules/iam/outputs.tf b/contrib/terraform/aws/modules/iam/outputs.tf
index e8a276617..724e46cc8 100644
--- a/contrib/terraform/aws/modules/iam/outputs.tf
+++ b/contrib/terraform/aws/modules/iam/outputs.tf
@@ -1,5 +1,5 @@
-output "kube-master-profile" {
-  value = aws_iam_instance_profile.kube-master.name
+output "kube_control_plane-profile" {
+  value = aws_iam_instance_profile.kube_control_plane.name
 }
 
 output "kube-worker-profile" {
diff --git a/contrib/terraform/aws/templates/inventory.tpl b/contrib/terraform/aws/templates/inventory.tpl
index beb4f76a9..d8fe2f995 100644
--- a/contrib/terraform/aws/templates/inventory.tpl
+++ b/contrib/terraform/aws/templates/inventory.tpl
@@ -7,7 +7,7 @@ ${public_ip_address_bastion}
 [bastion]
 ${public_ip_address_bastion}
 
-[kube-master]
+[kube_control_plane]
 ${list_master}
 
 
@@ -21,7 +21,7 @@ ${list_etcd}
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 
 
 [k8s-cluster:vars]
diff --git a/contrib/terraform/exoscale/templates/inventory.tpl b/contrib/terraform/exoscale/templates/inventory.tpl
index fd9a03484..27b9e60f3 100644
--- a/contrib/terraform/exoscale/templates/inventory.tpl
+++ b/contrib/terraform/exoscale/templates/inventory.tpl
@@ -2,10 +2,10 @@
 ${connection_strings_master}
 ${connection_strings_worker}
 
-[kube-master]
+[kube_control_plane]
 ${list_master}
 
-[kube-master:vars]
+[kube_control_plane:vars]
 supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]
 
 [etcd]
@@ -15,5 +15,5 @@ ${list_master}
 ${list_worker}
 
 [k8s-cluster:children]
-kube-master
+kube_control_plane
 kube-node
diff --git a/contrib/terraform/gcp/generate-inventory.sh b/contrib/terraform/gcp/generate-inventory.sh
index 36cbcd776..d266b1899 100755
--- a/contrib/terraform/gcp/generate-inventory.sh
+++ b/contrib/terraform/gcp/generate-inventory.sh
@@ -50,13 +50,13 @@ for name in "${WORKER_NAMES[@]}"; do
 done
 
 echo ""
-echo "[kube-master]"
+echo "[kube_control_plane]"
 for name in "${MASTER_NAMES[@]}"; do
   echo "${name}"
 done
 
 echo ""
-echo "[kube-master:vars]"
+echo "[kube_control_plane:vars]"
 echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate
 echo ""
 echo "[etcd]"
@@ -72,5 +72,5 @@ done
 
 echo ""
 echo "[k8s-cluster:children]"
-echo "kube-master"
+echo "kube_control_plane"
 echo "kube-node"
diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf
index c8d9f4ff3..5084468f5 100644
--- a/contrib/terraform/openstack/modules/compute/main.tf
+++ b/contrib/terraform/openstack/modules/compute/main.tf
@@ -245,7 +245,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
@@ -292,7 +292,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
@@ -379,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
@@ -421,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
diff --git a/contrib/terraform/packet/kubespray.tf b/contrib/terraform/packet/kubespray.tf
index 568db0dd7..00cf21ff0 100644
--- a/contrib/terraform/packet/kubespray.tf
+++ b/contrib/terraform/packet/kubespray.tf
@@ -19,7 +19,7 @@ resource "packet_device" "k8s_master" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"]
+  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane", "etcd", "kube-node"]
 }
 
 resource "packet_device" "k8s_master_no_etcd" {
@@ -32,7 +32,7 @@ resource "packet_device" "k8s_master_no_etcd" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master"]
+  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane"]
 }
 
 resource "packet_device" "k8s_etcd" {
diff --git a/contrib/terraform/upcloud/templates/inventory.tpl b/contrib/terraform/upcloud/templates/inventory.tpl
index 26d65e67b..cb453e3ea 100644
--- a/contrib/terraform/upcloud/templates/inventory.tpl
+++ b/contrib/terraform/upcloud/templates/inventory.tpl
@@ -3,7 +3,7 @@
 ${connection_strings_master}
 ${connection_strings_worker}
 
-[kube-master]
+[kube_control_plane]
 ${list_master}
 
 [etcd]
@@ -13,5 +13,5 @@ ${list_master}
 ${list_worker}
 
 [k8s-cluster:children]
-kube-master
+kube_control_plane
 kube-node
diff --git a/contrib/terraform/vsphere/templates/inventory.tpl b/contrib/terraform/vsphere/templates/inventory.tpl
index 26d65e67b..cb453e3ea 100644
--- a/contrib/terraform/vsphere/templates/inventory.tpl
+++ b/contrib/terraform/vsphere/templates/inventory.tpl
@@ -3,7 +3,7 @@
 ${connection_strings_master}
 ${connection_strings_worker}
 
-[kube-master]
+[kube_control_plane]
 ${list_master}
 
 [etcd]
@@ -13,5 +13,5 @@ ${list_master}
 ${list_worker}
 
 [k8s-cluster:children]
-kube-master
+kube_control_plane
 kube-node
diff --git a/contrib/vault/roles/kubernetes/vault-secrets/tasks/gen_certs_vault.yml b/contrib/vault/roles/kubernetes/vault-secrets/tasks/gen_certs_vault.yml
index 8a847b002..75f155060 100644
--- a/contrib/vault/roles/kubernetes/vault-secrets/tasks/gen_certs_vault.yml
+++ b/contrib/vault/roles/kubernetes/vault-secrets/tasks/gen_certs_vault.yml
@@ -1,30 +1,30 @@
 ---
 - import_tasks: sync_kube_master_certs.yml
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
 
 - import_tasks: sync_kube_node_certs.yml
   when: inventory_hostname in groups['k8s-cluster']
 
-# Issue admin certs to kube-master hosts
+# Issue admin certs to kube_control_plane hosts
 - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
   vars:
     issue_cert_common_name: "admin"
     issue_cert_copy_ca: "{{ item == kube_admin_certs_needed|first }}"
     issue_cert_file_group: "{{ kube_cert_group }}"
     issue_cert_file_owner: kube
-    issue_cert_hosts: "{{ groups['kube-master'] }}"
+    issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
     issue_cert_path: "{{ item }}"
-    issue_cert_role: kube-master
+    issue_cert_role: kube_control_plane
     issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
     issue_cert_mount_path: "{{ kube_vault_mount_path }}"
   with_items: "{{ kube_admin_certs_needed|d([]) }}"
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
 
 - name: gen_certs_vault | Set fact about certificate alt names
   set_fact:
     kube_cert_alt_names: >-
       {{
-      groups['kube-master'] +
+      groups['kube_control_plane'] +
       ['kubernetes.default.svc.'+cluster_name, 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] +
       ['localhost']
       }}
@@ -36,18 +36,18 @@
   when: loadbalancer_apiserver is defined
   run_once: true
 
-# Issue master components certs to kube-master hosts
+# Issue master components certs to kube_control_plane hosts
 - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
   vars:
     issue_cert_common_name: "kubernetes"
     issue_cert_alt_names: "{{ kube_cert_alt_names }}"
     issue_cert_file_group: "{{ kube_cert_group }}"
     issue_cert_file_owner: kube
-    issue_cert_hosts: "{{ groups['kube-master'] }}"
+    issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
     issue_cert_run_once: true
     issue_cert_ip_sans: >-
         [
-        {%- for host in groups['kube-master']  -%}
+        {%- for host in groups['kube_control_plane']  -%}
         "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
         {%- if hostvars[host]['ip'] is defined -%}
         "{{ hostvars[host]['ip'] }}",
@@ -61,11 +61,11 @@
         "127.0.0.1","::1","{{ kube_apiserver_ip }}"
         ]
     issue_cert_path: "{{ item }}"
-    issue_cert_role: kube-master
+    issue_cert_role: kube_control_plane
     issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
     issue_cert_mount_path: "{{ kube_vault_mount_path }}"
   with_items: "{{ kube_master_components_certs_needed|d([]) }}"
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
   notify: set secret_changed
 
 # Issue node certs to k8s-cluster nodes
@@ -100,7 +100,7 @@
   with_items: "{{ kube_proxy_certs_needed|d([]) }}"
   when: inventory_hostname in groups['k8s-cluster']
 
-# Issue front proxy cert to kube-master hosts
+# Issue front proxy cert to kube_control_plane hosts
 - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
   vars:
     issue_cert_common_name: "front-proxy-client"
@@ -109,10 +109,10 @@
     issue_cert_alt_names: "{{ kube_cert_alt_names }}"
     issue_cert_file_group: "{{ kube_cert_group }}"
     issue_cert_file_owner: kube
-    issue_cert_hosts: "{{ groups['kube-master'] }}"
+    issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
     issue_cert_ip_sans: >-
         [
-        {%- for host in groups['kube-master']  -%}
+        {%- for host in groups['kube_control_plane']  -%}
         "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
         {%- if hostvars[host]['ip'] is defined -%}
         "{{ hostvars[host]['ip'] }}",
@@ -130,5 +130,5 @@
     issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
     issue_cert_mount_path: "{{ kube_vault_mount_path }}"
   with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}"
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
   notify: set secret_changed
diff --git a/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_master_certs.yml b/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_master_certs.yml
index 50e1a01e7..6db7c9ddf 100644
--- a/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_master_certs.yml
+++ b/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_master_certs.yml
@@ -29,7 +29,7 @@
     sync_file: "{{ item }}"
     sync_file_dir: "{{ kube_cert_dir }}"
     sync_file_group: "{{ kube_cert_group }}"
-    sync_file_hosts: "{{ groups['kube-master'] }}"
+    sync_file_hosts: "{{ groups['kube_control_plane'] }}"
     sync_file_is_cert: true
     sync_file_owner: kube
   with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"]
@@ -49,7 +49,7 @@
     sync_file: front-proxy-ca.pem
     sync_file_dir: "{{ kube_cert_dir }}"
     sync_file_group: "{{ kube_cert_group }}"
-    sync_file_hosts: "{{ groups['kube-master'] }}"
+    sync_file_hosts: "{{ groups['kube_control_plane'] }}"
     sync_file_owner: kube
 
 - name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem
@@ -61,7 +61,7 @@
     sync_file: "{{ item }}"
     sync_file_dir: "{{ kube_cert_dir }}"
     sync_file_group: "{{ kube_cert_group }}"
-    sync_file_hosts: "{{ groups['kube-master'] }}"
+    sync_file_hosts: "{{ groups['kube_control_plane'] }}"
     sync_file_is_cert: true
     sync_file_owner: kube
   with_items: ["front-proxy-client.pem"]
@@ -81,7 +81,7 @@
     sync_file: ca.pem
     sync_file_dir: "{{ kube_cert_dir }}"
     sync_file_group: "{{ kube_cert_group }}"
-    sync_file_hosts: "{{ groups['kube-master'] }}"
+    sync_file_hosts: "{{ groups['kube_control_plane'] }}"
     sync_file_owner: kube
 
 - name: sync_kube_master_certs | Unset sync_file_results after ca.pem
diff --git a/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_node_certs.yml b/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_node_certs.yml
index eecb4cfdf..059359a58 100644
--- a/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_node_certs.yml
+++ b/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_node_certs.yml
@@ -14,7 +14,7 @@
     sync_file_owner: kube
   with_items: "{{ kube_node_cert_list|default([]) }}"
 
-- name: sync_kube_node_certs | Set facts for kube-master sync_file results
+- name: sync_kube_node_certs | Set facts for kube_control_plane sync_file results
   set_fact:
     kube_node_certs_needed: "{{ kube_node_certs_needed|default([]) + [item.path] }}"
   with_items: "{{ sync_file_results|d([]) }}"
diff --git a/contrib/vault/roles/vault/defaults/main.yml b/contrib/vault/roles/vault/defaults/main.yml
index 0b27e03ff..ececdc2ad 100644
--- a/contrib/vault/roles/vault/defaults/main.yml
+++ b/contrib/vault/roles/vault/defaults/main.yml
@@ -166,16 +166,16 @@ vault_pki_mounts:
     description: "Kubernetes Root CA"
     cert_dir: "{{ kube_cert_dir }}"
     roles:
-      - name: kube-master
-        group: kube-master
-        password: "{{ lookup('password', credentials_dir + '/vault/kube-master.creds length=15') }}"
+      - name: kube_control_plane
+        group: kube_control_plane
+        password: "{{ lookup('password', credentials_dir + '/vault/kube_control_plane.creds length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
           enforce_hostnames: false
           organization: "system:masters"
       - name: front-proxy-client
-        group: kube-master
+        group: kube_control_plane
         password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
         policy_rules: default
         role_options:
diff --git a/contrib/vault/roles/vault/tasks/bootstrap/main.yml b/contrib/vault/roles/vault/tasks/bootstrap/main.yml
index e4e67d11f..419e22e1b 100644
--- a/contrib/vault/roles/vault/tasks/bootstrap/main.yml
+++ b/contrib/vault/roles/vault/tasks/bootstrap/main.yml
@@ -51,7 +51,7 @@
     gen_ca_mount_path: "/{{ vault_pki_mounts.vault.name }}"
     gen_ca_vault_headers: "{{ vault_headers }}"
     gen_ca_vault_options: "{{ vault_ca_options.vault }}"
-    gen_ca_copy_group: "kube-master"
+    gen_ca_copy_group: "kube_control_plane"
   when: >-
         inventory_hostname in groups.vault
         and not vault_cluster_is_initialized
diff --git a/contrib/vault/roles/vault/tasks/bootstrap/sync_vault_certs.yml b/contrib/vault/roles/vault/tasks/bootstrap/sync_vault_certs.yml
index cf499099a..28c438ca7 100644
--- a/contrib/vault/roles/vault/tasks/bootstrap/sync_vault_certs.yml
+++ b/contrib/vault/roles/vault/tasks/bootstrap/sync_vault_certs.yml
@@ -21,7 +21,7 @@
   vars:
     sync_file: "ca.pem"
     sync_file_dir: "{{ vault_cert_dir }}"
-    sync_file_hosts: "{{ groups['kube-master'] }}"
+    sync_file_hosts: "{{ groups['kube_control_plane'] }}"
     sync_file_owner: vault
     sync_file_group: root
     sync_file_is_cert: false
diff --git a/contrib/vault/roles/vault/tasks/cluster/main.yml b/contrib/vault/roles/vault/tasks/cluster/main.yml
index 3ed23b2cc..74b399c25 100644
--- a/contrib/vault/roles/vault/tasks/cluster/main.yml
+++ b/contrib/vault/roles/vault/tasks/cluster/main.yml
@@ -35,7 +35,7 @@
     gen_ca_mount_path: "/{{ vault_pki_mounts.kube.name }}"
     gen_ca_vault_headers: "{{ vault_headers }}"
     gen_ca_vault_options: "{{ vault_ca_options.kube }}"
-    gen_ca_copy_group: "kube-master"
+    gen_ca_copy_group: "kube_control_plane"
   when: inventory_hostname in groups.vault
 
 - include_tasks: ../shared/auth_backend.yml
diff --git a/docs/ansible.md b/docs/ansible.md
index 848862ff5..202a79333 100644
--- a/docs/ansible.md
+++ b/docs/ansible.md
@@ -5,7 +5,7 @@
 The inventory is composed of 3 groups:
 
 * **kube-node** : list of kubernetes nodes where the pods will run.
-* **kube-master** : list of servers where kubernetes master components (apiserver, scheduler, controller) will run.
+* **kube_control_plane** : list of servers where kubernetes control plane components (apiserver, scheduler, controller) will run.
 * **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose.
 
 Note: do not modify the children of _k8s-cluster_, like putting
@@ -18,9 +18,9 @@ k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd
 
 When _kube-node_ contains _etcd_, you define your etcd cluster to be as well schedulable for Kubernetes workloads.
 If you want it a standalone, make sure those groups do not intersect.
-If you want the server to act both as master and node, the server must be defined
-on both groups _kube-master_ and _kube-node_. If you want a standalone and
-unschedulable master, the server must be defined only in the _kube-master_ and
+If you want the server to act both as control-plane and node, the server must be defined
+on both groups _kube_control_plane_ and _kube-node_. If you want a standalone and
+unschedulable master, the server must be defined only in the _kube_control_plane_ and
 not _kube-node_.
 
 There are also two special groups:
@@ -40,7 +40,7 @@ node4 ansible_host=95.54.0.15 ip=10.3.0.4
 node5 ansible_host=95.54.0.16 ip=10.3.0.5
 node6 ansible_host=95.54.0.17 ip=10.3.0.6
 
-[kube-master]
+[kube_control_plane]
 node1
 node2
 
@@ -58,7 +58,7 @@ node6
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 ```
 
 ## Group vars and overriding variables precedence
diff --git a/docs/aws.md b/docs/aws.md
index c76d1cfdf..0e680e0d1 100644
--- a/docs/aws.md
+++ b/docs/aws.md
@@ -35,11 +35,11 @@ This will produce an inventory that is passed into Ansible that looks like the f
   ],
   "k8s-cluster": {
     "children": [
-      "kube-master",
+      "kube_control_plane",
       "kube-node"
     ]
   },
-  "kube-master": [
+  "kube_control_plane": [
     "ip-172-31-3-xxx.us-east-2.compute.internal"
   ],
   "kube-node": [
@@ -51,7 +51,7 @@ This will produce an inventory that is passed into Ansible that looks like the f
 Guide:
 
 - Create instances in AWS as needed.
-- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
+- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube-node`. You can also share roles like `kube_control_plane, etcd`
 - Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
 - Set the following AWS credentials and info as environment variables in your terminal:
 
diff --git a/docs/calico.md b/docs/calico.md
index 7e5f86568..45d1b0e90 100644
--- a/docs/calico.md
+++ b/docs/calico.md
@@ -122,7 +122,7 @@ recommended here:
 You need to edit your inventory and add:
 
 * `calico-rr` group with nodes in it. `calico-rr` can be combined with
-  `kube-node` and/or `kube-master`. `calico-rr` group also must be a child
+  `kube-node` and/or `kube_control_plane`. `calico-rr` group also must be a child
    group of `k8s-cluster` group.
 * `cluster_id` by route reflector node/group (see details
 [here](https://hub.docker.com/r/calico/routereflector/))
@@ -138,7 +138,7 @@ node3 ansible_ssh_host=10.210.1.13 ip=10.210.1.13
 node4 ansible_ssh_host=10.210.1.14 ip=10.210.1.14
 node5 ansible_ssh_host=10.210.1.15 ip=10.210.1.15
 
-[kube-master]
+[kube_control_plane]
 node2
 node3
 
@@ -155,7 +155,7 @@ node5
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 calico-rr
 
 [calico-rr]
diff --git a/docs/downloads.md b/docs/downloads.md
index 781543add..4369120d4 100644
--- a/docs/downloads.md
+++ b/docs/downloads.md
@@ -8,7 +8,7 @@ Kubespray supports several download/upload modes. The default is:
 
 There is also a "pull once, push many" mode as well:
 
-* Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube-master`.
+* Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube_control_plane`.
 * Set ``download_localhost: True`` to make localhost the download delegate. This can be useful if cluster nodes cannot access external addresses. To use this requires that docker is installed and running on the ansible master and that the current user is either in the docker group or can do passwordless sudo, to be able to access docker.
 
 NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the docker instance on that node, instead of just the images required for that node.
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 6b3cd1882..18a50e017 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -76,16 +76,16 @@ var in inventory.
 
 ## Connecting to Kubernetes
 
-By default, Kubespray configures kube-master hosts with insecure access to
+By default, Kubespray configures kube_control_plane hosts with insecure access to
 kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
 because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
-generated will point to localhost (on kube-masters) and kube-node hosts will
+generated will point to localhost (on kube_control_planes) and kube-node hosts will
 connect either to a localhost nginx proxy or to a loadbalancer if configured.
 More details on this process are in the [HA guide](/docs/ha-mode.md).
 
 Kubespray permits connecting to the cluster remotely on any IP of any
-kube-master host on port 6443 by default. However, this requires
-authentication. One can get a kubeconfig from kube-master hosts
+kube_control_plane host on port 6443 by default. However, this requires
+authentication. One can get a kubeconfig from kube_control_plane hosts
 (see [below](#accessing-kubernetes-api)) or connect with a [username and password](/docs/vars.md#user-accounts).
 
 For more information on kubeconfig and accessing a Kubernetes cluster, refer to
@@ -119,7 +119,7 @@ kubectl proxy
 
 ## Accessing Kubernetes API
 
-The main client of Kubernetes is `kubectl`. It is installed on each kube-master
+The main client of Kubernetes is `kubectl`. It is installed on each kube_control_plane
 host and can optionally be configured on your ansible host by setting
 `kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration:
 
diff --git a/docs/ha-mode.md b/docs/ha-mode.md
index ddf330715..668558f17 100644
--- a/docs/ha-mode.md
+++ b/docs/ha-mode.md
@@ -32,7 +32,7 @@ If you choose to NOT use the local internal loadbalancer, you will need to
 configure your own loadbalancer to achieve HA. Note that deploying a
 loadbalancer is up to a user and is not covered by ansible roles in Kubespray.
 By default, it only configures a non-HA endpoint, which points to the
-`access_ip` or IP address of the first server node in the `kube-master` group.
+`access_ip` or IP address of the first server node in the `kube_control_plane` group.
 It can also configure clients to use endpoints for a given loadbalancer type.
 The following diagram shows how traffic to the apiserver is directed.
 
@@ -102,16 +102,16 @@ exclusive to `loadbalancer_apiserver_localhost`.
 
 Access API endpoints are evaluated automatically, as the following:
 
-| Endpoint type                | kube-master      | non-master              | external              |
-|------------------------------|------------------|-------------------------|-----------------------|
-| Local LB (default)           | `https://bip:sp` | `https://lc:nsp`        | `https://m[0].aip:sp` |
-| Local LB + Unmanaged here LB | `https://bip:sp` | `https://lc:nsp`        | `https://ext`         |
-| External LB, no internal     | `https://bip:sp` | `<https://lb:lp>`       | `https://lb:lp`       |
-| No ext/int LB                | `https://bip:sp` | `<https://m[0].aip:sp>` | `https://m[0].aip:sp` |
+| Endpoint type                | kube_control_plane | non-master              | external              |
+|------------------------------|--------------------|-------------------------|-----------------------|
+| Local LB (default)           | `https://bip:sp`   | `https://lc:nsp`        | `https://m[0].aip:sp` |
+| Local LB + Unmanaged here LB | `https://bip:sp`   | `https://lc:nsp`        | `https://ext`         |
+| External LB, no internal     | `https://bip:sp`   | `<https://lb:lp>`       | `https://lb:lp`       |
+| No ext/int LB                | `https://bip:sp`   | `<https://m[0].aip:sp>` | `https://m[0].aip:sp` |
 
 Where:
 
-* `m[0]` - the first node in the `kube-master` group;
+* `m[0]` - the first node in the `kube_control_plane` group;
 * `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
 * `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
 * `lc` - localhost;
diff --git a/docs/integration.md b/docs/integration.md
index 4eab2535b..09c044fa8 100644
--- a/docs/integration.md
+++ b/docs/integration.md
@@ -62,7 +62,7 @@ You could rename *all.yml* config to something else, i.e. *kubespray.yml* and cr
      kubemaster
      kubemaster-ha
 
-     [kube-master:children]
+     [kube_control_plane:children]
      kubemaster
      kubemaster-ha
 
diff --git a/docs/large-deployments.md b/docs/large-deployments.md
index 8b8ebef4e..130bcf0e7 100644
--- a/docs/large-deployments.md
+++ b/docs/large-deployments.md
@@ -39,7 +39,7 @@ For a large scaled deployments, consider the following configuration changes:
 
 * Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
   from host/network interruption much quicker with calico-rr. Note that
-  calico-rr role must be on a host without kube-master or kube-node role (but
+  calico-rr role must be on a host without kube_control_plane or kube-node role (but
   etcd role is okay).
 
 * Check out the
diff --git a/docs/nodes.md b/docs/nodes.md
index 60844794d..f369a5f3d 100644
--- a/docs/nodes.md
+++ b/docs/nodes.md
@@ -2,9 +2,9 @@
 
 Modified from [comments in #3471](https://github.com/kubernetes-sigs/kubespray/issues/3471#issuecomment-530036084)
 
-## Limitation: Removal of first kube-master and etcd-master
+## Limitation: Removal of first kube_control_plane and etcd-master
 
-Currently you can't remove the first node in your kube-master and etcd-master list. If you still want to remove this node you have to:
+Currently you can't remove the first node in your kube_control_plane and etcd-master list. If you still want to remove this node you have to:
 
 ### 1) Change order of current masters
 
@@ -12,7 +12,7 @@ Modify the order of your master list by pushing your first entry to any other po
 
 ```yaml
   children:
-    kube-master:
+    kube_control_plane:
       hosts:
         node-1:
         node-2:
@@ -33,7 +33,7 @@ change your inventory to:
 
 ```yaml
   children:
-    kube-master:
+    kube_control_plane:
       hosts:
         node-2:
         node-3:
@@ -103,10 +103,10 @@ You need to make sure there are always an odd number of etcd nodes in the cluste
 
 ### 1) Add the new node running cluster.yml
 
-Update the inventory and run `cluster.yml` passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`.
+Update the inventory and run `cluster.yml` passing `--limit=etcd,kube_control_plane -e ignore_assert_errors=yes`.
 If the node you want to add as an etcd node is already a worker or master node in your cluster, you have to remove him first using `remove-node.yml`.
 
-Run `upgrade-cluster.yml` also passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster.  
+Run `upgrade-cluster.yml` also passing `--limit=etcd,kube_control_plane -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster.  
 
 At this point, you will have an even number of nodes.
 Everything should still be working, and you should only have problems if the cluster decides to elect a new etcd leader before you remove a node.
diff --git a/docs/recover-control-plane.md b/docs/recover-control-plane.md
index d24a4c73a..b454310f0 100644
--- a/docs/recover-control-plane.md
+++ b/docs/recover-control-plane.md
@@ -5,8 +5,8 @@ To recover from broken nodes in the control plane use the "recover\-control\-pla
 
 * Backup what you can
 * Provision new nodes to replace the broken ones
-* Place the surviving nodes of the control plane first in the "etcd" and "kube-master" groups
-* Add the new nodes below the surviving control plane nodes in the "etcd" and "kube-master" groups
+* Place the surviving nodes of the control plane first in the "etcd" and "kube\_control\_plane" groups
+* Add the new nodes below the surviving control plane nodes in the "etcd" and "kube\_control\_plane" groups
 
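For illustration, an inventory ordered as described above might look like this (host names are hypothetical):

```yaml
  children:
    etcd:
      hosts:
        surviving-node:   # surviving member listed first
        new-node-1:
        new-node-2:
    kube_control_plane:
      hosts:
        surviving-node:
        new-node-1:
        new-node-2:
```
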
 Examples of what broken means in this context:
 
@@ -20,9 +20,9 @@ __Note that you need at least one functional node to be able to recover using th
 ## Runbook
 
 * Move any broken etcd nodes into the "broken\_etcd" group, and make sure the "etcd\_member\_name" variable is set.
-* Move any broken master nodes into the "broken\_kube-master" group.
+* Move any broken master nodes into the "broken\_kube\_control\_plane" group.
 
-Then run the playbook with ```--limit etcd,kube-master``` and increase the number of ETCD retries by setting ```-e etcd_retries=10``` or something even larger. The amount of retries required is difficult to predict.
+Then run the playbook with ```--limit etcd,kube_control_plane``` and increase the number of etcd retries by setting ```-e etcd_retries=10``` or something even larger. The number of retries required is difficult to predict.
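
A sketch of that invocation (the inventory path is hypothetical):

```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yml recover-control-plane.yml \
  --limit etcd,kube_control_plane -e etcd_retries=10
```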
 
 When finished you should have a fully working control plane again.
 
diff --git a/docs/test_cases.md b/docs/test_cases.md
index 3b572d8b8..2a8f5e920 100644
--- a/docs/test_cases.md
+++ b/docs/test_cases.md
@@ -3,10 +3,10 @@
 There are four node layout types: `default`, `separate`, `ha`, and `scale`.
 
 `default` is a non-HA two-node setup with one separate `kube-node`
-and the `etcd` group merged with the `kube-master`.
+and the `etcd` group merged with the `kube_control_plane`.
 
 `separate` layout is when there is only one node of each type, which includes
- a kube-master, kube-node, and etcd cluster member.
+ a kube_control_plane, kube-node, and etcd cluster member.
 
 `ha` layout consists of two etcd nodes, two masters and a single worker node,
 with role intersection.
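
For instance, the `separate` layout could be expressed by an inventory along these lines (host names are made up):

```yaml
  children:
    kube_control_plane:
      hosts:
        node-1:
    kube-node:
      hosts:
        node-2:
    etcd:
      hosts:
        node-3:
```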
diff --git a/docs/upgrades.md b/docs/upgrades.md
index b325b619f..0a3d6c779 100644
--- a/docs/upgrades.md
+++ b/docs/upgrades.md
@@ -41,7 +41,7 @@ The var ```-e upgrade_cluster_setup=true``` is needed to be set in order to migr
 Kubespray also supports cordoning, draining and uncordoning of nodes when performing
 a cluster upgrade. There is a separate playbook used for this purpose. It is
 important to note that upgrade-cluster.yml can only be used for upgrading an
-existing cluster. That means there must be at least 1 kube-master already
+existing cluster. That means there must be at least 1 kube_control_plane already
 deployed.
 
 ```ShellSession
diff --git a/docs/vars.md b/docs/vars.md
index a97eee035..310f3f29f 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -36,7 +36,7 @@ Some variables of note include:
 * *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
   and access_ip are undefined
 * *loadbalancer_apiserver* - If defined, all hosts will connect to this
-  address instead of localhost for kube-masters and kube-master[0] for
+  address instead of localhost for kube_control_plane hosts and kube_control_plane[0] for
   kube-nodes. See more details in the
   [HA guide](/docs/ha-mode.md).
 * *loadbalancer_apiserver_localhost* - makes all hosts connect to
diff --git a/extra_playbooks/migrate_openstack_provider.yml b/extra_playbooks/migrate_openstack_provider.yml
index 0e1584470..9d4adbaa9 100644
--- a/extra_playbooks/migrate_openstack_provider.yml
+++ b/extra_playbooks/migrate_openstack_provider.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-node:kube-master
+- hosts: kube-node:kube_control_plane
   tasks:
     - name: Remove old cloud provider config
       file:
@@ -7,7 +7,7 @@
         state: absent
       with_items:
         - /etc/kubernetes/cloud_config
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   tasks:
     - name: Include kubespray-default variables
       include_vars: ../roles/kubespray-defaults/defaults/main.yaml
diff --git a/extra_playbooks/upgrade-only-k8s.yml b/extra_playbooks/upgrade-only-k8s.yml
index 89e8ed1dc..5bdbd012d 100644
--- a/extra_playbooks/upgrade-only-k8s.yml
+++ b/extra_playbooks/upgrade-only-k8s.yml
@@ -34,7 +34,7 @@
     - { role: kubernetes/preinstall, tags: preinstall }
 
 - name: Handle upgrades to master components first to maintain backwards compat.
-  hosts: kube-master
+  hosts: kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
   roles:
@@ -47,7 +47,7 @@
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
 - name: Finally handle worker upgrades, based on given batch size
-  hosts: kube-node:!kube-master
+  hosts: kube-node:!kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
diff --git a/inventory/local/hosts.ini b/inventory/local/hosts.ini
index 7834d27c0..551941080 100644
--- a/inventory/local/hosts.ini
+++ b/inventory/local/hosts.ini
@@ -1,6 +1,6 @@
 node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases
 
-[kube-master]
+[kube_control_plane]
 node1
 
 [etcd]
@@ -11,5 +11,5 @@ node1
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 calico-rr
diff --git a/inventory/sample/inventory.ini b/inventory/sample/inventory.ini
index 6babb4e9e..b450bc068 100644
--- a/inventory/sample/inventory.ini
+++ b/inventory/sample/inventory.ini
@@ -13,7 +13,7 @@
 # [bastion]
 # bastion ansible_host=x.x.x.x ansible_user=some_user
 
-[kube-master]
+[kube_control_plane]
 # node1
 # node2
 # node3
@@ -33,6 +33,6 @@
 [calico-rr]
 
 [k8s-cluster:children]
-kube-master
+kube_control_plane
 kube-node
 calico-rr
diff --git a/recover-control-plane.yml b/recover-control-plane.yml
index 26be30769..c2b28d093 100644
--- a/recover-control-plane.yml
+++ b/recover-control-plane.yml
@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventories which contain kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: Add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
@@ -15,7 +24,7 @@
     - { role: kubespray-defaults}
     - { role: recover_control_plane/etcd }
 
-- hosts: "{{ groups['kube-master'] | first }}"
+- hosts: "{{ groups['kube_control_plane'] | first }}"
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
@@ -23,7 +32,7 @@
 
 - include: cluster.yml
 
-- hosts: "{{ groups['kube-master'] }}"
+- hosts: "{{ groups['kube_control_plane'] }}"
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
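
Note on the compatibility play added above: a legacy inventory that still defines `kube-master` keeps working, because `group_by` adds its hosts to `kube_control_plane` at runtime. A minimal sketch of such an inventory (the host name is illustrative):

```yaml
  children:
    kube-master:   # legacy group name; its hosts join kube_control_plane via group_by
      hosts:
        node-1:
```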
diff --git a/remove-node.yml b/remove-node.yml
index b78b71907..27c886035 100644
--- a/remove-node.yml
+++ b/remove-node.yml
@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventories which contain kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: Add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
@@ -17,7 +26,7 @@
         msg: "Delete nodes confirmation failed"
       when: delete_nodes_confirmation != "yes"
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -35,7 +44,7 @@
     - { role: reset, tags: reset, when: reset_nodes|default(True)|bool }
 
 # Currently cannot remove first master or etcd
-- hosts: "{{ node | default('kube-master[1:]:etcd[1:]') }}"
+- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:
diff --git a/reset.yml b/reset.yml
index e053e101c..81f2389d4 100644
--- a/reset.yml
+++ b/reset.yml
@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventories which contain kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: Add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
diff --git a/roles/container-engine/containerd/molecule/default/molecule.yml b/roles/container-engine/containerd/molecule/default/molecule.yml
index b49d73ce0..48f7b5dd0 100644
--- a/roles/container-engine/containerd/molecule/default/molecule.yml
+++ b/roles/container-engine/containerd/molecule/default/molecule.yml
@@ -12,7 +12,7 @@ platforms:
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
 provisioner:
   name: ansible
   env:
diff --git a/roles/container-engine/cri-o/molecule/default/molecule.yml b/roles/container-engine/cri-o/molecule/default/molecule.yml
index a6c36acba..574d49139 100644
--- a/roles/container-engine/cri-o/molecule/default/molecule.yml
+++ b/roles/container-engine/cri-o/molecule/default/molecule.yml
@@ -12,25 +12,25 @@ platforms:
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
   - name: centos7
     box: centos/7
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
   - name: centos8
     box: centos/8
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
   - name: fedora
     box: fedora/33-cloud-base
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
 provisioner:
   name: ansible
   env:
diff --git a/roles/container-engine/kata-containers/molecule/default/molecule.yml b/roles/container-engine/kata-containers/molecule/default/molecule.yml
index 8cccf8dfc..164a47083 100644
--- a/roles/container-engine/kata-containers/molecule/default/molecule.yml
+++ b/roles/container-engine/kata-containers/molecule/default/molecule.yml
@@ -15,14 +15,14 @@ platforms:
     memory: 1024
     nested: true
     groups:
-      - kube-master
+      - kube_control_plane
   - name: ubuntu20
     box: generic/ubuntu2004
     cpus: 1
     memory: 1024
     nested: true
     groups:
-      - kube-master
+      - kube_control_plane
 provisioner:
   name: ansible
   env:
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index e35711459..67450f446 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -30,7 +30,7 @@ download_container: true
 # if this is set to true, uses localhost for the download_run_once mode
 # (requires docker and sudo to access docker). You may want this option for
 # local caching of docker images or for Flatcar Container Linux by Kinvolk cluster nodes.
-# Otherwise, uses the first node in the kube-master group to store images
+# Otherwise, uses the first node in the kube_control_plane group to store images
 # in the download_run_once mode.
 download_localhost: false
 
@@ -42,8 +42,8 @@ download_always_pull: false
 # SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
 download_validate_certs: true
 
-# Use the first kube-master if download_localhost is not set
-download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube-master'][0] }}{% endif %}"
+# Use the first kube_control_plane if download_localhost is not set
+download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube_control_plane'][0] }}{% endif %}"
 
 # Arch of Docker images and needed packages
 image_arch: "{{host_architecture | default('amd64')}}"
@@ -733,7 +733,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-    - kube-master
+    - kube_control_plane
 
   crictl:
     file: true
@@ -883,7 +883,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-    - kube-master
+    - kube_control_plane
 
   weave_kube:
     enabled: "{{ kube_network_plugin == 'weave' }}"
@@ -973,7 +973,7 @@ downloads:
     tag: "{{ coredns_image_tag }}"
     sha256: "{{ coredns_digest_checksum|default(None) }}"
     groups:
-    - kube-master
+    - kube_control_plane
 
   nodelocaldns:
     enabled: "{{ enable_nodelocaldns }}"
@@ -991,7 +991,7 @@ downloads:
     tag: "{{ dnsautoscaler_image_tag }}"
     sha256: "{{ dnsautoscaler_digest_checksum|default(None) }}"
     groups:
-    - kube-master
+    - kube_control_plane
 
   testbox:
     enabled: false
@@ -1011,7 +1011,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-    - kube-master
+    - kube_control_plane
 
   registry:
     enabled: "{{ registry_enabled }}"
@@ -1038,7 +1038,7 @@ downloads:
     tag: "{{ metrics_server_image_tag }}"
     sha256: "{{ metrics_server_digest_checksum|default(None) }}"
     groups:
-    - kube-master
+    - kube_control_plane
 
   addon_resizer:
     # Currently addon_resizer is only used by metrics server
@@ -1048,7 +1048,7 @@ downloads:
     tag: "{{ addon_resizer_image_tag }}"
     sha256: "{{ addon_resizer_digest_checksum|default(None) }}"
     groups:
-    - kube-master
+    - kube_control_plane
 
   local_volume_provisioner:
     enabled: "{{ local_volume_provisioner_enabled }}"
@@ -1219,7 +1219,7 @@ downloads:
     tag: "{{ dashboard_image_tag }}"
     sha256: "{{ dashboard_digest_checksum|default(None) }}"
     groups:
-    - kube-master
+    - kube_control_plane
 
   dashboard_metrics_scrapper:
     enabled: "{{ dashboard_enabled }}"
@@ -1228,7 +1228,7 @@ downloads:
     tag: "{{ dashboard_metrics_scraper_tag }}"
     sha256: "{{ dashboard_digest_checksum|default(None) }}"
     groups:
-    - kube-master
+    - kube_control_plane
 
 download_defaults:
   container: false
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index 2fa45929f..394228354 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -18,7 +18,7 @@
   include_tasks: prep_kubeadm_images.yml
   when:
     - not skip_downloads|default(false)
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
   tags:
     - download
     - upload
diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
index 8d3020875..2f774cfcd 100644
--- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -6,7 +6,7 @@
   ignore_errors: true
   when:
     - dns_mode in ['coredns', 'coredns_dual']
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Apps | Delete kubeadm CoreDNS
   kube:
@@ -17,7 +17,7 @@
     state: absent
   when:
     - dns_mode in ['coredns', 'coredns_dual']
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - createdby_annotation.stdout != 'kubespray'
 
 - name: Kubernetes Apps | Delete kubeadm Kube-DNS service
@@ -29,4 +29,4 @@
     state: absent
   when:
     - dns_mode in ['coredns', 'coredns_dual']
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/ansible/tasks/coredns.yml b/roles/kubernetes-apps/ansible/tasks/coredns.yml
index bb959966b..0bbb269a0 100644
--- a/roles/kubernetes-apps/ansible/tasks/coredns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/coredns.yml
@@ -20,7 +20,7 @@
     clusterIP: "{{ skydns_server }}"
   when:
     - dns_mode in ['coredns', 'coredns_dual']
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - coredns
 
@@ -38,6 +38,6 @@
     coredns_ordinal_suffix: "-secondary"
   when:
     - dns_mode == 'coredns_dual'
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - coredns
diff --git a/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
index ba6c13b2b..94c041d14 100644
--- a/roles/kubernetes-apps/ansible/tasks/dashboard.yml
+++ b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
@@ -6,7 +6,7 @@
   with_items:
     - { file: dashboard.yml, type: deploy, name: kubernetes-dashboard }
   register: manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Apps | Start dashboard
   kube:
@@ -17,4 +17,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index 25ffa7270..75ee477b0 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -9,12 +9,12 @@
   until: result.status == 200
   retries: 20
   delay: 1
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Apps | Cleanup DNS
   import_tasks: cleanup_dns.yml
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade
     - coredns
@@ -24,7 +24,7 @@
   import_tasks: "coredns.yml"
   when:
     - dns_mode in ['coredns', 'coredns_dual']
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - coredns
 
@@ -32,7 +32,7 @@
   import_tasks: "nodelocaldns.yml"
   when:
     - enable_nodelocaldns
-    - inventory_hostname == groups['kube-master'] | first
+    - inventory_hostname == groups['kube_control_plane'] | first
   tags:
     - nodelocaldns
 
@@ -50,7 +50,7 @@
     - "{{ nodelocaldns_manifests.results | default({}) }}"
   when:
     - dns_mode != 'none'
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   register: resource_result
   until: resource_result is succeeded
diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
index 81121c53b..46252929a 100644
--- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml
+++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
@@ -28,7 +28,7 @@
   with_items: "{{ netchecker_templates }}"
   register: manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Apps | Start Netchecker Resources
   kube:
@@ -39,4 +39,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml
index 378dbc92f..ce79ceed4 100644
--- a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml
@@ -10,7 +10,7 @@
     secondaryclusterIP: "{{ skydns_server_secondary }}"
   when:
     - enable_nodelocaldns
-    - inventory_hostname == groups['kube-master'] | first
+    - inventory_hostname == groups['kube_control_plane'] | first
   tags:
     - nodelocaldns
     - coredns
@@ -39,7 +39,7 @@
       {%- endif -%}
   when:
     - enable_nodelocaldns
-    - inventory_hostname == groups['kube-master'] | first
+    - inventory_hostname == groups['kube_control_plane'] | first
   tags:
     - nodelocaldns
     - coredns
diff --git a/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml
index cec7deaca..ecf6f511d 100644
--- a/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml
+++ b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml
@@ -7,7 +7,7 @@
   template:
     src: controller-manager-config.yml.j2
     dest: "{{ kube_config_dir }}/controller-manager-config.yml"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: oci
 
 - name: "OCI Cloud Controller | Slurp Configuration"
@@ -18,14 +18,14 @@
 - name: "OCI Cloud Controller | Encode Configuration"
   set_fact:
     controller_manager_config_base64: "{{ controller_manager_config.content }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: oci
 
 - name: "OCI Cloud Controller | Generate Manifests"
   template:
     src: oci-cloud-provider.yml.j2
     dest: "{{ kube_config_dir }}/oci-cloud-provider.yml"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: oci
 
 - name: "OCI Cloud Controller | Apply Manifests"
@@ -33,5 +33,5 @@
     kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/oci-cloud-provider.yml"
     state: latest
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: oci
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index 91955ea8a..2f5f110af 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -9,14 +9,14 @@
   until: result.status == 200
   retries: 10
   delay: 6
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Apps | Check AppArmor status
   command: which apparmor_parser
   register: apparmor_status
   when:
     - podsecuritypolicy_enabled
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   failed_when: false
 
 - name: Kubernetes Apps | Set apparmor_enabled
@@ -24,7 +24,7 @@
     apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
   when:
     - podsecuritypolicy_enabled
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Apps | Render templates for PodSecurityPolicy
   template:
@@ -37,7 +37,7 @@
     - {file: psp-crb.yml, type: rolebinding, name: psp-crb}
   when:
     - podsecuritypolicy_enabled
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Apps | Add policies, roles, bindings for PodSecurityPolicy
   kube:
@@ -52,7 +52,7 @@
   delay: 6
   with_items: "{{ psp_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
@@ -64,7 +64,7 @@
   register: node_crb_manifest
   when:
     - rbac_enabled
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Apply workaround to allow all nodes with cert O=system:nodes to register
   kube:
@@ -80,7 +80,7 @@
   when:
     - rbac_enabled
     - node_crb_manifest.changed
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet
   template:
@@ -90,7 +90,7 @@
   when:
     - rbac_enabled
     - kubelet_authorization_mode_webhook
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags: node-webhook
 
 - name: Apply webhook ClusterRole
@@ -104,7 +104,7 @@
     - rbac_enabled
     - kubelet_authorization_mode_webhook
     - node_webhook_cr_manifest.changed
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags: node-webhook
 
 - name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole
@@ -115,7 +115,7 @@
   when:
     - rbac_enabled
     - kubelet_authorization_mode_webhook
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags: node-webhook
 
 - name: Grant system:nodes the webhook ClusterRole
@@ -129,7 +129,7 @@
     - rbac_enabled
     - kubelet_authorization_mode_webhook
     - node_webhook_crb_manifest.changed
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags: node-webhook
 
 - include_tasks: oci.yml
@@ -140,7 +140,7 @@
 
 - name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
   copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml
-  when: inventory_hostname == groups['kube-master']|last
+  when: inventory_hostname == groups['kube_control_plane']|last
 
 - name: PriorityClass | Create k8s-cluster-critical
   kube:
@@ -149,4 +149,4 @@
     resource: "PriorityClass"
     filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
     state: latest
-  when: inventory_hostname == groups['kube-master']|last
+  when: inventory_hostname == groups['kube_control_plane']|last
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
index 22b39b3d4..72142eae6 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml
@@ -6,7 +6,7 @@
   when:
   - cloud_provider is defined
   - cloud_provider == 'oci'
-  - inventory_hostname == groups['kube-master'][0]
+  - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Apply OCI RBAC
   kube:
@@ -15,4 +15,4 @@
   when:
   - cloud_provider is defined
   - cloud_provider == 'oci'
-  - inventory_hostname == groups['kube-master'][0]
+  - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
index fd3ea42fa..75a0b8a10 100644
--- a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
@@ -38,7 +38,7 @@
     - { name: k8s-device-plugin-nvidia-daemonset, file: k8s-device-plugin-nvidia-daemonset.yml, type: daemonset }
   register: container_engine_accelerator_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container
+    - inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container
 
 - name: Container Engine Acceleration Nvidia GPU | Apply manifests for nvidia accelerators
   kube:
@@ -51,4 +51,4 @@
   with_items:
     - "{{ container_engine_accelerator_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container and nvidia_driver_install_supported
+    - inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container and nvidia_driver_install_supported
diff --git a/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml b/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml
index 637d7beef..46384d281 100644
--- a/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml
+++ b/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml
@@ -6,7 +6,7 @@
     dest: "{{ kube_config_dir }}/runtimeclass-crun.yml"
     mode: "0664"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: crun | Apply manifests
   kube:
@@ -16,4 +16,4 @@
     filename: "{{ kube_config_dir }}/runtimeclass-crun.yml"
     state: "latest"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml b/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml
index 34478b990..3fb059fe6 100644
--- a/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml
+++ b/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml
@@ -20,7 +20,7 @@
   with_items: "{{ kata_containers_templates }}"
   register: kata_containers_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kata Containers | Apply manifests
   kube:
@@ -31,4 +31,4 @@
     state: "latest"
   with_items: "{{ kata_containers_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml b/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml
index 04bb9fd4d..7b2f41a4c 100644
--- a/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml
@@ -9,7 +9,7 @@
     - {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice.yml}
     - {name: aws-ebs-csi-nodeservice, file: aws-ebs-csi-nodeservice.yml}
   register: aws_csi_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: aws-ebs-csi-driver
 
 - name: AWS CSI Driver | Apply Manifests
@@ -20,7 +20,7 @@
   with_items:
     - "{{ aws_csi_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml b/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml
index e33ca292f..b8bbd7113 100644
--- a/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml
@@ -8,14 +8,14 @@
     dest: "{{ kube_config_dir }}/azure_csi_cloud_config"
     group: "{{ kube_cert_group }}"
     mode: 0640
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: azure-csi-driver
 
 - name: Azure CSI Driver | Get base64 cloud-config
   slurp:
     src: "{{ kube_config_dir }}/azure_csi_cloud_config"
   register: cloud_config_secret
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: azure-csi-driver
 
 - name: Azure CSI Driver | Generate Manifests
@@ -30,7 +30,7 @@
     - {name: azure-csi-azuredisk-node, file: azure-csi-azuredisk-node.yml}
     - {name: azure-csi-node-info-crd.yml.j2, file: azure-csi-node-info-crd.yml}
   register: azure_csi_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: azure-csi-driver
 
 - name: Azure CSI Driver | Apply Manifests
@@ -41,7 +41,7 @@
   with_items:
     - "{{ azure_csi_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
index 14b827513..47ba0a1d0 100644
--- a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
@@ -20,14 +20,14 @@
     dest: "{{ kube_config_dir }}/cinder_cloud_config"
     group: "{{ kube_cert_group }}"
     mode: 0640
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: cinder-csi-driver
 
 - name: Cinder CSI Driver | Get base64 cloud-config
   slurp:
     src: "{{ kube_config_dir }}/cinder_cloud_config"
   register: cloud_config_secret
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: cinder-csi-driver
 
 - name: Cinder CSI Driver | Generate Manifests
@@ -43,7 +43,7 @@
     - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin.yml}
     - {name: cinder-csi-poddisruptionbudget, file: cinder-csi-poddisruptionbudget.yml}
   register: cinder_csi_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: cinder-csi-driver
 
 - name: Cinder CSI Driver | Apply Manifests
@@ -54,7 +54,7 @@
   with_items:
     - "{{ cinder_csi_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml b/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml
index 85d637efb..029d7ffe5 100644
--- a/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml
@@ -8,7 +8,7 @@
     - {name: volumesnapshotcontents, file: volumesnapshotcontents.yml}
     - {name: volumesnapshots, file: volumesnapshots.yml}
   register: csi_crd_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: csi-driver
 
 - name: CSI CRD | Apply Manifests
@@ -20,7 +20,7 @@
   with_items:
     - "{{ csi_crd_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml b/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml
index 7e4315854..05961ef56 100644
--- a/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml
@@ -11,14 +11,14 @@
     dest: "{{ kube_config_dir }}/cloud-sa.json"
     group: "{{ kube_cert_group }}"
     mode: 0640
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: gcp-pd-csi-driver
 
 - name: GCP PD CSI Driver | Get base64 cloud-sa.json
   slurp:
     src: "{{ kube_config_dir }}/cloud-sa.json"
   register: gcp_cred_secret
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: gcp-pd-csi-driver
 
 - name: GCP PD CSI Driver | Generate Manifests
@@ -31,7 +31,7 @@
     - {name: gcp-pd-csi-controller, file: gcp-pd-csi-controller.yml}
     - {name: gcp-pd-csi-node, file: gcp-pd-csi-node.yml}
   register: gcp_pd_csi_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: gcp-pd-csi-driver
 
 - name: GCP PD CSI Driver | Apply Manifests
@@ -42,7 +42,7 @@
   with_items:
     - "{{ gcp_pd_csi_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
index 4e341b2af..26e8751ac 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
@@ -9,7 +9,7 @@
     mode: 0640
   with_items:
     - vsphere-csi-cloud-config
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: vsphere-csi-driver
 
 - name: vSphere CSI Driver | Generate Manifests
@@ -21,13 +21,13 @@
     - vsphere-csi-controller-ss.yml
     - vsphere-csi-node.yml
   register: vsphere_csi_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: vsphere-csi-driver
 
 - name: vSphere CSI Driver | Generate a CSI secret manifest
   command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
   register: vsphere_csi_secret_manifest
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: true
   tags: vsphere-csi-driver
 
@@ -35,7 +35,7 @@
   command:
     cmd: "{{ bin_dir }}/kubectl apply -f -"
     stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: true
   tags: vsphere-csi-driver
 
@@ -47,7 +47,7 @@
   with_items:
     - "{{ vsphere_csi_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item }}"
diff --git a/roles/kubernetes-apps/external_cloud_controller/meta/main.yml b/roles/kubernetes-apps/external_cloud_controller/meta/main.yml
index b7d1cc698..a75a42995 100644
--- a/roles/kubernetes-apps/external_cloud_controller/meta/main.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/meta/main.yml
@@ -6,7 +6,7 @@ dependencies:
       - cloud_provider == "external"
       - external_cloud_provider is defined
       - external_cloud_provider == "openstack"
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - external-cloud-controller
       - external-openstack
@@ -16,7 +16,7 @@ dependencies:
       - cloud_provider == "external"
       - external_cloud_provider is defined
       - external_cloud_provider == "vsphere"
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - external-cloud-controller
       - external-vsphere
diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml
index 220d39168..7c89fdbdf 100644
--- a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml
@@ -20,14 +20,14 @@
     dest: "{{ kube_config_dir }}/external_openstack_cloud_config"
     group: "{{ kube_cert_group }}"
     mode: 0640
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-openstack
 
 - name: External OpenStack Cloud Controller | Get base64 cloud-config
   slurp:
     src: "{{ kube_config_dir }}/external_openstack_cloud_config"
   register: external_openstack_cloud_config_secret
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-openstack
 
 - name: External OpenStack Cloud Controller | Generate Manifests
@@ -42,7 +42,7 @@
     - {name: external-openstack-cloud-controller-manager-role-bindings, file: external-openstack-cloud-controller-manager-role-bindings.yml}
     - {name: external-openstack-cloud-controller-manager-ds, file: external-openstack-cloud-controller-manager-ds.yml}
   register: external_openstack_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-openstack
 
 - name: External OpenStack Cloud Controller | Apply Manifests
@@ -53,7 +53,7 @@
   with_items:
     - "{{ external_openstack_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml b/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml
index 0dbf3f7dc..86e16dbe7 100644
--- a/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml
@@ -9,7 +9,7 @@
     mode: 0640
   with_items:
     - external-vsphere-cpi-cloud-config
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-vsphere
 
 - name: External vSphere Cloud Controller | Generate Manifests
@@ -22,20 +22,20 @@
     - external-vsphere-cloud-controller-manager-role-bindings.yml
     - external-vsphere-cloud-controller-manager-ds.yml
   register: external_vsphere_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-vsphere
 
 - name: External vSphere Cloud Provider Interface | Create a CPI configMap manifest
   command: "{{ bin_dir }}/kubectl create configmap cloud-config --from-file=vsphere.conf={{ kube_config_dir }}/external-vsphere-cpi-cloud-config -n kube-system --dry-run --save-config -o yaml"
   register: external_vsphere_configmap_manifest
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-vsphere
 
 - name: External vSphere Cloud Provider Interface | Apply a CPI configMap manifest
   command:
     cmd: "{{ bin_dir }}/kubectl apply -f -"
     stdin: "{{ external_vsphere_configmap_manifest.stdout }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-vsphere
 
 - name: External vSphere Cloud Controller | Apply Manifests
@@ -46,7 +46,7 @@
   with_items:
     - "{{ external_vsphere_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item }}"
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
index c93ecfde7..15b2ecf2b 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
@@ -5,7 +5,7 @@
     path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
     state: absent
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade
 
@@ -14,7 +14,7 @@
     {{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
   ignore_errors: yes
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade
 
@@ -23,7 +23,7 @@
     {{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
   ignore_errors: yes
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade
 
@@ -35,7 +35,7 @@
     group: root
     mode: 0755
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: CephFS Provisioner | Templates list
   set_fact:
@@ -65,7 +65,7 @@
     dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
   with_items: "{{ cephfs_provisioner_templates }}"
   register: cephfs_provisioner_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: CephFS Provisioner | Apply manifests
   kube:
@@ -76,4 +76,4 @@
     filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ cephfs_provisioner_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml
index a723d24f8..1c3606882 100644
--- a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml
@@ -7,7 +7,7 @@
     group: root
     mode: 0755
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Local Path Provisioner | Create claim root dir
   file:
@@ -42,7 +42,7 @@
     dest: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.file }}"
   with_items: "{{ local_path_provisioner_templates }}"
   register: local_path_provisioner_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Local Path Provisioner | Apply manifests
   kube:
@@ -53,4 +53,4 @@
     filename: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ local_path_provisioner_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
index b4c4f68eb..88a178825 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
@@ -42,7 +42,7 @@
     dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}"
   with_items: "{{ local_volume_provisioner_templates }}"
   register: local_volume_provisioner_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Local Volume Provisioner | Apply manifests
   kube:
@@ -53,6 +53,6 @@
     filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ local_volume_provisioner_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   loop_control:
     label: "{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/external_provisioner/meta/main.yml b/roles/kubernetes-apps/external_provisioner/meta/main.yml
index 19fe8ba48..13bc8b6e8 100644
--- a/roles/kubernetes-apps/external_provisioner/meta/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/meta/main.yml
@@ -3,7 +3,7 @@ dependencies:
   - role: kubernetes-apps/external_provisioner/local_volume_provisioner
     when:
       - local_volume_provisioner_enabled
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - apps
       - local-volume-provisioner
diff --git a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
index 7c09168b2..e25e0b143 100644
--- a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
@@ -5,7 +5,7 @@
     path: "{{ kube_config_dir }}/addons/rbd_provisioner"
     state: absent
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade
 
@@ -14,7 +14,7 @@
     {{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
   ignore_errors: yes
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade
 
@@ -23,7 +23,7 @@
     {{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
   ignore_errors: yes
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade
 
@@ -35,7 +35,7 @@
     group: root
     mode: 0755
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: RBD Provisioner | Templates list
   set_fact:
@@ -65,7 +65,7 @@
     dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}"
   with_items: "{{ rbd_provisioner_templates }}"
   register: rbd_provisioner_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: RBD Provisioner | Apply manifests
   kube:
@@ -76,4 +76,4 @@
     filename: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ rbd_provisioner_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml
index 77f3df4e0..2e8b2f89f 100644
--- a/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml
@@ -20,7 +20,7 @@
     - { name: alb-ingress-deploy, file: alb-ingress-deploy.yml, type: deploy }
   register: alb_ingress_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: ALB Ingress Controller | Apply manifests
   kube:
@@ -32,4 +32,4 @@
     state: "latest"
   with_items: "{{ alb_ingress_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/ambassador/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/ambassador/tasks/main.yml
index 91524dea2..e4cbc8bcc 100644
--- a/roles/kubernetes-apps/ingress_controller/ambassador/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/ambassador/tasks/main.yml
@@ -8,7 +8,7 @@
     group: root
     mode: 0755
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Ambassador | Templates list
   set_fact:
@@ -29,7 +29,7 @@
   loop: "{{ ingress_ambassador_templates }}"
   register: ingress_ambassador_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Ambassador | Apply manifests
   kube:
@@ -41,7 +41,7 @@
     state: "latest"
   loop: "{{ ingress_ambassador_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 # load the AmbassadorInstallation _after_ the CustomResourceDefinition has been loaded
 
@@ -57,7 +57,7 @@
   loop: "{{ ingress_ambassador_cr_templates }}"
   register: ingress_ambassador_cr_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Ambassador | Apply AmbassadorInstallation
   kube:
@@ -69,4 +69,4 @@
     state: "latest"
   loop: "{{ ingress_ambassador_cr_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
index c8fdce8f1..42112b0d5 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
@@ -5,7 +5,7 @@
     path: "{{ kube_config_dir }}/addons/cert_manager"
     state: absent
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade
 
@@ -14,7 +14,7 @@
     {{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
   ignore_errors: yes
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade
 
@@ -26,7 +26,7 @@
     group: root
     mode: 0755
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Cert Manager | Templates list
   set_fact:
@@ -54,7 +54,7 @@
   with_items: "{{ cert_manager_templates }}"
   register: cert_manager_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Cert Manager | Apply manifests
   kube:
@@ -65,12 +65,12 @@
     state: "latest"
   with_items: "{{ cert_manager_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Cert Manager | Wait for Webhook pods to become ready
   command: "{{ bin_dir }}/kubectl wait po --namespace={{ cert_manager_namespace }} --selector app=webhook --for=condition=Ready --timeout=600s"
   register: cert_manager_webhook_pods_ready
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Cert Manager | Create ClusterIssuer manifest
   template:
@@ -78,7 +78,7 @@
     dest: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml"
   register: cert_manager_clusterissuer_manifest
   when:
-    - inventory_hostname == groups['kube-master'][0] and cert_manager_webhook_pods_ready is succeeded
+    - inventory_hostname == groups['kube_control_plane'][0] and cert_manager_webhook_pods_ready is succeeded
 
 - name: Cert Manager | Apply ClusterIssuer manifest
   kube:
@@ -86,4 +86,4 @@
     kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml"
     state: "latest"
-  when: inventory_hostname == groups['kube-master'][0] and cert_manager_clusterissuer_manifest is succeeded
+  when: inventory_hostname == groups['kube_control_plane'][0] and cert_manager_clusterissuer_manifest is succeeded
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
index b8c575817..05d35b3ac 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
@@ -8,7 +8,7 @@
     group: root
     mode: 0755
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: NGINX Ingress Controller | Templates list
   set_fact:
@@ -38,7 +38,7 @@
   with_items: "{{ ingress_nginx_templates }}"
   register: ingress_nginx_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: NGINX Ingress Controller | Apply manifests
   kube:
@@ -50,4 +50,4 @@
     state: "latest"
   with_items: "{{ ingress_nginx_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml
index e0f8e9a12..a3b1f1dfe 100644
--- a/roles/kubernetes-apps/meta/main.yml
+++ b/roles/kubernetes-apps/meta/main.yml
@@ -2,7 +2,7 @@
 dependencies:
   - role: kubernetes-apps/ansible
     when:
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
 
   - role: kubernetes-apps/helm
     when:
@@ -13,21 +13,21 @@ dependencies:
   - role: kubernetes-apps/registry
     when:
       - registry_enabled
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - registry
 
   - role: kubernetes-apps/metrics_server
     when:
       - metrics_server_enabled
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - metrics_server
 
   - role: kubernetes-apps/csi_driver/csi_crd
     when:
       - cinder_csi_enabled
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - csi-driver
 
@@ -69,19 +69,19 @@ dependencies:
   - role: kubernetes-apps/persistent_volumes
     when:
       - persistent_volumes_enabled
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - persistent_volumes
 
   - role: kubernetes-apps/snapshots
-    when: inventory_hostname == groups['kube-master'][0]
+    when: inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - snapshots
       - csi-driver
 
   - role: kubernetes-apps/container_runtimes
     when:
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - container-runtimes
 
@@ -94,13 +94,13 @@ dependencies:
     when:
       - cloud_provider is defined
       - cloud_provider == "oci"
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - oci
 
   - role: kubernetes-apps/metallb
     when:
       - metallb_enabled
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - metallb
diff --git a/roles/kubernetes-apps/metallb/tasks/main.yml b/roles/kubernetes-apps/metallb/tasks/main.yml
index 5d3c58d6e..990500c28 100644
--- a/roles/kubernetes-apps/metallb/tasks/main.yml
+++ b/roles/kubernetes-apps/metallb/tasks/main.yml
@@ -22,7 +22,7 @@
   register: apparmor_status
   when:
     - podsecuritypolicy_enabled
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   failed_when: false
 
 - name: Kubernetes Apps | Set apparmor_enabled
@@ -30,7 +30,7 @@
     apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
   when:
     - podsecuritypolicy_enabled
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: "Kubernetes Apps | Lay Down MetalLB"
   become: true
@@ -38,7 +38,7 @@
   with_items: ["metallb.yml", "metallb-config.yml"]
   register: "rendering"
   when:
-    - "inventory_hostname == groups['kube-master'][0]"
+    - "inventory_hostname == groups['kube_control_plane'][0]"
 
 - name: "Kubernetes Apps | Install and configure MetalLB"
   kube:
@@ -49,7 +49,7 @@
   become: true
   with_items: "{{ rendering.results }}"
   when:
-    - "inventory_hostname == groups['kube-master'][0]"
+    - "inventory_hostname == groups['kube_control_plane'][0]"
 
 - name: Kubernetes Apps | Check existing secret of MetalLB
   command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist"
@@ -57,18 +57,18 @@
   become: true
   ignore_errors: yes
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Apps | Create random bytes for MetalLB
   command: "openssl rand -base64 32"
   register: metallb_rand
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - metallb_secret.rc != 0
 
 - name: Kubernetes Apps | Install secret of MetalLB if not existing
   command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system create secret generic memberlist --from-literal=secretkey={{ metallb_rand.stdout }}"
   become: true
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - metallb_secret.rc != 0
diff --git a/roles/kubernetes-apps/metrics_server/tasks/main.yml b/roles/kubernetes-apps/metrics_server/tasks/main.yml
index d7dc45443..c3be4b830 100644
--- a/roles/kubernetes-apps/metrics_server/tasks/main.yml
+++ b/roles/kubernetes-apps/metrics_server/tasks/main.yml
@@ -2,14 +2,14 @@
 # If all masters also have the node role, no master is tainted and the toleration should not be specified.
 - name: Check whether all masters are also nodes
   set_fact:
-    masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube-master']) == groups['kube-master'] }}"
+    masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
 
 - name: Metrics Server | Delete addon dir
   file:
     path: "{{ kube_config_dir }}/addons/metrics_server"
     state: absent
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade
 
@@ -21,7 +21,7 @@
     group: root
     mode: 0755
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Metrics Server | Templates list
   set_fact:
@@ -43,7 +43,7 @@
   with_items: "{{ metrics_server_templates }}"
   register: metrics_server_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Metrics Server | Apply manifests
   kube:
@@ -54,4 +54,4 @@
     state: "latest"
   with_items: "{{ metrics_server_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
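
The masters_are_not_tainted fact above hinges on Ansible's intersect filter: when intersecting kube-node with kube_control_plane returns the whole kube_control_plane list, every control plane host is also a worker and carries no NoSchedule taint, so the toleration can be dropped. A worked example under hypothetical group contents:

    # groups['kube_control_plane'] == ['m1', 'm2']        (hypothetical)
    # groups['kube-node']          == ['m1', 'm2', 'w1']  (hypothetical)
    # ['m1', 'm2', 'w1'] | intersect(['m1', 'm2']) -> ['m1', 'm2'] == kube_control_plane -> True
    # with dedicated masters, kube-node == ['w1'], the intersection is [] -> False
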
diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
index b495106b1..db7e3f268 100644
--- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
@@ -8,4 +8,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ canal_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
index 1baaa1ce6..d3d6ceec5 100644
--- a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
@@ -8,7 +8,7 @@
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ cilium_node_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
 
 - name: Cilium | Wait for pods to run
   command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
@@ -17,4 +17,4 @@
   retries: 30
   delay: 10
   ignore_errors: yes
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
index 3ed49db81..ff56d2461 100644
--- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
@@ -8,7 +8,7 @@
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ flannel_node_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
 
 - name: Flannel | Wait for flannel subnet.env file presence
   wait_for:
diff --git a/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml
index 56d21717c..9f4250183 100644
--- a/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml
@@ -6,4 +6,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ kube_ovn_node_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
index 8694e496f..3e483bf7f 100644
--- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
@@ -8,7 +8,7 @@
     resource: "ds"
     namespace: "kube-system"
     state: "latest"
-  delegate_to: "{{ groups['kube-master'] | first }}"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
   run_once: true
 
 - name: kube-router | Wait for kube-router pods to be ready
@@ -18,6 +18,6 @@
   retries: 30
   delay: 10
   ignore_errors: yes
-  delegate_to: "{{ groups['kube-master'] | first }}"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
   run_once: true
   changed_when: false
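
Besides the when-guard, these kube-router tasks show the other recurring idiom touched by the rename: delegate_to combined with run_once executes a task exactly once, on the first control plane node, regardless of which hosts the play targets. A minimal sketch with a hypothetical command:

    - name: Example | Run a kubectl query once on the first control plane node  # hypothetical task
      command: "{{ bin_dir }}/kubectl get nodes"
      delegate_to: "{{ groups['kube_control_plane'] | first }}"
      run_once: true
      changed_when: false
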
diff --git a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
index eb4965028..232d3e403 100644
--- a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
@@ -8,4 +8,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/network_plugin/ovn4nfv/tasks/main.yml b/roles/kubernetes-apps/network_plugin/ovn4nfv/tasks/main.yml
index 1262ee6b9..987ff2949 100644
--- a/roles/kubernetes-apps/network_plugin/ovn4nfv/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/ovn4nfv/tasks/main.yml
@@ -6,4 +6,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ ovn4nfv_node_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
index daeea97b0..bc0f932d8 100644
--- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
@@ -8,7 +8,7 @@
     resource: "ds"
     namespace: "kube-system"
     state: "latest"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Weave | Wait for Weave to become available
   uri:
@@ -18,4 +18,4 @@
   retries: 180
   delay: 5
   until: "weave_status.status == 200 and 'Status: ready' in weave_status.content"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml
index 006c35d1e..7588c1f72 100644
--- a/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml
@@ -5,7 +5,7 @@
     dest: "{{ kube_config_dir }}/aws-ebs-csi-storage-class.yml"
   register: manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Persistent Volumes | Add AWS EBS CSI Storage Class
   kube:
@@ -15,5 +15,5 @@
     filename: "{{ kube_config_dir }}/aws-ebs-csi-storage-class.yml"
     state: "latest"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - manifests.changed
diff --git a/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml
index 04cca7618..04ac99ef8 100644
--- a/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml
@@ -5,7 +5,7 @@
     dest: "{{ kube_config_dir }}/azure-csi-storage-class.yml"
   register: manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Persistent Volumes | Add Azure CSI Storage Class
   kube:
@@ -15,5 +15,5 @@
     filename: "{{ kube_config_dir }}/azure-csi-storage-class.yml"
     state: "latest"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - manifests.changed
diff --git a/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml
index f94f8ca3e..c8ca8bc15 100644
--- a/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml
@@ -5,7 +5,7 @@
     dest: "{{ kube_config_dir }}/cinder-csi-storage-class.yml"
   register: manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Persistent Volumes | Add Cinder CSI Storage Class
   kube:
@@ -15,5 +15,5 @@
     filename: "{{ kube_config_dir }}/cinder-csi-storage-class.yml"
     state: "latest"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - manifests.changed
diff --git a/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml
index f1935e76b..d85e68fb4 100644
--- a/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml
@@ -5,7 +5,7 @@
     dest: "{{ kube_config_dir }}/gcp-pd-csi-storage-class.yml"
   register: manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Persistent Volumes | Add GCP PD CSI Storage Class
   kube:
@@ -15,5 +15,5 @@
     filename: "{{ kube_config_dir }}/gcp-pd-csi-storage-class.yml"
     state: "latest"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - manifests.changed
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
index 629c6add7..cc42224e1 100644
--- a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
@@ -5,7 +5,7 @@
     dest: "{{ kube_config_dir }}/openstack-storage-class.yml"
   register: manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
   kube:
@@ -15,5 +15,5 @@
     filename: "{{ kube_config_dir }}/openstack-storage-class.yml"
     state: "latest"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - manifests.changed
diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
index bbd39d63f..10f13893d 100644
--- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
@@ -19,7 +19,7 @@
     - {name: calico-kube-controllers, file: calico-kube-crb.yml, type: clusterrolebinding}
   register: calico_kube_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - rbac_enabled or item.type not in rbac_resources
 
 - name: Start of Calico kube controllers
@@ -33,7 +33,7 @@
   with_items:
     - "{{ calico_kube_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/registry/tasks/main.yml b/roles/kubernetes-apps/registry/tasks/main.yml
index aa3676498..6b8b5e7bc 100644
--- a/roles/kubernetes-apps/registry/tasks/main.yml
+++ b/roles/kubernetes-apps/registry/tasks/main.yml
@@ -38,7 +38,7 @@
     dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}"
   with_items: "{{ registry_templates }}"
   register: registry_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Registry | Apply manifests
   kube:
@@ -49,7 +49,7 @@
     filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ registry_manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Registry | Create PVC manifests
   template:
@@ -61,7 +61,7 @@
   when:
     - registry_storage_class != none and registry_storage_class
     - registry_disk_size != none and registry_disk_size
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Registry | Apply PVC manifests
   kube:
@@ -75,4 +75,4 @@
   when:
     - registry_storage_class != none and registry_storage_class
     - registry_disk_size != none and registry_disk_size
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml b/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml
index 32940af08..b979501cd 100644
--- a/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml
+++ b/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml
@@ -5,7 +5,7 @@
     dest: "{{ kube_config_dir }}/cinder-csi-snapshot-class.yml"
   register: manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kubernetes Snapshots | Add Cinder CSI Snapshot Class
   kube:
@@ -13,5 +13,5 @@
     filename: "{{ kube_config_dir }}/cinder-csi-snapshot-class.yml"
     state: "latest"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - manifests.changed
diff --git a/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml b/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml
index feeee4a41..58f9c2ca2 100644
--- a/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml
+++ b/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml
@@ -7,7 +7,7 @@
     - {name: rbac-snapshot-controller, file: rbac-snapshot-controller.yml}
     - {name: snapshot-controller, file: snapshot-controller.yml}
   register: snapshot_controller_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: snapshot-controller
 
 - name: Snapshot Controller | Apply Manifests
@@ -18,7 +18,7 @@
   with_items:
     - "{{ snapshot_controller_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
diff --git a/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml b/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml
index 234fa9bff..b88f57c3c 100644
--- a/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml
+++ b/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml
@@ -28,7 +28,7 @@
     kube_encrypt_token: "{{ kube_encrypt_token_extracted }}"
   delegate_to: "{{ item }}"
   delegate_facts: true
-  with_inventory_hostnames: kube-master
+  with_inventory_hostnames: kube_control_plane
   when: kube_encrypt_token_extracted is defined
 
 - name: Write secrets for encrypting secret data at rest
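
The with_inventory_hostnames loop above now expands the kube_control_plane group, and delegate_facts: true stores the fact into each delegated host's hostvars instead of onto the current host. A minimal sketch of the idiom, with hypothetical fact and variable names:

    - name: Example | Set a fact on every control plane host  # hypothetical task
      set_fact:
        example_fact: "{{ example_value }}"  # hypothetical names
      delegate_to: "{{ item }}"
      delegate_facts: true
      with_inventory_hostnames: kube_control_plane
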
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
index 6f961f2bc..1af7f0c6e 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
@@ -18,15 +18,15 @@
     --upload-certs
   register: kubeadm_upload_cert
   when:
-    - inventory_hostname == groups['kube-master']|first
+    - inventory_hostname == groups['kube_control_plane']|first
 
 - name: Parse certificate key if not set
   set_fact:
-    kubeadm_certificate_key: "{{ hostvars[groups['kube-master'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
+    kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
   run_once: yes
   when:
-    - hostvars[groups['kube-master'][0]]['kubeadm_upload_cert'] is defined
-    - hostvars[groups['kube-master'][0]]['kubeadm_upload_cert'] is not skipped
+    - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is defined
+    - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is not skipped
 
 - name: Create kubeadm ControlPlane config
   template:
@@ -35,7 +35,7 @@
     mode: 0640
     backup: yes
   when:
-    - inventory_hostname != groups['kube-master']|first
+    - inventory_hostname != groups['kube_control_plane']|first
     - not kubeadm_already_run.stat.exists
 
 - name: Wait for k8s apiserver
@@ -64,5 +64,5 @@
   throttle: 1
   until: kubeadm_join_control_plane is succeeded
   when:
-    - inventory_hostname != groups['kube-master']|first
+    - inventory_hostname != groups['kube_control_plane']|first
     - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
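
The parse step above works because the upload task is guarded to run only on the first control plane node and kubeadm prints the generated certificate key as the last line of its output, so every other host reads it back through that node's hostvars. A condensed sketch with a hypothetical helper variable and fact name:

    - name: Example | Read a result registered on the first control plane node  # hypothetical task
      set_fact:
        example_key: "{{ hostvars[first_cp]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"  # hypothetical fact
      vars:
        first_cp: "{{ groups['kube_control_plane'][0] }}"  # hypothetical helper var
      run_once: yes
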
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index 5a51a24be..ba214dcc3 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -25,7 +25,7 @@
 
 - name: kubeadm | aggregate all SANs
   set_fact:
-    apiserver_sans: "{{ (sans_base + groups['kube-master'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn) | unique }}"
+    apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn) | unique }}"
   vars:
     sans_base:
       - "kubernetes"
@@ -38,12 +38,12 @@
     sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}"
     sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}"
     sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}"
-    sans_access_ip: "{{ groups['kube-master'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}"
-    sans_ip: "{{ groups['kube-master'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}"
-    sans_address: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
+    sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}"
+    sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}"
+    sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
     sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}"
-    sans_hostname: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
-    sans_fqdn: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"
+    sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
+    sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"
   tags: facts
 
 - name: Create audit-policy directory
@@ -86,7 +86,7 @@
   register: apiserver_sans_check
   changed_when: "'does match certificate' not in apiserver_sans_check.stdout"
   when:
-    - inventory_hostname == groups['kube-master']|first
+    - inventory_hostname == groups['kube_control_plane']|first
     - kubeadm_already_run.stat.exists
 
 - name: kubeadm | regenerate apiserver cert 1/2
@@ -97,7 +97,7 @@
     - apiserver.crt
     - apiserver.key
   when:
-    - inventory_hostname == groups['kube-master']|first
+    - inventory_hostname == groups['kube_control_plane']|first
     - kubeadm_already_run.stat.exists
     - apiserver_sans_check.changed
 
@@ -107,7 +107,7 @@
     init phase certs apiserver
     --config={{ kube_config_dir }}/kubeadm-config.yaml
   when:
-    - inventory_hostname == groups['kube-master']|first
+    - inventory_hostname == groups['kube_control_plane']|first
     - kubeadm_already_run.stat.exists
     - apiserver_sans_check.changed
 
@@ -123,7 +123,7 @@
   # Retry because uploading the config sometimes fails
   retries: 3
   until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
-  when: inventory_hostname == groups['kube-master']|first and not kubeadm_already_run.stat.exists
+  when: inventory_hostname == groups['kube_control_plane']|first and not kubeadm_already_run.stat.exists
   failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
   environment:
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
@@ -132,7 +132,7 @@
 - name: set kubeadm certificate key
   set_fact:
     kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}"
-  with_items: "{{ hostvars[groups['kube-master'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
+  with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
   when:
     - kubeadm_certificate_key is not defined
     - (item | trim) is match('.*--certificate-key.*')
@@ -143,7 +143,7 @@
     {{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create {{ kubeadm_token }}
   changed_when: false
   when:
-    - inventory_hostname == groups['kube-master']|first
+    - inventory_hostname == groups['kube_control_plane']|first
     - kubeadm_token is defined
     - kubeadm_refresh_token
   tags:
@@ -156,7 +156,7 @@
   retries: 5
   delay: 5
   until: temp_token is succeeded
-  delegate_to: "{{ groups['kube-master'] | first }}"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
   when: kubeadm_token is not defined
   tags:
     - kubeadm_token
@@ -180,7 +180,7 @@
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
 - name: kubeadm | Remove taint for master with node role
   command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} {{ item }}"
-  delegate_to: "{{ groups['kube-master'] | first }}"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
   with_items:
     - "node-role.kubernetes.io/master:NoSchedule-"
     - "node-role.kubernetes.io/control-plane:NoSchedule-"
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
index 39fb4f3f9..0570ee9d0 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
@@ -3,7 +3,7 @@
   uri:
     url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
     validate_certs: false
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
   register: _result
   retries: 60
   delay: 5
@@ -23,7 +23,7 @@
   # Retry because uploading the config sometimes fails
   retries: 3
   until: kubeadm_upgrade.rc == 0
-  when: inventory_hostname == groups['kube-master']|first
+  when: inventory_hostname == groups['kube_control_plane']|first
   failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
   environment:
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
@@ -40,7 +40,7 @@
     --etcd-upgrade={{ etcd_kubeadm_enabled | bool | lower }}
     --force
   register: kubeadm_upgrade
-  when: inventory_hostname != groups['kube-master']|first
+  when: inventory_hostname != groups['kube_control_plane']|first
   failed_when:
     - kubeadm_upgrade.rc != 0
     - '"field is immutable" not in kubeadm_upgrade.stderr'
diff --git a/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2 b/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2
index 3c5e0c18f..825d983c6 100644
--- a/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2
+++ b/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2
@@ -3,7 +3,7 @@ Description=Timer to renew K8S control plane certificates
 
 [Timer]
 # First Monday of each month
-OnCalendar=Mon *-*-1..7 03:{{ groups['kube-master'].index(inventory_hostname) }}0:00
+OnCalendar=Mon *-*-1..7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00
 
 [Install]
 WantedBy=multi-user.target
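
The renamed expression keeps the renewal timers staggered: each control plane node splices its own index in the group into the tens digit of the minutes field, which implicitly assumes fewer than ten control plane nodes. With three hypothetical hosts, the rendered units would fire at:

    # index 0 -> OnCalendar=Mon *-*-1..7 03:00:00
    # index 1 -> OnCalendar=Mon *-*-1..7 03:10:00
    # index 2 -> OnCalendar=Mon *-*-1..7 03:20:00
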
diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2
index 50025330a..c0c6e5439 100644
--- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2
@@ -16,7 +16,7 @@ nodeRegistration:
 {% if kube_override_hostname|default('') %}
   name: {{ kube_override_hostname }}
 {% endif %}
-{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
+{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube-node'] %}
   taints:
   - effect: NoSchedule
     key: node-role.kubernetes.io/master
diff --git a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
index b5c0f2552..787613e60 100644
--- a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
+++ b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
@@ -1,7 +1,7 @@
 ---
 - name: Parse certificate key if not set
   set_fact:
-    kubeadm_certificate_key: "{{ hostvars[groups['kube-master'][0]]['kubeadm_certificate_key'] }}"
+    kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_certificate_key'] }}"
   when: kubeadm_certificate_key is undefined
 
 - name: Pull control plane certs down
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 148226e6d..5cb654320 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -25,7 +25,7 @@
     get_checksum: no
     get_mime: no
   register: kubeadm_ca_stat
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true
 
 - name: Calculate kubeadm CA cert hash
@@ -36,14 +36,14 @@
   when:
     - kubeadm_ca_stat.stat is defined
     - kubeadm_ca_stat.stat.exists
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true
   changed_when: false
 
 - name: Create kubeadm token for joining nodes with 24h expiration (default)
   command: "{{ bin_dir }}/kubeadm token create"
   register: temp_token
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kubeadm_token is not defined
   changed_when: false
 
@@ -118,7 +118,7 @@
   args:
     executable: /bin/bash
   run_once: true
-  delegate_to: "{{ groups['kube-master']|first }}"
+  delegate_to: "{{ groups['kube_control_plane']|first }}"
   delegate_facts: false
   when:
     - kubeadm_config_api_fqdn is not defined
@@ -138,7 +138,7 @@
 - name: Restart all kube-proxy pods to ensure that they load the new configmap
   command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
-  delegate_to: "{{ groups['kube-master']|first }}"
+  delegate_to: "{{ groups['kube_control_plane']|first }}"
   delegate_facts: false
   when:
     - kubeadm_config_api_fqdn is not defined
@@ -151,6 +151,6 @@
   include_tasks: kubeadm_etcd_node.yml
   when:
     - etcd_kubeadm_enabled
-    - inventory_hostname not in groups['kube-master']
+    - inventory_hostname not in groups['kube_control_plane']
     - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
diff --git a/roles/kubernetes/node-label/tasks/main.yml b/roles/kubernetes/node-label/tasks/main.yml
index 9522d29b5..d01fda835 100644
--- a/roles/kubernetes/node-label/tasks/main.yml
+++ b/roles/kubernetes/node-label/tasks/main.yml
@@ -9,7 +9,7 @@
   until: result.status == 200
   retries: 10
   delay: 6
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Set role node label to empty list
   set_fact:
@@ -42,6 +42,6 @@
   command: >-
       {{ bin_dir }}/kubectl label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true
   loop: "{{ role_node_labels + inventory_node_labels }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   changed_when: false
 ...
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml
index f7deae705..c24a1fedc 100644
--- a/roles/kubernetes/node/tasks/install.yml
+++ b/roles/kubernetes/node/tasks/install.yml
@@ -8,7 +8,7 @@
   tags:
     - kubeadm
   when:
-    - not inventory_hostname in groups['kube-master']
+    - not inventory_hostname in groups['kube_control_plane']
 
 - name: install | Copy kubelet binary from download dir
   copy:
diff --git a/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 b/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2
index ef3269fc8..1d5d7d945 100644
--- a/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2
+++ b/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2
@@ -38,6 +38,6 @@ backend kube_api_backend
   default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100
   option httpchk GET /healthz
   http-check expect status 200
-  {% for host in groups['kube-master'] -%}
+  {% for host in groups['kube_control_plane'] -%}
   server {{ host }} {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }} check check-ssl verify none
   {% endfor -%}
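
Rendered, the loop above emits one backend server per control plane node, preferring access_ip, then ip, then the fallback address. With two hypothetical hosts and kube_apiserver_port assumed to be the usual 6443, the output would look like:

    server m1 10.0.0.1:6443 check check-ssl verify none
    server m2 10.0.0.2:6443 check check-ssl verify none
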
diff --git a/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 b/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2
index 6361a6f39..38e34aa40 100644
--- a/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2
+++ b/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2
@@ -13,7 +13,7 @@ events {
 stream {
   upstream kube_apiserver {
     least_conn;
-    {% for host in groups['kube-master'] -%}
+    {% for host in groups['kube_control_plane'] -%}
     server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }};
     {% endfor -%}
   }
diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml
index ec78c50b6..6325ac336 100644
--- a/roles/kubernetes/preinstall/handlers/main.yml
+++ b/roles/kubernetes/preinstall/handlers/main.yml
@@ -55,7 +55,7 @@
     get_checksum: no
     get_mime: no
   register: kube_apiserver_set
-  when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
+  when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
 
 # FIXME(mattymo): Also restart for kubeadm mode
 - name: Preinstall | kube-controller configured
@@ -65,13 +65,13 @@
     get_checksum: no
     get_mime: no
   register: kube_controller_set
-  when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
+  when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
 
 - name: Preinstall | restart kube-controller-manager docker
   shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
   when:
     - container_manager == "docker"
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
     - kube_controller_set.stat.exists
@@ -80,7 +80,7 @@
   shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
   when:
     - container_manager in ['crio', 'containerd']
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
     - kube_controller_set.stat.exists
@@ -89,7 +89,7 @@
   shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
   when:
     - container_manager == "docker"
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
 
@@ -97,7 +97,7 @@
   shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
   when:
     - container_manager in ['crio', 'containerd']
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
     - dns_mode != 'none'
     - resolvconf_mode == 'host_resolvconf'
 
diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
index fe18b23fe..c2bc22555 100644
--- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
@@ -1,9 +1,9 @@
 ---
-- name: Stop if either kube-master or kube-node group is empty
+- name: Stop if either kube_control_plane or kube-node group is empty
   assert:
     that: "groups.get('{{ item }}')"
   with_items:
-    - kube-master
+    - kube_control_plane
     - kube-node
   run_once: true
   when: not ignore_assert_errors
@@ -79,7 +79,7 @@
     that: ansible_memtotal_mb >= minimal_master_memory_mb
   when:
     - not ignore_assert_errors
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
 
 - name: Stop if memory is too small for nodes
   assert:
@@ -136,7 +136,7 @@
   assert:
     that: rbac_enabled and kube_api_anonymous_auth
   when:
-    - kube_apiserver_insecure_port == 0 and inventory_hostname in groups['kube-master']
+    - kube_apiserver_insecure_port == 0 and inventory_hostname in groups['kube_control_plane']
     - not ignore_assert_errors
 
 - name: Stop if kernel version is too low
@@ -193,7 +193,7 @@
     - kube_network_plugin == 'calico'
     - 'calico_version_on_server.stdout is defined'
     - calico_version_on_server.stdout
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   run_once: yes
 
 - name: "Check that cluster_id is set if calico_rr enabled"
@@ -204,7 +204,7 @@
   when:
     - kube_network_plugin == 'calico'
     - peer_with_calico_rr
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   run_once: yes
 
 - name: "Check that calico_rr nodes are in k8s-cluster group"
diff --git a/roles/kubernetes/tokens/tasks/check-tokens.yml b/roles/kubernetes/tokens/tasks/check-tokens.yml
index c8fe3812f..ae75f0d04 100644
--- a/roles/kubernetes/tokens/tasks/check-tokens.yml
+++ b/roles/kubernetes/tokens/tasks/check-tokens.yml
@@ -5,7 +5,7 @@
     get_attributes: no
     get_checksum: yes
     get_mime: no
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   register: known_tokens_master
   run_once: true
 
@@ -32,7 +32,7 @@
   set_fact:
     sync_tokens: >-
       {%- set tokens = {'sync': False} -%}
-      {%- for server in groups['kube-master'] | intersect(ansible_play_batch)
+      {%- for server in groups['kube_control_plane'] | intersect(ansible_play_batch)
         if (not hostvars[server].known_tokens.stat.exists) or
         (hostvars[server].known_tokens.stat.checksum|default('') != known_tokens_master.stat.checksum|default('')) -%}
         {%- set _ = tokens.update({'sync': True}) -%}
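
The loop above decides whether token syncing is needed: it walks the control plane hosts in the current batch and flips the flag as soon as any of them lacks the known-tokens file or carries a checksum differing from the first master's. The mutable dict updated via tokens.update is the usual Jinja2 workaround for set assignments not escaping a for block, as in this minimal sketch:

    {%- set flag = {'hit': False} -%}
    {%- for h in ['a', 'b'] if h == 'b' -%}
      {%- set _ = flag.update({'hit': True}) -%}
    {%- endfor -%}
    {{ flag.hit }}  {# renders True; a plain set inside the loop would not persist #}
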
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index 2b94ce4f3..40d4910d2 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -5,7 +5,7 @@
     dest: "{{ kube_script_dir }}/kube-gen-token.sh"
     mode: 0700
   run_once: yes
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens|default(false)
 
 - name: Gen_tokens | generate tokens for master components
@@ -14,11 +14,11 @@
     TOKEN_DIR: "{{ kube_token_dir }}"
   with_nested:
     - [ "system:kubectl" ]
-    - "{{ groups['kube-master'] }}"
+    - "{{ groups['kube_control_plane'] }}"
   register: gentoken_master
   changed_when: "'Added' in gentoken_master.stdout"
   run_once: yes
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens|default(false)
 
 - name: Gen_tokens | generate tokens for node components
@@ -31,14 +31,14 @@
   register: gentoken_node
   changed_when: "'Added' in gentoken_node.stdout"
   run_once: yes
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens|default(false)
 
 - name: Gen_tokens | Get list of tokens from first master
   command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
   register: tokens_list
   check_mode: no
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true
   when: sync_tokens|default(false)
 
@@ -49,7 +49,7 @@
     executable: /bin/bash
   register: tokens_data
   check_mode: no
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true
   when: sync_tokens|default(false)
 
@@ -58,7 +58,7 @@
   args:
     executable: /bin/bash
   when:
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
     - sync_tokens|default(false)
-    - inventory_hostname != groups['kube-master'][0]
+    - inventory_hostname != groups['kube_control_plane'][0]
     - tokens_data.stdout
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 3b66cab84..782e15d40 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -447,11 +447,11 @@ ssl_ca_dirs: |-
   ]
 
 # Vars for pointing to kubernetes api endpoints
-is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
-kube_apiserver_count: "{{ groups['kube-master'] | length }}"
+is_kube_master: "{{ inventory_hostname in groups['kube_control_plane'] }}"
+kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}"
 kube_apiserver_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}"
 kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
-first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(fallback_ips[groups['kube-master'][0]])) }}"
+first_kube_master: "{{ hostvars[groups['kube_control_plane'][0]]['access_ip'] | default(hostvars[groups['kube_control_plane'][0]]['ip'] | default(fallback_ips[groups['kube_control_plane'][0]])) }}"
 loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
 loadbalancer_apiserver_type: "nginx"
 # applied if only external loadbalancer_apiserver is defined, otherwise ignored
@@ -483,7 +483,7 @@ kube_apiserver_client_key: "{{ kube_cert_dir }}/ca.key"
 etcd_events_cluster_enabled: false
 
 # etcd group can be empty when kubeadm manages etcd
-etcd_hosts: "{{ groups['etcd'] | default(groups['kube-master']) }}"
+etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}"
 
 # Vars for pointing to etcd endpoints
 is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
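
first_kube_master now resolves through a three-level fallback keyed on the renamed group: the first control plane host's access_ip if set, else its ip, else its entry in fallback_ips. Spelled out with a hypothetical helper variable for readability:

    first_cp_host: "{{ groups['kube_control_plane'][0] }}"  # hypothetical helper, not in the role
    first_kube_master: "{{ hostvars[first_cp_host]['access_ip'] | default(hostvars[first_cp_host]['ip'] | default(fallback_ips[first_cp_host])) }}"
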
diff --git a/roles/kubespray-defaults/tasks/no_proxy.yml b/roles/kubespray-defaults/tasks/no_proxy.yml
index 954418537..984bb50a2 100644
--- a/roles/kubespray-defaults/tasks/no_proxy.yml
+++ b/roles/kubespray-defaults/tasks/no_proxy.yml
@@ -7,7 +7,7 @@
       {{ loadbalancer_apiserver.address | default('') }},
       {%- endif -%}
       {%- if no_proxy_exclude_workers | default(false) -%}
-      {% set cluster_or_master = 'kube-master' %}
+      {% set cluster_or_master = 'kube_control_plane' %}
       {%- else -%}
       {% set cluster_or_master = 'k8s-cluster' %}
       {%- endif -%}
diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml
index 78e4cb881..a0a656707 100644
--- a/roles/network_plugin/calico/tasks/check.yml
+++ b/roles/network_plugin/calico/tasks/check.yml
@@ -43,7 +43,7 @@
   changed_when: False
   register: calico
   run_once: True
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Set calico_pool_conf"
   set_fact:
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index c4831cbbc..d214b29b5 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -39,7 +39,7 @@
   include_tasks: typha_certs.yml
   when:
     - typha_secure
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Calico | Install calicoctl wrapper script
   template:
@@ -74,14 +74,14 @@
   delay: "{{ retry_stagger | random + 3 }}"
   changed_when: false
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined
   assert:
     that: "[calico_pool_cidr] | ipaddr(kube_pods_subnet) | length == 1"
     msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - 'calico_conf.stdout == "0"'
     - calico_pool_cidr is defined
 
@@ -97,7 +97,7 @@
   delay: "{{ retry_stagger | random + 3 }}"
   changed_when: false
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - enable_dual_stack_networks
 
 - name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined
@@ -105,7 +105,7 @@
     that: "[calico_pool_cidr_ipv6] | ipaddr(kube_pods_subnet_ipv6) | length == 1"
     msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
     - calico_pool_cidr_ipv6 is defined
     - enable_dual_stack_networks
@@ -134,9 +134,9 @@
         filename: "{{ kube_config_dir }}/kdd-crds.yml"
         state: "latest"
       when:
-        - inventory_hostname == groups['kube-master'][0]
+        - inventory_hostname == groups['kube_control_plane'][0]
   when:
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
     - calico_datastore == "kdd"
 
 - name: Calico | Configure calico network pool
@@ -157,7 +157,7 @@
           "vxlanMode": "{{ calico_vxlan_mode }}",
           "natOutgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }} }}
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - 'calico_conf.stdout == "0"'
 
 - name: Calico | Configure calico ipv6 network pool (version >= v3.3.0)
@@ -176,7 +176,7 @@
           "vxlanMode": "{{ calico_vxlan_mode_ipv6 }}",
           "natOutgoing": {{ nat_outgoing_ipv6|default(false) and not peer_with_router_ipv6|default(false) }} }}
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
     - calico_version is version("v3.3.0", ">=")
     - enable_dual_stack_networks | bool
@@ -214,7 +214,7 @@
           "serviceExternalIPs": {{ _service_external_ips|default([]) }} }}
   changed_when: false
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Calico | Configure peering with router(s) at global scope
   command:
@@ -238,7 +238,7 @@
   with_items:
     - "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - peer_with_router|default(false)
 
 - name: Calico | Configure peering with route reflectors at global scope
@@ -264,7 +264,7 @@
   with_items:
     - "{{ groups['calico-rr'] | default([]) }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - peer_with_calico_rr|default(false)
 
 - name: Calico | Configure route reflectors to peer with each other
@@ -290,7 +290,7 @@
   with_items:
     - "{{ groups['calico-rr'] | default([]) }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - peer_with_calico_rr|default(false)
 
 - name: Calico | Create calico manifests
@@ -305,7 +305,7 @@
     - {name: calico, file: calico-crb.yml, type: clusterrolebinding}
   register: calico_node_manifests
   when:
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
     - rbac_enabled or item.type not in rbac_resources
 
 - name: Calico | Create calico manifests for typha
@@ -316,7 +316,7 @@
     - {name: calico, file: calico-typha.yml, type: typha}
   register: calico_node_typha_manifest
   when:
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
     - typha_enabled and calico_datastore == "kdd"
 
 - name: Start Calico resources
@@ -331,7 +331,7 @@
     - "{{ calico_node_manifests.results }}"
     - "{{ calico_node_typha_manifest.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
@@ -340,7 +340,7 @@
   wait_for:
     path: /etc/cni/net.d/calico-kubeconfig
   when:
-    - inventory_hostname not in groups['kube-master']
+    - inventory_hostname not in groups['kube_control_plane']
     - calico_datastore == "kdd"
 
 - name: Calico | Configure node asNumber for per node peering
diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml
index 517218a88..e3ca15065 100644
--- a/roles/network_plugin/calico/tasks/pre.yml
+++ b/roles/network_plugin/calico/tasks/pre.yml
@@ -22,6 +22,6 @@
   args:
     executable: /bin/bash
   register: calico_kubelet_name
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
   - "cloud_provider is defined"
diff --git a/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 b/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2
index e6e4ec6e8..a6c080cf4 100644
--- a/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2
+++ b/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2
@@ -1,6 +1,6 @@
 #!/bin/bash
 DATASTORE_TYPE=kubernetes \
-{% if inventory_hostname in groups['kube-master'] %}
+{% if inventory_hostname in groups['kube_control_plane'] %}
 KUBECONFIG=/etc/kubernetes/admin.conf \
 {% else %}
 KUBECONFIG=/etc/cni/net.d/calico-kubeconfig \
diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml
index 982182446..320c20ad3 100644
--- a/roles/network_plugin/canal/tasks/main.yml
+++ b/roles/network_plugin/canal/tasks/main.yml
@@ -59,7 +59,7 @@
     - {name: canal-flannel, file: canal-crb-flannel.yml, type: clusterrolebinding}
   register: canal_manifests
   when:
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
 
 - name: Canal | Install calicoctl wrapper script
   template:
diff --git a/roles/network_plugin/cilium/tasks/install.yml b/roles/network_plugin/cilium/tasks/install.yml
index 7a8750d5d..1470d2d97 100644
--- a/roles/network_plugin/cilium/tasks/install.yml
+++ b/roles/network_plugin/cilium/tasks/install.yml
@@ -39,7 +39,7 @@
     - {name: cilium, file: cilium-sa.yml, type: sa}
   register: cilium_node_manifests
   when:
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
 
 - name: Cilium | Enable portmap addon
   template:
diff --git a/roles/network_plugin/cilium/tasks/main.yml b/roles/network_plugin/cilium/tasks/main.yml
index 515536094..c3bee558e 100644
--- a/roles/network_plugin/cilium/tasks/main.yml
+++ b/roles/network_plugin/cilium/tasks/main.yml
@@ -1,4 +1,4 @@
 ---
 - import_tasks: check.yml
 
-- include_tasks: install.yml
\ No newline at end of file
+- include_tasks: install.yml
diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml
index 115743ace..8db000c30 100644
--- a/roles/network_plugin/flannel/tasks/main.yml
+++ b/roles/network_plugin/flannel/tasks/main.yml
@@ -8,4 +8,4 @@
     - {name: kube-flannel, file: cni-flannel.yml, type: ds}
   register: flannel_node_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/network_plugin/kube-ovn/tasks/main.yml b/roles/network_plugin/kube-ovn/tasks/main.yml
index c416f120a..2efafa4cd 100644
--- a/roles/network_plugin/kube-ovn/tasks/main.yml
+++ b/roles/network_plugin/kube-ovn/tasks/main.yml
@@ -1,9 +1,9 @@
 ---
 - name: Kube-OVN | Label ovn-db node
   command: >-
-    {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master
+    {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Kube-OVN | Create Kube-OVN manifests
   template:
diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml
index 6b8719e8a..6be517bc4 100644
--- a/roles/network_plugin/kube-router/tasks/annotate.yml
+++ b/roles/network_plugin/kube-router/tasks/annotate.yml
@@ -1,21 +1,21 @@
 ---
-- name: kube-router | Add annotations on kube-master
+- name: kube-router | Add annotations on kube_control_plane
   command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_master }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
-  when: kube_router_annotations_master is defined and inventory_hostname in groups['kube-master']
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+  when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane']
 
 - name: kube-router | Add annotations on kube-node
   command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_node }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_node is defined and inventory_hostname in groups['kube-node']
 
 - name: kube-router | Add common annotations on all servers
   command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_all }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s-cluster']
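
All three tasks run kubectl through the first control-plane host via delegate_to and fire only when the optional annotation lists are defined. A sketch of those variables with hypothetical values (the keys shown are kube-router's BGP peering annotations):

    kube_router_annotations_master:
      - "kube-router.io/peer.ips=10.0.0.1"
      - "kube-router.io/peer.asns=64512"
    kube_router_annotations_node:
      - "kube-router.io/pod-cidr=10.233.64.0/24"
    kube_router_annotations_all:
      - "kube-router.io/bgp-local-addresses=192.168.1.10"
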
diff --git a/roles/network_plugin/kube-router/tasks/main.yml b/roles/network_plugin/kube-router/tasks/main.yml
index 48d8abe32..f107eed64 100644
--- a/roles/network_plugin/kube-router/tasks/main.yml
+++ b/roles/network_plugin/kube-router/tasks/main.yml
@@ -55,5 +55,5 @@
   template:
     src: kube-router.yml.j2
     dest: "{{ kube_config_dir }}/kube-router.yml"
-  delegate_to: "{{ groups['kube-master'] | first }}"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
   run_once: true
diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml
index 45f877f6f..191df8cef 100644
--- a/roles/network_plugin/macvlan/tasks/main.yml
+++ b/roles/network_plugin/macvlan/tasks/main.yml
@@ -3,7 +3,7 @@
   command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
   changed_when: false
   register: node_pod_cidr_cmd
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: Macvlan | set node_pod_cidr
   set_fact:
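
The set_fact body falls outside the hunk; presumably it just captures the jsonpath output registered above, along the lines of this sketch (an assumption, not a line from this patch):

    - name: Macvlan | set node_pod_cidr
      set_fact:
        node_pod_cidr: "{{ node_pod_cidr_cmd.stdout }}"
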
diff --git a/roles/network_plugin/ovn4nfv/tasks/main.yml b/roles/network_plugin/ovn4nfv/tasks/main.yml
index 32a4c2dc5..26dbd32bd 100644
--- a/roles/network_plugin/ovn4nfv/tasks/main.yml
+++ b/roles/network_plugin/ovn4nfv/tasks/main.yml
@@ -1,9 +1,9 @@
 ---
 - name: ovn4nfv | Label control-plane node
   command: >-
-    {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane
+    {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: ovn4nfv | Create ovn4nfv-k8s manifests
   template:
diff --git a/roles/recover_control_plane/control-plane/tasks/main.yml b/roles/recover_control_plane/control-plane/tasks/main.yml
index 5f4b6a922..450e6f36d 100644
--- a/roles/recover_control_plane/control-plane/tasks/main.yml
+++ b/roles/recover_control_plane/control-plane/tasks/main.yml
@@ -8,22 +8,22 @@
   retries: 6
   delay: 10
   changed_when: false
-  when: groups['broken_kube-master']
+  when: groups['broken_kube_control_plane']
 
-- name: Delete broken kube-master nodes from cluster
+- name: Delete broken kube_control_plane nodes from cluster
   command: "{{ bin_dir }}/kubectl delete node {{ item }}"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
-  with_items: "{{ groups['broken_kube-master'] }}"
+  with_items: "{{ groups['broken_kube_control_plane'] }}"
   register: delete_broken_kube_masters
   failed_when: false
-  when: groups['broken_kube-master']
+  when: groups['broken_kube_control_plane']
 
-- name: Fail if unable to delete broken kube-master nodes from cluster
+- name: Fail if unable to delete broken kube_control_plane nodes from cluster
   fail:
-    msg: "Unable to delete broken kube-master node: {{ item.item }}"
+    msg: "Unable to delete broken kube_control_plane node: {{ item.item }}"
   loop: "{{ delete_broken_kube_masters.results }}"
   changed_when: false
   when:
-    - groups['broken_kube-master']
+    - groups['broken_kube_control_plane']
     - "item.rc != 0 and not 'NotFound' in item.stderr"
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index c4660ef87..fd4c6fc58 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
 - name: Delete node  # noqa 301
   command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
-  delegate_to: "{{ groups['kube-master']|first }}"
+  delegate_to: "{{ groups['kube_control_plane']|first }}"
   ignore_errors: yes
\ No newline at end of file
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index 42316e209..ba9c94531 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -11,7 +11,7 @@
                   | jq "select(. | test(\"^{{ hostvars[item]['kube_override_hostname']|default(item) }}$\"))"
   loop: "{{ node.split(',') | default(groups['kube-node']) }}"
   register: nodes
-  delegate_to: "{{ groups['kube-master']|first }}"
+  delegate_to: "{{ groups['kube_control_plane']|first }}"
   changed_when: false
   run_once: true
 
@@ -33,7 +33,7 @@
   loop: "{{ nodes_to_drain }}"
   register: result
   failed_when: result.rc != 0 and not allow_ungraceful_removal
-  delegate_to: "{{ groups['kube-master']|first }}"
+  delegate_to: "{{ groups['kube_control_plane']|first }}"
   run_once: true
   until: result.rc == 0 or allow_ungraceful_removal
   retries: "{{ drain_retries }}"
diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml
index 5e6309e17..805677f86 100644
--- a/roles/upgrade/post-upgrade/tasks/main.yml
+++ b/roles/upgrade/post-upgrade/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Uncordon node
   command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
     - needs_cordoning|default(false)
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index bf436d360..d969175e3 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -21,7 +21,7 @@
     {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
     -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
   register: kubectl_node_ready
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   failed_when: false
   changed_when: false
 
@@ -32,7 +32,7 @@
     {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
     -o jsonpath='{ .spec.unschedulable }'
   register: kubectl_node_schedulable
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   failed_when: false
   changed_when: false
 
@@ -49,12 +49,12 @@
   block:
     - name: Cordon node
       command: "{{ bin_dir }}/kubectl cordon {{ kube_override_hostname|default(inventory_hostname) }}"
-      delegate_to: "{{ groups['kube-master'][0] }}"
+      delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
     - name: Check kubectl version
       command: "{{ bin_dir }}/kubectl version --client --short"
       register: kubectl_version
-      delegate_to: "{{ groups['kube-master'][0] }}"
+      delegate_to: "{{ groups['kube_control_plane'][0] }}"
       run_once: yes
       changed_when: false
       when:
@@ -90,6 +90,6 @@
       fail:
         msg: "Failed to drain node {{ inventory_hostname }}"
       when: upgrade_node_fail_if_drain_fails
-  delegate_to: "{{ groups['kube-master'][0] }}"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
     - needs_cordoning
diff --git a/scale.yml b/scale.yml
index 3e7274837..f6a8578e7 100644
--- a/scale.yml
+++ b/scale.yml
@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventories that still contain kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: Add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
@@ -32,8 +41,8 @@
     - { role: kubespray-defaults }
     - { role: etcd, tags: etcd, etcd_cluster_setup: false }
 
-- name: Download images to ansible host cache via first kube-master node
-  hosts: kube-master[0]
+- name: Download images to ansible host cache via first kube_control_plane node
+  hosts: kube_control_plane[0]
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -64,7 +73,7 @@
     - { role: kubernetes/node, tags: node }
 
 - name: Upload control plane certs and retrieve encryption key
-  hosts: kube-master | first
+  hosts: kube_control_plane | first
   environment: "{{ proxy_disable_env }}"
   gather_facts: False
   tags: kubeadm
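
The group_by shim added at the top of this playbook folds every host of a legacy kube-master group into kube_control_plane at runtime. The same effect can be declared in the inventory itself, as in this sketch using Ansible's YAML inventory format:

    all:
      children:
        kube_control_plane:
          children:
            kube-master: {}  # legacy group, kept only for the transition
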
diff --git a/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 b/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2
index b842c97a7..8e59e2f3c 100644
--- a/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2
+++ b/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2
@@ -4,6 +4,10 @@ instance-{{ loop.index }} ansible_ssh_host={{instance.stdout}}
 {% endfor %}
 
 {% if mode is defined and mode in ["separate", "separate-scale"] %}
+[kube_control_plane]
+instance-1
+
+# TODO(oomichi): Remove all kube-master groups from this file after releasing v2.16.
 [kube-master]
 instance-1
 
@@ -13,6 +17,10 @@ instance-2
 [etcd]
 instance-3
 {% elif mode is defined and mode in ["ha", "ha-scale"] %}
+[kube_control_plane]
+instance-1
+instance-2
+
 [kube-master]
 instance-1
 instance-2
@@ -25,6 +33,9 @@ instance-1
 instance-2
 instance-3
 {% elif mode == "default" %}
+[kube_control_plane]
+instance-1
+
 [kube-master]
 instance-1
 
@@ -34,6 +45,9 @@ instance-2
 [etcd]
 instance-1
 {% elif mode == "aio" %}
+[kube_control_plane]
+instance-1
+
 [kube-master]
 instance-1
 
@@ -46,6 +60,10 @@ instance-1
 [vault]
 instance-1
 {% elif mode == "ha-recover" %}
+[kube_control_plane]
+instance-1
+instance-2
+
 [kube-master]
 instance-1
 instance-2
@@ -64,6 +82,11 @@ instance-2
 [broken_etcd]
 instance-2 etcd_member_name=etcd3
 {% elif mode == "ha-recover-noquorum" %}
+[kube_control_plane]
+instance-3
+instance-1
+instance-2
+
 [kube-master]
 instance-3
 instance-1
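
During the transition the template emits every control-plane group twice, once under each name, so both old and new group references keep resolving. A sketch of the rendered INI inventory for mode "separate" (host addresses elided):

    [kube_control_plane]
    instance-1

    [kube-master]
    instance-1

    [kube-node]
    instance-2

    [etcd]
    instance-3
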
diff --git a/tests/scripts/testcases_run.sh b/tests/scripts/testcases_run.sh
index 9f9870b57..5c27747ce 100755
--- a/tests/scripts/testcases_run.sh
+++ b/tests/scripts/testcases_run.sh
@@ -65,7 +65,7 @@ fi
 # Test control plane recovery
 if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
   ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes reset.yml
-  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube-master:!fake_hosts recover-control-plane.yml
+  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube_control_plane:!fake_hosts recover-control-plane.yml
 fi
 
 # Tests Cases
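
The --limit expression combines Ansible host patterns: the comma unions the etcd and kube_control_plane groups, and :!fake_hosts subtracts the CI dummy hosts. A minimal equivalent sketch:

    # union of two groups, minus an excluded group
    ansible-playbook recover-control-plane.yml \
      --limit 'etcd,kube_control_plane:!fake_hosts' -e etcd_retries=10
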
diff --git a/tests/templates/inventory-aws.j2 b/tests/templates/inventory-aws.j2
index 3ed86eb96..f5bba6fd7 100644
--- a/tests/templates/inventory-aws.j2
+++ b/tests/templates/inventory-aws.j2
@@ -2,7 +2,7 @@ node1 ansible_ssh_host={{ec2.instances[0].public_ip}} ansible_ssh_user={{ssh_use
 node2 ansible_ssh_host={{ec2.instances[1].public_ip}} ansible_ssh_user={{ssh_user}}
 node3 ansible_ssh_host={{ec2.instances[2].public_ip}} ansible_ssh_user={{ssh_user}}
 
-[kube-master]
+[kube_control_plane]
 node1
 node2
 
@@ -21,12 +21,12 @@ node2
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 calico-rr
 
 [calico-rr]
 
-[broken_kube-master]
+[broken_kube_control_plane]
 node2
 
 [broken_etcd]
diff --git a/tests/templates/inventory-do.j2 b/tests/templates/inventory-do.j2
index ab7d95220..f11306ce3 100644
--- a/tests/templates/inventory-do.j2
+++ b/tests/templates/inventory-do.j2
@@ -3,7 +3,7 @@
 {% endfor %}
 
 {% if mode is defined and mode == "separate" %}
-[kube-master]
+[kube_control_plane]
 {{droplets.results[0].droplet.name}}
 
 [kube-node]
@@ -15,7 +15,7 @@
 [vault]
 {{droplets.results[2].droplet.name}}
 {% elif mode is defined and mode == "ha" %}
-[kube-master]
+[kube_control_plane]
 {{droplets.results[0].droplet.name}}
 {{droplets.results[1].droplet.name}}
 
@@ -30,13 +30,13 @@
 {{droplets.results[1].droplet.name}}
 {{droplets.results[2].droplet.name}}
 
-[broken_kube-master]
+[broken_kube_control_plane]
 {{droplets.results[1].droplet.name}}
 
 [broken_etcd]
 {{droplets.results[2].droplet.name}}
 {% else %}
-[kube-master]
+[kube_control_plane]
 {{droplets.results[0].droplet.name}}
 
 [kube-node]
@@ -53,5 +53,5 @@
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 calico-rr
diff --git a/tests/templates/inventory-gce.j2 b/tests/templates/inventory-gce.j2
index 55f67deec..f78f5a96f 100644
--- a/tests/templates/inventory-gce.j2
+++ b/tests/templates/inventory-gce.j2
@@ -9,7 +9,7 @@
 {{node3}} ansible_ssh_host={{gce.instance_data[2].public_ip}}
 {% endif %}
 {% if mode is defined and mode in ["separate", "separate-scale"] %}
-[kube-master]
+[kube_control_plane]
 {{node1}}
 
 [kube-node]
@@ -21,7 +21,7 @@
 [vault]
 {{node3}}
 {% elif mode is defined and mode in ["ha", "ha-scale"] %}
-[kube-master]
+[kube_control_plane]
 {{node1}}
 {{node2}}
 
@@ -38,14 +38,14 @@
 {{node2}}
 {{node3}}
 
-[broken_kube-master]
+[broken_kube_control_plane]
 {{node2}}
 
 [etcd]
 {{node2}}
 {{node3}}
 {% elif mode == "default" %}
-[kube-master]
+[kube_control_plane]
 {{node1}}
 
 [kube-node]
@@ -57,7 +57,7 @@
 [vault]
 {{node1}}
 {% elif mode == "aio" %}
-[kube-master]
+[kube_control_plane]
 {{node1}}
 
 [kube-node]
@@ -72,7 +72,7 @@
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 calico-rr
 
 [calico-rr]
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index 330e5e6bf..adf0a35c9 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-master
+- hosts: kube_control_plane
 
   tasks:
   - name: Check the API servers are responding
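
Targeting the whole kube_control_plane group means every API server instance is probed, not just the first. A sketch of such a probe (the endpoint, port variable, and TLS handling here are assumptions, not the repository's exact task body):

    - name: Check the API servers are responding
      uri:
        url: "https://{{ ansible_default_ipv4.address }}:{{ kube_apiserver_port | default(6443) }}/healthz"
        validate_certs: no
        status_code: 200
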
diff --git a/tests/testcases/015_check-nodes-ready.yml b/tests/testcases/015_check-nodes-ready.yml
index b5bf60938..0faa1d46b 100644
--- a/tests/testcases/015_check-nodes-ready.yml
+++ b/tests/testcases/015_check-nodes-ready.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   tasks:
 
   - name: Force binaries directory for Flatcar Container Linux by Kinvolk
diff --git a/tests/testcases/020_check-pods-running.yml b/tests/testcases/020_check-pods-running.yml
index 6af07b137..edea22a5c 100644
--- a/tests/testcases/020_check-pods-running.yml
+++ b/tests/testcases/020_check-pods-running.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   tasks:
 
   - name: Force binaries directory for Flatcar Container Linux by Kinvolk
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index d2ab583db..5b18d6a8b 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   vars:
     test_image_repo: busybox
     test_image_tag: latest
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index a2a53b76a..174c9750c 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -37,7 +37,7 @@
       until: ncs_pod.stdout.find('Running') != -1
       retries: 3
       delay: 10
-      when: inventory_hostname == groups['kube-master'][0]
+      when: inventory_hostname == groups['kube_control_plane'][0]
 
     - name: Wait for netchecker agents
       shell: "set -o pipefail && {{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'"
@@ -48,12 +48,12 @@
       retries: 3
       delay: 10
       failed_when: false
-      when: inventory_hostname == groups['kube-master'][0]
+      when: inventory_hostname == groups['kube_control_plane'][0]
 
     - name: Get netchecker pods
       command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
       run_once: true
-      delegate_to: "{{ groups['kube-master'][0] }}"
+      delegate_to: "{{ groups['kube_control_plane'][0] }}"
       no_log: false
       with_items:
         - netchecker-agent
@@ -63,14 +63,14 @@
     - debug:
         var: nca_pod.stdout_lines
       failed_when: not nca_pod is success
-      when: inventory_hostname == groups['kube-master'][0]
+      when: inventory_hostname == groups['kube_control_plane'][0]
 
     - name: Get netchecker agents
       uri:
         url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/"
         return_content: yes
       run_once: true
-      delegate_to: "{{ groups['kube-master'][0] }}"
+      delegate_to: "{{ groups['kube_control_plane'][0] }}"
       register: agents
       retries: 18
       delay: "{{ agent_report_interval }}"
@@ -94,7 +94,7 @@
         url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check"
         status_code: 200
         return_content: yes
-      delegate_to: "{{ groups['kube-master'][0] }}"
+      delegate_to: "{{ groups['kube_control_plane'][0] }}"
       run_once: true
       register: result
       retries: 3
@@ -115,13 +115,13 @@
       command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
       no_log: false
       when:
-        - inventory_hostname == groups['kube-master'][0]
+        - inventory_hostname == groups['kube_control_plane'][0]
         - not result is success
 
     - name: Get logs from other apps
       command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
       when:
-        - inventory_hostname == groups['kube-master'][0]
+        - inventory_hostname == groups['kube_control_plane'][0]
         - not result is success
       no_log: false
       with_items:
@@ -184,7 +184,7 @@
           }'
           EOF
       when:
-        - inventory_hostname == groups['kube-master'][0]
+        - inventory_hostname == groups['kube_control_plane'][0]
         - kube_network_plugin_multus|default(false)|bool
 
     - name: Annotate pod with macvlan network
@@ -208,7 +208,7 @@
               image: dougbtv/centos-network
           EOF
       when:
-        - inventory_hostname == groups['kube-master'][0]
+        - inventory_hostname == groups['kube_control_plane'][0]
         - kube_network_plugin_multus|default(false)|bool
 
     - name: Check secondary macvlan interface
@@ -218,5 +218,5 @@
       retries: 90
       changed_when: false
       when:
-        - inventory_hostname == groups['kube-master'][0]
+        - inventory_hostname == groups['kube_control_plane'][0]
         - kube_network_plugin_multus|default(false)|bool
diff --git a/tests/testcases/100_check-k8s-conformance.yml b/tests/testcases/100_check-k8s-conformance.yml
index 9716b3dac..3830f2ca2 100644
--- a/tests/testcases/100_check-k8s-conformance.yml
+++ b/tests/testcases/100_check-k8s-conformance.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   vars:
     sonobuoy_version: 0.20.0
     sonobuoy_arch: amd64
diff --git a/tests/testcases/roles/cluster-dump/tasks/main.yml b/tests/testcases/roles/cluster-dump/tasks/main.yml
index 589a712e0..966a13c3d 100644
--- a/tests/testcases/roles/cluster-dump/tasks/main.yml
+++ b/tests/testcases/roles/cluster-dump/tasks/main.yml
@@ -2,17 +2,17 @@
 - name: Generate dump folder
   command: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
   no_log: true
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
 
 - name: Compress directory cluster-dump
   archive:
     path: /tmp/cluster-dump
     dest: /tmp/cluster-dump.tgz
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
 
 - name: Fetch dump file
   fetch:
     src: /tmp/cluster-dump.tgz
     dest: "{{ lookup('env', 'CI_PROJECT_DIR') }}/cluster-dump/{{ inventory_hostname }}.tgz"
     flat: true
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
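
With flat: true, each control-plane host's archive lands directly under the CI project directory, one file per host. Sketch of inspecting the resulting artifacts (hypothetical hostnames):

    # $CI_PROJECT_DIR/cluster-dump/instance-1.tgz
    # $CI_PROJECT_DIR/cluster-dump/instance-2.tgz
    tar -tzf "$CI_PROJECT_DIR/cluster-dump/instance-1.tgz" | head
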
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index b53668408..6fd30537b 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventories that still contain kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: Add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
@@ -26,8 +35,8 @@
   tags: always
   import_playbook: facts.yml
 
-- name: Download images to ansible host cache via first kube-master node
-  hosts: kube-master[0]
+- name: Download images to ansible host cache via first kube_control_plane node
+  hosts: kube_control_plane[0]
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -84,7 +93,7 @@
 
 - name: Handle upgrades to master components first to maintain backwards compat.
   gather_facts: False
-  hosts: kube-master
+  hosts: kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   serial: 1
@@ -101,7 +110,7 @@
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
 - name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
-  hosts: kube-master:calico-rr:kube-node
+  hosts: kube_control_plane:calico-rr:kube-node
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
@@ -114,7 +123,7 @@
     - { role: kubernetes-apps/policy_controller, tags: policy-controller }
 
 - name: Finally handle worker upgrades, based on given batch size
-  hosts: kube-node:calico-rr:!kube-master
+  hosts: kube-node:calico-rr:!kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -128,7 +137,7 @@
     - { role: kubernetes/node-label, tags: node-label }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   gather_facts: False
   any_errors_fatal: true
   environment: "{{ proxy_disable_env }}"
@@ -144,7 +153,7 @@
     - { role: kubespray-defaults }
     - { role: network_plugin/calico/rr, tags: network }
 
-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
-- 
GitLab