diff --git a/README.md b/README.md
index 33f76b8c79b54f2bf4171c4a8608564ce4c8c727..27d13aa8075079fe4258bf280eea4991d0670e22 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
 
 # Review and change parameters under ``inventory/mycluster/group_vars``
 cat inventory/mycluster/group_vars/all/all.yml
-cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
+cat inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml
 
 # Deploy Kubespray with Ansible Playbook - run the playbook as root
 # The option `--become` is required, as for example writing SSL keys in /etc/,
diff --git a/Vagrantfile b/Vagrantfile
index 5ee9e46374d96409ec5433a682b7b455b9e1e751..0a90b5170fea802f802b638c3d80249f036acd75 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -254,8 +254,8 @@ Vagrant.configure("2") do |config|
           ansible.groups = {
             "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
             "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
-            "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
-            "k8s-cluster:children" => ["kube_control_plane", "kube-node"],
+            "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
+            "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
           }
         end
       end
diff --git a/cluster.yml b/cluster.yml
index 6a169e9b07204640ad218550f65947a70ad7de12..c2ba9a7bd17c395c8c39c1084548cb8a281bcb8f 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -2,14 +2,8 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
-- name: Add kube-master nodes to kube_control_plane
-  # This is for old inventory which contains kube-master instead of kube_control_plane
-  hosts: kube-master
-  gather_facts: false
-  tasks:
-    - name: add nodes to kube_control_plane group
-      group_by:
-        key: 'kube_control_plane'
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
 
 - hosts: bastion[0]
   gather_facts: False
@@ -18,7 +12,7 @@
     - { role: kubespray-defaults }
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
 
-- hosts: k8s-cluster:etcd
+- hosts: k8s_cluster:etcd
   strategy: linear
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
@@ -31,7 +25,7 @@
   tags: always
   import_playbook: facts.yml
 
-- hosts: k8s-cluster:etcd
+- hosts: k8s_cluster:etcd
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -54,7 +48,7 @@
         etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
       when: not etcd_kubeadm_enabled| default(false)
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -67,7 +61,7 @@
         etcd_events_cluster_setup: false
       when: not etcd_kubeadm_enabled| default(false)
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -85,7 +79,7 @@
     - { role: kubernetes/client, tags: client }
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -95,7 +89,7 @@
     - { role: network_plugin, tags: network }
     - { role: kubernetes/node-label, tags: node-label }
 
-- hosts: calico-rr
+- hosts: calico_rr
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -131,7 +125,7 @@
     - { role: kubespray-defaults }
     - { role: kubernetes-apps, tags: apps }
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
diff --git a/contrib/aws_inventory/kubespray-aws-inventory.py b/contrib/aws_inventory/kubespray-aws-inventory.py
index 46ad6a0631026ff231f733bc03f7e71707f43793..3ad241c7e2f46ce96868e0725d113c1eae658fbd 100755
--- a/contrib/aws_inventory/kubespray-aws-inventory.py
+++ b/contrib/aws_inventory/kubespray-aws-inventory.py
@@ -35,7 +35,7 @@ class SearchEC2Tags(object):
     hosts['_meta'] = { 'hostvars': {} }
 
     ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
-    for group in ["kube_control_plane", "kube-node", "etcd"]:
+    for group in ["kube_control_plane", "kube_node", "etcd"]:
       hosts[group] = []
       tag_key = "kubespray-role"
       tag_value = ["*"+group+"*"]
@@ -70,7 +70,7 @@ class SearchEC2Tags(object):
         hosts[group].append(dns_name)
         hosts['_meta']['hostvars'][dns_name] = ansible_host
         
-    hosts['k8s-cluster'] = {'children':['kube_control_plane', 'kube-node']}
+    hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
     print(json.dumps(hosts, sort_keys=True, indent=2))
 
 SearchEC2Tags()
diff --git a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory/templates/inventory.j2
index 8a13cc635f6cb4d95ca089e3884a575f5de0bc84..6c5feb2cd4cfb74f6329bb1ef43b275f321c1699 100644
--- a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2
+++ b/contrib/azurerm/roles/generate-inventory/templates/inventory.j2
@@ -21,13 +21,13 @@
 {% endif %}
 {% endfor %}
 
-[kube-node]
+[kube_node]
 {% for vm in vm_list %}
-{% if 'kube-node' in vm.tags.roles %}
+{% if 'kube_node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}
 
-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
diff --git a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
index 61183cd1d678b45ace8a5a6f4d2ca5e1b890e096..6ab59df1ba28b8f9a74024a5868affc5a937fa58 100644
--- a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
+++ b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
@@ -21,14 +21,14 @@
 {% endif %}
 {% endfor %}
 
-[kube-node]
+[kube_node]
 {% for vm in vm_roles_list %}
-{% if 'kube-node' in vm.tags.roles %}
+{% if 'kube_node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}
 
-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
 
diff --git a/contrib/azurerm/roles/generate-templates/templates/minions.json b/contrib/azurerm/roles/generate-templates/templates/minions.json
index 3c122f34aa03dea0280a41c8c7a7ed6365e44173..bd0d059cbb63b4ad0ec1aa35a0918b6c7145d70a 100644
--- a/contrib/azurerm/roles/generate-templates/templates/minions.json
+++ b/contrib/azurerm/roles/generate-templates/templates/minions.json
@@ -61,7 +61,7 @@
         "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
       ],
       "tags": {
-        "roles": "kube-node"
+        "roles": "kube_node"
       },
       "apiVersion": "{{apiVersion}}",
       "properties": {
@@ -112,4 +112,4 @@
     } {% if not loop.last %},{% endif %}
     {% endfor %}
   ]
-}
\ No newline at end of file
+}
diff --git a/contrib/dind/run-test-distros.sh b/contrib/dind/run-test-distros.sh
index 0e3510fd0e19f723c91a680b0647279d4730c591..bd7e12223fba6452735aafad9c581da50e2deb54 100755
--- a/contrib/dind/run-test-distros.sh
+++ b/contrib/dind/run-test-distros.sh
@@ -46,7 +46,7 @@ test_distro() {
     pass_or_fail "$prefix: netcheck" || return 1
 }
 
-NODES=($(egrep ^kube-node hosts))
+NODES=($(egrep ^kube_node hosts))
 NETCHECKER_HOST=localhost
 
 : ${OUTPUT_DIR:=./out}
diff --git a/contrib/inventory_builder/inventory.py b/contrib/inventory_builder/inventory.py
index 814085a73e9d1d418349f305df8c075289a931fd..184989fc303e923f740b9ef4b3e1b4bb6b6d0fb1 100644
--- a/contrib/inventory_builder/inventory.py
+++ b/contrib/inventory_builder/inventory.py
@@ -44,8 +44,8 @@ import re
 import subprocess
 import sys
 
-ROLES = ['all', 'kube_control_plane', 'kube-node', 'etcd', 'k8s-cluster',
-         'calico-rr']
+ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
+         'calico_rr']
 PROTECTED_NAMES = ROLES
 AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
                       'load']
@@ -269,7 +269,7 @@ class KubesprayInventory(object):
 
     def purge_invalid_hosts(self, hostnames, protected_names=[]):
         for role in self.yaml_config['all']['children']:
-            if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
+            if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
                 all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy()  # noqa
                 for host in all_hosts.keys():
                     if host not in hostnames and host not in protected_names:
@@ -290,7 +290,7 @@ class KubesprayInventory(object):
             if self.yaml_config['all']['hosts'] is None:
                 self.yaml_config['all']['hosts'] = {host: None}
             self.yaml_config['all']['hosts'][host] = opts
-        elif group != 'k8s-cluster:children':
+        elif group != 'k8s_cluster:children':
             if self.yaml_config['all']['children'][group]['hosts'] is None:
                 self.yaml_config['all']['children'][group]['hosts'] = {
                     host: None}
@@ -307,37 +307,37 @@ class KubesprayInventory(object):
 
     def set_k8s_cluster(self):
         k8s_cluster = {'children': {'kube_control_plane': None,
-                                    'kube-node': None}}
-        self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
+                                    'kube_node': None}}
+        self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster
 
     def set_calico_rr(self, hosts):
         for host in hosts:
             if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa
-                self.debug("Not adding {0} to calico-rr group because it "
+                self.debug("Not adding {0} to calico_rr group because it "
                            "conflicts with kube_control_plane "
                            "group".format(host))
                 continue
-            if host in self.yaml_config['all']['children']['kube-node']:
-                self.debug("Not adding {0} to calico-rr group because it "
-                           "conflicts with kube-node group".format(host))
+            if host in self.yaml_config['all']['children']['kube_node']:
+                self.debug("Not adding {0} to calico_rr group because it "
+                           "conflicts with kube_node group".format(host))
                 continue
-            self.add_host_to_group('calico-rr', host)
+            self.add_host_to_group('calico_rr', host)
 
     def set_kube_node(self, hosts):
         for host in hosts:
             if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
                 if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube-node group because of "
+                    self.debug("Not adding {0} to kube_node group because of "
                                "scale deployment and host is in etcd "
                                "group.".format(host))
                     continue
             if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
                 if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube-node group because of "
+                    self.debug("Not adding {0} to kube_node group because of "
                                "scale deployment and host is in "
                                "kube_control_plane group.".format(host))
                     continue
-            self.add_host_to_group('kube-node', host)
+            self.add_host_to_group('kube_node', host)
 
     def set_etcd(self, hosts):
         for host in hosts:
diff --git a/contrib/inventory_builder/tests/test_inventory.py b/contrib/inventory_builder/tests/test_inventory.py
index 4d04603a7d07dc31d94f662089bb250b4967ab22..f9aa40bc11194df55f742ef5fd8483a3170f08ff 100644
--- a/contrib/inventory_builder/tests/test_inventory.py
+++ b/contrib/inventory_builder/tests/test_inventory.py
@@ -241,8 +241,8 @@ class TestInventory(unittest.TestCase):
                 self.inv.yaml_config['all']['hosts'].get(host), opt)
 
     def test_set_k8s_cluster(self):
-        group = 'k8s-cluster'
-        expected_hosts = ['kube-node', 'kube_control_plane']
+        group = 'k8s_cluster'
+        expected_hosts = ['kube_node', 'kube_control_plane']
 
         self.inv.set_k8s_cluster()
         for host in expected_hosts:
@@ -251,7 +251,7 @@ class TestInventory(unittest.TestCase):
                 self.inv.yaml_config['all']['children'][group]['children'])
 
     def test_set_kube_node(self):
-        group = 'kube-node'
+        group = 'kube_node'
         host = 'node1'
 
         self.inv.set_kube_node([host])
@@ -280,7 +280,7 @@ class TestInventory(unittest.TestCase):
         for h in range(3):
             self.assertFalse(
                 list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube_node']['hosts'])
 
     def test_scale_scenario_two(self):
         num_nodes = 500
@@ -296,7 +296,7 @@ class TestInventory(unittest.TestCase):
         for h in range(5):
             self.assertFalse(
                 list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube_node']['hosts'])
 
     def test_range2ips_range(self):
         changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
diff --git a/contrib/network-storage/glusterfs/glusterfs.yml b/contrib/network-storage/glusterfs/glusterfs.yml
index 8146dfc06e377fa4aff94c235503863fb960bdea..79fc3aeb9b1bf95e97f20d23991f323636330e5c 100644
--- a/contrib/network-storage/glusterfs/glusterfs.yml
+++ b/contrib/network-storage/glusterfs/glusterfs.yml
@@ -15,7 +15,7 @@
   roles:
     - { role: glusterfs/server }
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   roles:
     - { role: glusterfs/client }
 
diff --git a/contrib/network-storage/glusterfs/inventory.example b/contrib/network-storage/glusterfs/inventory.example
index dc77b4b0a6540e6b309a253300ab08e090eda5eb..f6c107070de77156ff04fbb6aaa2bfa00b48c629 100644
--- a/contrib/network-storage/glusterfs/inventory.example
+++ b/contrib/network-storage/glusterfs/inventory.example
@@ -23,15 +23,15 @@
 # node2
 # node3
 
-# [kube-node]
+# [kube_node]
 # node2
 # node3
 # node4
 # node5
 # node6
 
-# [k8s-cluster:children]
-# kube-node
+# [k8s_cluster:children]
+# kube_node
 # kube_control_plane
 
 # [gfs-cluster]
diff --git a/contrib/network-storage/heketi/inventory.yml.sample b/contrib/network-storage/heketi/inventory.yml.sample
index 46adbed44634c1303a5e46e168daf45fc8aa8dd7..e68ec96377381575417a53666c6b98f79e02721f 100644
--- a/contrib/network-storage/heketi/inventory.yml.sample
+++ b/contrib/network-storage/heketi/inventory.yml.sample
@@ -3,7 +3,7 @@ all:
         heketi_admin_key: "11elfeinhundertundelf"
         heketi_user_key: "!!einseinseins"
     children:
-        k8s-cluster:
+        k8s_cluster:
             vars:
                 kubelet_fail_swap_on: false
             children:
@@ -13,7 +13,7 @@ all:
                 etcd:
                     hosts:
                         node2:
-                kube-node:
+                kube_node:
                     hosts: &kube_nodes
                         node1:
                         node2:
diff --git a/contrib/packaging/rpm/kubespray.spec b/contrib/packaging/rpm/kubespray.spec
index e4c1808bea1ee0229fe3bdffbeb5cbf575d37f6b..656f624be385bd68d334fc8cd6699a2c335bcd91 100644
--- a/contrib/packaging/rpm/kubespray.spec
+++ b/contrib/packaging/rpm/kubespray.spec
@@ -51,7 +51,7 @@ export SKIP_PIP_INSTALL=1
 %doc %{_docdir}/%{name}/inventory/sample/hosts.ini
 %config %{_sysconfdir}/%{name}/ansible.cfg
 %config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
-%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
+%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml
 %license %{_docdir}/%{name}/LICENSE
 %{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
 %{_datarootdir}/%{name}/roles/
diff --git a/contrib/terraform/aws/templates/inventory.tpl b/contrib/terraform/aws/templates/inventory.tpl
index d8fe2f995a0a2340c392a4e86fab2c91bc63de84..baa9ea8545c22125f94f631bc368f23ba1916c8e 100644
--- a/contrib/terraform/aws/templates/inventory.tpl
+++ b/contrib/terraform/aws/templates/inventory.tpl
@@ -11,7 +11,7 @@ ${public_ip_address_bastion}
 ${list_master}
 
 
-[kube-node]
+[kube_node]
 ${list_node}
 
 
@@ -19,10 +19,10 @@ ${list_node}
 ${list_etcd}
 
 
-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
 
 
-[k8s-cluster:vars]
+[k8s_cluster:vars]
 ${elb_api_fqdn}
diff --git a/contrib/terraform/exoscale/templates/inventory.tpl b/contrib/terraform/exoscale/templates/inventory.tpl
index 27b9e60f36a670842d50d39ddb08f917907fb49f..85ed1924b1d04283fadc017a5b1a7036f3ea90e6 100644
--- a/contrib/terraform/exoscale/templates/inventory.tpl
+++ b/contrib/terraform/exoscale/templates/inventory.tpl
@@ -11,9 +11,9 @@ supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]
 [etcd]
 ${list_master}
 
-[kube-node]
+[kube_node]
 ${list_worker}
 
-[k8s-cluster:children]
+[k8s_cluster:children]
 kube_control_plane
-kube-node
+kube_node
diff --git a/contrib/terraform/gcp/generate-inventory.sh b/contrib/terraform/gcp/generate-inventory.sh
index d266b18992653c0cd997010bb5204fa3314a0fa6..585a4f415eb5cc7686522e7e3d7baa206e0ac549 100755
--- a/contrib/terraform/gcp/generate-inventory.sh
+++ b/contrib/terraform/gcp/generate-inventory.sh
@@ -65,12 +65,12 @@ for name in "${MASTER_NAMES[@]}"; do
 done
 
 echo ""
-echo "[kube-node]"
+echo "[kube_node]"
 for name in "${WORKER_NAMES[@]}"; do
   echo "${name}"
 done
 
 echo ""
-echo "[k8s-cluster:children]"
+echo "[k8s_cluster:children]"
 echo "kube_control_plane"
-echo "kube-node"
+echo "kube_node"
diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md
index 67bc0f0662a4fde8f045bd718e78716e2f62cf98..7b7e9e1ceca13f243a9944718279841f881830da 100644
--- a/contrib/terraform/openstack/README.md
+++ b/contrib/terraform/openstack/README.md
@@ -263,8 +263,8 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
 |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
 | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
-|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
-|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. |
+|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube_node` for tainting them as nodes, empty by default. |
+|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
 |`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
 |`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
 |`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
@@ -421,7 +421,7 @@ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
 ```
 
 if you chose to create a bastion host, this script will create
-`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
+`contrib/terraform/openstack/k8s_cluster.yml` with an ssh command for Ansible to
 be able to access your machines tunneling through the bastion's IP address. If
 you want to manually handle the ssh tunneling to these machines, please delete
 or move that file. If you want to use this, just leave it there, as ansible will
@@ -546,7 +546,7 @@ bin_dir: /opt/bin
 cloud_provider: openstack
 ```
 
-Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml`:
+Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`:
 
 - Set variable **kube_network_plugin** to your desired networking plugin.
   - **flannel** works out-of-the-box
diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf
index c9e5609f38ce9b45c61a86afb08ca0762c86072c..9409be3c67f0585d2b2b4c424e53e26c068e4c2b 100644
--- a/contrib/terraform/openstack/modules/compute/main.tf
+++ b/contrib/terraform/openstack/modules/compute/main.tf
@@ -204,7 +204,7 @@ resource "openstack_compute_instance_v2" "bastion" {
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no_floating.yml"
   }
 }
 
@@ -245,13 +245,13 @@ resource "openstack_compute_instance_v2" "k8s_master" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
   }
 }
 
@@ -292,13 +292,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
   }
 }
 
@@ -337,7 +337,7 @@ resource "openstack_compute_instance_v2" "etcd" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "etcd,no-floating"
+    kubespray_groups = "etcd,no_floating"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
@@ -379,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,no-floating"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
@@ -421,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,no-floating"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
@@ -462,13 +462,13 @@ resource "openstack_compute_instance_v2" "k8s_node" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no_floating.yml"
   }
 }
 
@@ -507,7 +507,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
@@ -548,13 +548,13 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,%{if each.value.floating_ip == false}no-floating,%{endif}${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
 
   provisioner "local-exec" {
-    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no-floating.yml%{else}true%{endif}"
+    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no_floating.yml%{else}true%{endif}"
   }
 }
 
@@ -593,7 +593,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
 
   metadata = {
     ssh_user         = var.ssh_user_gfs
-    kubespray_groups = "gfs-cluster,network-storage,no-floating"
+    kubespray_groups = "gfs-cluster,network-storage,no_floating"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf
index 6c6e85a8ae64b2038cb76dd3f37b0e8c12627628..35bd62b60c84543cabe5f182eed9fbd2d947eec3 100644
--- a/contrib/terraform/openstack/variables.tf
+++ b/contrib/terraform/openstack/variables.tf
@@ -177,12 +177,12 @@ variable "external_net" {
 }
 
 variable "supplementary_master_groups" {
-  description = "supplementary kubespray ansible groups for masters, such kube-node"
+  description = "supplementary kubespray ansible groups for masters, such kube_node"
   default     = ""
 }
 
 variable "supplementary_node_groups" {
-  description = "supplementary kubespray ansible groups for worker nodes, such as kube-ingress"
+  description = "supplementary kubespray ansible groups for worker nodes, such as kube_ingress"
   default     = ""
 }
 
diff --git a/contrib/terraform/packet/README.md b/contrib/terraform/packet/README.md
index 496e74206212f02f9cfb95add3cd4df4c706cc2e..532acb80948c3c11b6e53748058325f25b2fe8a7 100644
--- a/contrib/terraform/packet/README.md
+++ b/contrib/terraform/packet/README.md
@@ -108,7 +108,7 @@ While the defaults in variables.tf will successfully deploy a cluster, it is rec
 Kubespray will pull down a Kubernetes configuration file to access this cluster by enabling the
 `kubeconfig_localhost: true` in the Kubespray configuration.
 
-Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml` and comment back in the following line and change from `false` to `true`:
+Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`, uncomment the following line, and change `false` to `true`:
 `\# kubeconfig_localhost: false`
 becomes:
 `kubeconfig_localhost: true`
diff --git a/contrib/terraform/packet/kubespray.tf b/contrib/terraform/packet/kubespray.tf
index 00cf21ff07e538c8725afe5f2f3ad40d3a18bca2..18f901aea70ba890da1de5d7f1d53a386f2bdd1d 100644
--- a/contrib/terraform/packet/kubespray.tf
+++ b/contrib/terraform/packet/kubespray.tf
@@ -19,7 +19,7 @@ resource "packet_device" "k8s_master" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane", "etcd", "kube-node"]
+  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
 }
 
 resource "packet_device" "k8s_master_no_etcd" {
@@ -32,7 +32,7 @@ resource "packet_device" "k8s_master_no_etcd" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane"]
+  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
 }
 
 resource "packet_device" "k8s_etcd" {
@@ -58,6 +58,6 @@ resource "packet_device" "k8s_node" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-node"]
+  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
 }
 
diff --git a/contrib/terraform/upcloud/templates/inventory.tpl b/contrib/terraform/upcloud/templates/inventory.tpl
index cb453e3ea29c3dcdd758a01e6773aa28c909452b..28ff28ac22308cccaba31b126b5a8cf1a68c1962 100644
--- a/contrib/terraform/upcloud/templates/inventory.tpl
+++ b/contrib/terraform/upcloud/templates/inventory.tpl
@@ -9,9 +9,9 @@ ${list_master}
 [etcd]
 ${list_master}
 
-[kube-node]
+[kube_node]
 ${list_worker}
 
-[k8s-cluster:children]
+[k8s_cluster:children]
 kube_control_plane
-kube-node
+kube_node
diff --git a/contrib/terraform/vsphere/templates/inventory.tpl b/contrib/terraform/vsphere/templates/inventory.tpl
index cb453e3ea29c3dcdd758a01e6773aa28c909452b..28ff28ac22308cccaba31b126b5a8cf1a68c1962 100644
--- a/contrib/terraform/vsphere/templates/inventory.tpl
+++ b/contrib/terraform/vsphere/templates/inventory.tpl
@@ -9,9 +9,9 @@ ${list_master}
 [etcd]
 ${list_master}
 
-[kube-node]
+[kube_node]
 ${list_worker}
 
-[k8s-cluster:children]
+[k8s_cluster:children]
 kube_control_plane
-kube-node
+kube_node
diff --git a/docs/ansible.md b/docs/ansible.md
index 99d72b4dc11112d75be49095c843eeb0146a894f..0440eccf240f77eceb1e4718770ee058a485bd64 100644
--- a/docs/ansible.md
+++ b/docs/ansible.md
@@ -4,28 +4,28 @@
 
 The inventory is composed of 3 groups:
 
-* **kube-node** : list of kubernetes nodes where the pods will run.
+* **kube_node** : list of kubernetes nodes where the pods will run.
 * **kube_control_plane** : list of servers where kubernetes control plane components (apiserver, scheduler, controller) will run.
 * **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose.
 
-Note: do not modify the children of _k8s-cluster_, like putting
-the _etcd_ group into the _k8s-cluster_, unless you are certain
+Note: do not modify the children of _k8s_cluster_, such as putting
+the _etcd_ group into _k8s_cluster_, unless you are certain
 to do that and you have it fully contained in the latter:
 
 ```ShellSession
-k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd
+k8s_cluster ⊂ etcd => kube_node ∩ etcd = etcd
 ```
 
-When _kube-node_ contains _etcd_, you define your etcd cluster to be as well schedulable for Kubernetes workloads.
+When _kube_node_ contains _etcd_, you make your etcd cluster schedulable for Kubernetes workloads as well.
 If you want it a standalone, make sure those groups do not intersect.
 If you want the server to act both as control-plane and node, the server must be defined
-on both groups _kube_control_plane_ and _kube-node_. If you want a standalone and
+in both groups _kube_control_plane_ and _kube_node_. If you want a standalone and
 unschedulable master, the server must be defined only in the _kube_control_plane_ and
-not _kube-node_.
+not _kube_node_.
 
 There are also two special groups:
 
-* **calico-rr** : explained for [advanced Calico networking cases](calico.md)
+* **calico_rr** : explained for [advanced Calico networking cases](calico.md)
 * **bastion** : configure a bastion host if your nodes are not directly reachable
 
 Below is a complete inventory example:
@@ -49,15 +49,15 @@ node1
 node2
 node3
 
-[kube-node]
+[kube_node]
 node2
 node3
 node4
 node5
 node6
 
-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
 ```
 
@@ -66,7 +66,7 @@ kube_control_plane
 The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``.
 Optional variables are located in the `inventory/sample/group_vars/all.yml`.
 Mandatory variables that are common for at least one role (or a node group) can be found in the
-`inventory/sample/group_vars/k8s-cluster.yml`.
+`inventory/sample/group_vars/k8s_cluster.yml`.
 There are also role vars for docker, kubernetes preinstall and master roles.
 According to the [ansible docs](https://docs.ansible.com/ansible/latest/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
 those cannot be overridden from the group vars. In order to override, one should use
@@ -79,7 +79,7 @@ Layer | Comment
 ------|--------
 **role defaults** | provides best UX to override things for Kubespray deployments
 inventory vars | Unused
-**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
+**inventory group_vars** | Expects users to use ``all.yml``, ``k8s_cluster.yml`` etc. to override things
 inventory host_vars | Unused
 playbook group_vars | Unused
 playbook host_vars | Unused
diff --git a/docs/aws-ebs-csi.md b/docs/aws-ebs-csi.md
index 4d8c96311880ff2ed763bf0d4fe5dd308e0a0ce2..3a7684ef2913db79e9864a16fb4b4fbc27d859a2 100644
--- a/docs/aws-ebs-csi.md
+++ b/docs/aws-ebs-csi.md
@@ -8,7 +8,7 @@ To set the number of replicas for the AWS CSI controller, you can change `aws_eb
 
 Make sure to add a role, for your EC2 instances hosting Kubernetes, that allows it to do the actions necessary to request a volume and attach it: [AWS CSI Policy](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/example-iam-policy.json)
 
-If you want to deploy the AWS EBS storage class used with the CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
+If you want to deploy the AWS EBS storage class used with the CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`.
 
 You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over AWS EC2 with EBS CSI Driver enabled.
 
diff --git a/docs/aws.md b/docs/aws.md
index 0e680e0d12dd7abd943e2614ca955376bc2c5fa0..b45508c6163a4c638f7bd81c7c69d985be69a375 100644
--- a/docs/aws.md
+++ b/docs/aws.md
@@ -33,16 +33,16 @@ This will produce an inventory that is passed into Ansible that looks like the f
   "etcd": [
     "ip-172-31-3-xxx.us-east-2.compute.internal"
   ],
-  "k8s-cluster": {
+  "k8s_cluster": {
     "children": [
       "kube_control_plane",
-      "kube-node"
+      "kube_node"
     ]
   },
   "kube_control_plane": [
     "ip-172-31-3-xxx.us-east-2.compute.internal"
   ],
-  "kube-node": [
+  "kube_node": [
     "ip-172-31-8-xxx.us-east-2.compute.internal"
   ]
 }
@@ -51,7 +51,7 @@ This will produce an inventory that is passed into Ansible that looks like the f
 Guide:
 
 - Create instances in AWS as needed.
-- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube-node`. You can also share roles like `kube_control_plane, etcd`
+- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube_node`. You can also share roles like `kube_control_plane, etcd`
 - Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
 - Set the following AWS credentials and info as environment variables in your terminal:
 
diff --git a/docs/azure-csi.md b/docs/azure-csi.md
index 95e7a667c095a013e324a5bd3ff6f6a23a0f67b0..d4e04d27586e19e0bc43dff84060319e0aeb22e2 100644
--- a/docs/azure-csi.md
+++ b/docs/azure-csi.md
@@ -8,7 +8,7 @@ To deploy Azure Disk CSI driver, uncomment the `azure_csi_enabled` option in `gr
 
 ## Azure Disk CSI Storage Class
 
-If you want to deploy the Azure Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
+If you want to deploy the Azure Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`.
 
 ## Parameters
 
diff --git a/docs/calico.md b/docs/calico.md
index 45d1b0e909a0a752129dff91c8d53194501c53de..f090ca984f7f3458d783ab5267a7d7635e7fbc94 100644
--- a/docs/calico.md
+++ b/docs/calico.md
@@ -61,7 +61,7 @@ calico_network_backend: none
 ### Optional : Define the default pool CIDRs
 
 By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool, and `kube_pods_subnet_ipv6` for IPv6.
-In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet` and `kube_pods_subnet_ipv6` ), it starts with the default IP Pools of which IP range CIDRs can by defined in group_vars (k8s-cluster/k8s-net-calico.yml):
+In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet` and `kube_pods_subnet_ipv6`); this starts with the default IP Pools, whose IP range CIDRs can be defined in group_vars (k8s_cluster/k8s-net-calico.yml):
 
 ```ShellSession
 calico_pool_cidr: 10.233.64.0/20
@@ -88,14 +88,14 @@ In order to define peers on a per node basis, the `peers` variable must be defin
 NB: Ansible's `hash_behaviour` is by default set to "replace", thus defining both global and per node peers would end up with having only per node peers. If having both global and per node peers defined was meant to happen, global peers would have to be defined in hostvars for each host (as well as per node peers)
 
 Since calico 3.4, Calico supports advertising Kubernetes service cluster IPs over BGP, just as it advertises pod IPs.
-This can be enabled by setting the following variable as follow in group_vars (k8s-cluster/k8s-net-calico.yml)
+This can be enabled by setting the following variable in group_vars (k8s_cluster/k8s-net-calico.yml)
 
 ```yml
 calico_advertise_cluster_ips: true
 ```
 
 Since calico 3.10, Calico supports advertising Kubernetes service ExternalIPs over BGP in addition to cluster IPs advertising.
-This can be enabled by setting the following variable in group_vars (k8s-cluster/k8s-net-calico.yml)
+This can be enabled by setting the following variable in group_vars (k8s_cluster/k8s-net-calico.yml)
 
 ```yml
 calico_advertise_service_external_ips:
@@ -121,9 +121,9 @@ recommended here:
 
 You need to edit your inventory and add:
 
-* `calico-rr` group with nodes in it. `calico-rr` can be combined with
-  `kube-node` and/or `kube_control_plane`. `calico-rr` group also must be a child
-   group of `k8s-cluster` group.
+* `calico_rr` group with nodes in it. `calico_rr` can be combined with
+  `kube_node` and/or `kube_control_plane`. The `calico_rr` group must also be
+  a child group of the `k8s_cluster` group.
 * `cluster_id` by route reflector node/group (see details
 [here](https://hub.docker.com/r/calico/routereflector/))
 
@@ -147,18 +147,18 @@ node2
 node3
 node4
 
-[kube-node]
+[kube_node]
 node2
 node3
 node4
 node5
 
-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
-calico-rr
+calico_rr
 
-[calico-rr]
+[calico_rr]
 rr0
 rr1
 
diff --git a/docs/cinder-csi.md b/docs/cinder-csi.md
index 86379affeb31995b321c3c6157330a1cebb7d545..b7dadf1e45e14bf4451c7d0ca4c024cfc345dc5c 100644
--- a/docs/cinder-csi.md
+++ b/docs/cinder-csi.md
@@ -10,7 +10,7 @@ You need to source the OpenStack credentials you use to deploy your machines tha
 
 Make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack. Otherwise [cinder](https://docs.openstack.org/cinder/latest/) won't work as expected.
 
-If you want to deploy the cinder provisioner used with Cinder CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
+If you want to deploy the cinder provisioner used with Cinder CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`.
 
 You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over OpenStack with Cinder CSI Driver enabled.
 
diff --git a/docs/containerd.md b/docs/containerd.md
index 98de3c23c2cc21191822cf3b5f137ace4319bbb1..452e9990acecbe78d474745c33cb3f582fbcce76 100644
--- a/docs/containerd.md
+++ b/docs/containerd.md
@@ -5,7 +5,7 @@ Kubespray supports basic functionality for using containerd as the default conta
 
 _To use the containerd container runtime set the following variables:_
 
-## k8s-cluster.yml
+## k8s_cluster.yml
 
 ```yaml
 container_manager: containerd
diff --git a/docs/cri-o.md b/docs/cri-o.md
index a96c3f5797968a68d21b7e12d3d83c90bd604db4..ab7bdc1cf79b5678c308f781e815c747eb25599e 100644
--- a/docs/cri-o.md
+++ b/docs/cri-o.md
@@ -16,7 +16,7 @@ skip_downloads: false
 etcd_kubeadm_enabled: true
 ```
 
-## k8s-cluster/k8s-cluster.yml
+## k8s_cluster/k8s_cluster.yml
 
 ```yaml
 container_manager: crio
@@ -52,7 +52,7 @@ This parameter controls not just the number of processes but also the amount of
 (since a thread is technically a process with shared memory). See [cri-o#1921]
 
 In order to increase the default `pids_limit` for cri-o based deployments you need to set the `crio_pids_limit`
-for your `k8s-cluster` ansible group or per node depending on the use case.
+for your `k8s_cluster` ansible group or per node depending on the use case.
 
 ```yaml
 crio_pids_limit: 4096
diff --git a/docs/gcp-pd-csi.md b/docs/gcp-pd-csi.md
index 448d3cadf5e829aecf6cd377e976fcae7bf942ce..88fa06039ce2b3cbad42f8183f5b3937ac17d549 100644
--- a/docs/gcp-pd-csi.md
+++ b/docs/gcp-pd-csi.md
@@ -6,7 +6,7 @@ To deploy GCP Persistent Disk CSI driver, uncomment the `gcp_pd_csi_enabled` opt
 
 ## GCP Persistent Disk Storage Class
 
-If you want to deploy the GCP Persistent Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
+If you want to deploy the GCP Persistent Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`.
 
 ## GCP credentials
 
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 18a50e017f678773ec7e8fdd8c57f596e7ee4040..ed90b88fb3037f30c9bc0b6bb8cc6ae76357e378 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -79,7 +79,7 @@ var in inventory.
 By default, Kubespray configures kube_control_plane hosts with insecure access to
 kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
 because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
-generated will point to localhost (on kube_control_planes) and kube-node hosts will
+generated will point to localhost (on kube_control_planes) and kube_node hosts will
 connect either to a localhost nginx proxy or to a loadbalancer if configured.
 More details on this process are in the [HA guide](/docs/ha-mode.md).
 
diff --git a/docs/ha-mode.md b/docs/ha-mode.md
index 668558f17ac478023635b711517ff2db0f169287..3bc9134ab264ad59f5b2ef8d33271d59e7d57431 100644
--- a/docs/ha-mode.md
+++ b/docs/ha-mode.md
@@ -81,7 +81,7 @@ loadbalancer_apiserver:
   port on the VIP address)
 
 This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
-into the `/etc/hosts` file of all servers in the `k8s-cluster` group and wired
+into the `/etc/hosts` file of all servers in the `k8s_cluster` group and wired
 into the generated self-signed TLS/SSL certificates as well. Note that
 the HAProxy service should as well be HA and requires a VIP management, which
 is out of scope of this doc.
diff --git a/docs/integration.md b/docs/integration.md
index 09c044fa89c25855d4a01705b2bf43197addb0e0..31d6f0bd4a17308f55295c5d0f296434c9ac70da 100644
--- a/docs/integration.md
+++ b/docs/integration.md
@@ -52,10 +52,10 @@ You could rename *all.yml* config to something else, i.e. *kubespray.yml* and cr
    ```ini
      ...
      #Kargo groups:
-     [kube-node:children]
+     [kube_node:children]
      kubenode
 
-     [k8s-cluster:children]
+     [k8s_cluster:children]
      kubernetes
 
      [etcd:children]
diff --git a/docs/kata-containers.md b/docs/kata-containers.md
index c09118806291551bc5599cabbd8b291f7d81cd28..4a5a45525b2974a524c2e323db9d9c563567443e 100644
--- a/docs/kata-containers.md
+++ b/docs/kata-containers.md
@@ -10,7 +10,7 @@ _Qemu_ is the only hypervisor supported by Kubespray.
 
 To use Kata Containers, set the following variables:
 
-**k8s-cluster.yml**:
+**k8s_cluster.yml**:
 
 ```yaml
 container_manager: containerd
diff --git a/docs/kube-ovn.md b/docs/kube-ovn.md
index 375c7a4d54ba88d1d403db8ee5d9f1f8acbc1251..3ddc270da7abefcf02c0d3885bf9bf9c796829af 100644
--- a/docs/kube-ovn.md
+++ b/docs/kube-ovn.md
@@ -12,7 +12,7 @@ kernel version 3.10.0-862 has a nat related bug that will affect ovs function, p
 
 ## How to use it
 
-Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml`
+Enable kube-ovn in `group_vars/k8s_cluster/k8s_cluster.yml`
 
 ```yml
 ...
diff --git a/docs/large-deployments.md b/docs/large-deployments.md
index ec6618bf40e84db5a89d5989eb26f5d14fa89f00..d412010293b453b0ba16d1bcc1541a969e994c49 100644
--- a/docs/large-deployments.md
+++ b/docs/large-deployments.md
@@ -37,9 +37,9 @@ For a large scaled deployments, consider the following configuration changes:
 * Tune network prefix sizes. Those are ``kube_network_node_prefix``,
   ``kube_service_addresses`` and ``kube_pods_subnet``.
 
-* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
-  from host/network interruption much quicker with calico-rr. Note that
-  calico-rr role must be on a host without kube_control_plane or kube-node role (but
+* Add calico_rr nodes if you are deploying with Calico or Canal. Nodes recover
+  from host/network interruption much quicker with calico_rr. Note that
+  the calico_rr role must be on a host without kube_control_plane or kube_node role (but
   etcd role is okay).
 
 * Check out the
diff --git a/docs/macvlan.md b/docs/macvlan.md
index 51a4ba95705a39108bd16ea0b6473441b8e27c6c..2d0de074b4130ff10216aef4be723e9f7449267f 100644
--- a/docs/macvlan.md
+++ b/docs/macvlan.md
@@ -2,7 +2,7 @@
 
 ## How to use it
 
-* Enable macvlan in `group_vars/k8s-cluster/k8s-cluster.yml`
+* Enable macvlan in `group_vars/k8s_cluster/k8s_cluster.yml`
 
 ```yml
 ...
@@ -10,7 +10,7 @@ kube_network_plugin: macvlan
 ...
 ```
 
-* Adjust the `macvlan_interface` in `group_vars/k8s-cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file:
+* Adjust the `macvlan_interface` in `group_vars/k8s_cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file:
 
 ```yml
 all:
@@ -34,7 +34,7 @@ add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml`
 
 The nodelocal dns IP is not reachable.
 
-Disable it in `sample/group_vars/k8s-cluster/k8s-cluster.yml`
+Disable it in `sample/group_vars/k8s_cluster/k8s_cluster.yml`
 
 ```yml
 enable_nodelocaldns: false
diff --git a/docs/nodes.md b/docs/nodes.md
index f369a5f3dd09b682fbbab2decebd1a45936d5932..00fe2481be656efe4ea1b7c012969c0890f5d5e6 100644
--- a/docs/nodes.md
+++ b/docs/nodes.md
@@ -17,7 +17,7 @@ Modify the order of your master list by pushing your first entry to any other po
         node-1:
         node-2:
         node-3:
-    kube-node:
+    kube_node:
       hosts:
         node-1:
         node-2:
@@ -38,7 +38,7 @@ change your inventory to:
         node-2:
         node-3:
         node-1:
-    kube-node:
+    kube_node:
       hosts:
         node-2:
         node-3:
diff --git a/docs/ovn4nfv.md b/docs/ovn4nfv.md
index 9d120a72c4208a4938f82857fd2ecbf23a64a3e2..9106f60327bfc40cfca7a659ac21e87ba8efc3f7 100644
--- a/docs/ovn4nfv.md
+++ b/docs/ovn4nfv.md
@@ -4,7 +4,7 @@ Intro to [ovn4nfv-k8s-plugin](https://github.com/opnfv/ovn4nfv-k8s-plugin)
 
 ## How to use it
 
-* Enable ovn4nfv in `group_vars/k8s-cluster/k8s-cluster.yml`
+* Enable ovn4nfv in `group_vars/k8s_cluster/k8s_cluster.yml`
 
 ```yml
 ...
diff --git a/docs/setting-up-your-first-cluster.md b/docs/setting-up-your-first-cluster.md
index 184d4bc814cdda94b521126b8189b9a5ad7596ba..65645f93d26c3e553bd32ae83138ae4fe5c3c99c 100644
--- a/docs/setting-up-your-first-cluster.md
+++ b/docs/setting-up-your-first-cluster.md
@@ -225,7 +225,7 @@ worker-0, worker-1 and worker-2 are worker nodes. Also update the `ip` to the re
 remove the `access_ip`.
 
 The main configuration for the cluster is stored in
-`inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml`. In this file we
+`inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml`. In this file we
  will update the `supplementary_addresses_in_ssl_keys` with a list of the IP
  addresses of the controller nodes. In that way we can access the
   kubernetes API server as an administrator from outside the VPC network. You
@@ -234,7 +234,7 @@ The main configuration for the cluster is stored in
 
 Kubespray also offers to easily enable popular kubernetes add-ons. You can
 modify the
-list of add-ons in `inventory/mycluster/group_vars/k8s-cluster/addons.yml`.
+list of add-ons in `inventory/mycluster/group_vars/k8s_cluster/addons.yml`.
 Let's enable the metrics server as this is a crucial monitoring element for
 the kubernetes cluster, just change the 'false' to 'true' for
 `metrics_server_enabled`.
diff --git a/docs/test_cases.md b/docs/test_cases.md
index 2a8f5e92008950419c83dc9b77bef1e1d328a8cd..738b7b1969908b2e08d4bea8156416d7998bb1d4 100644
--- a/docs/test_cases.md
+++ b/docs/test_cases.md
@@ -2,11 +2,11 @@
 
 There are four node layout types: `default`, `separate`, `ha`, and `scale`.
 
-`default` is a non-HA two nodes setup with one separate `kube-node`
+`default` is a non-HA two-node setup with one separate `kube_node`
 and the `etcd` group merged with the `kube_control_plane`.
 
 `separate` layout is when there is only one node of each type, which includes
- a kube_control_plane, kube-node, and etcd cluster member.
+ a kube_control_plane, kube_node, and etcd cluster member.
 
 `ha` layout consists of two etcd nodes, two masters and a single worker node,
 with role intersection.
diff --git a/docs/upgrades.md b/docs/upgrades.md
index f0cde5e87ea1ea85bf20e14343f5d51d125906ba..abbbc2f7aa4e5a194c114fd7e02060957390fa5b 100644
--- a/docs/upgrades.md
+++ b/docs/upgrades.md
@@ -68,9 +68,9 @@ If you want to manually control the upgrade procedure, you can use the variables
 
 For instance, if you're on v2.6.0, then check out v2.7.0, run the upgrade, check out the next tag, and run the next upgrade, etc.
 
-Assuming you don't explicitly define a kubernetes version in your k8s-cluster.yml, you simply check out the next tag and run the upgrade-cluster.yml playbook
+Assuming you don't explicitly define a kubernetes version in your k8s_cluster.yml, you simply check out the next tag and run the upgrade-cluster.yml playbook.
 
-* If you do define kubernetes version in your inventory (e.g. group_vars/k8s-cluster.yml) then either make sure to update it before running upgrade-cluster, or specify the new version you're upgrading to: `ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml -e kube_version=v1.11.3`
+* If you do define a kubernetes version in your inventory (e.g. group_vars/k8s_cluster.yml), then either make sure to update it before running upgrade-cluster, or specify the new version you're upgrading to: `ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml -e kube_version=v1.11.3`
 
   Otherwise, the upgrade will leave your cluster at the same k8s version defined in your inventory vars.
 
@@ -232,7 +232,7 @@ Previous HEAD position was 6f97687d Release 2.8 robust san handling (#4478)
 HEAD is now at a4e65c7c Upgrade to Ansible >2.7.0 (#4471)
 ```
 
-:warning: IMPORTANT: Some of the variable formats changed in the k8s-cluster.yml between 2.8.5 and 2.9.0 :warning:
+:warning: IMPORTANT: Some of the variable formats changed in the k8s_cluster.yml between 2.8.5 and 2.9.0 :warning:
 
 If you do not keep your inventory copy up to date, **your upgrade will fail** and your first master will be left non-functional until fixed and re-run.
 
diff --git a/docs/vars.md b/docs/vars.md
index 8e6218fad802873a4520eb107b7b82e463df4ad1..be5c7d93459ee999dc5efc98e98a3eb231e51d7b 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -38,7 +38,7 @@ Some variables of note include:
   and access_ip are undefined
 * *loadbalancer_apiserver* - If defined, all hosts will connect to this
   address instead of localhost for kube_control_planes and kube_control_plane[0] for
-  kube-nodes. See more details in the
+  kube_nodes. See more details in the
   [HA guide](/docs/ha-mode.md).
 * *loadbalancer_apiserver_localhost* - makes all hosts to connect to
   the apiserver internally load balanced endpoint. Mutual exclusive to the
@@ -59,14 +59,14 @@ following default cluster parameters:
 * *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not
   overlap with kube_service_addresses.
 * *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
-  bits in kube_pods_subnet dictates how many kube-nodes can be in cluster. Setting this > 25 will
+  bits in kube_pods_subnet dictate how many kube_nodes can be in the cluster. Setting this > 25 will
   raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly
   (assertion not applicable to calico which doesn't use this as a hard limit, see
   [Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes).
 * *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.
 * *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
 * *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``.
-* *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. Remaining bits in ``kube_pods_subnet_ipv6`` dictates how many kube-nodes can be in cluster.
+* *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. Remaining bits in ``kube_pods_subnet_ipv6`` dictate how many kube_nodes can be in the cluster.
 * *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
 * *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
 * *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/)
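
A worked example of the sizing above, assuming the default /18 pods subnet and a node prefix of 24 (Kubespray's usual default):

```yaml
# kube_pods_subnet: 10.233.64.0/18   -> 2^(32-18) = 16384 pod addresses in total
# kube_network_node_prefix: 24       -> each node receives a /24 (256 addresses)
# node capacity: 2^(24-18) = 64 kube_node hosts fit in the subnet
# pods per node: ~254 usable IPs, comfortably above kubelet's default limit of 110
```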
diff --git a/docs/weave.md b/docs/weave.md
index 6d7e949564aa092cfb160625d9e0e93b6b3ab394..30fa4944417fc04d1c33d68df215dfe3b28e54bc 100644
--- a/docs/weave.md
+++ b/docs/weave.md
@@ -11,7 +11,7 @@ Weave encryption is supported for all communication
 * To use Weave encryption, specify a strong password (if no password, no encryption)
 
 ```ShellSession
-# In file ./inventory/sample/group_vars/k8s-cluster.yml
+# In file ./inventory/sample/group_vars/k8s_cluster.yml
 weave_password: EnterPasswordHere
 ```
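
Any sufficiently random string works as the password; one illustrative way to generate one:

```ShellSession
openssl rand -base64 24
```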
 
diff --git a/extra_playbooks/migrate_openstack_provider.yml b/extra_playbooks/migrate_openstack_provider.yml
index 9d4adbaa948bed92fbca8a1c5aaa18eec3f8ec94..2ce86d5c5ebb43b2bdb970f32275bb50d0d741d6 100644
--- a/extra_playbooks/migrate_openstack_provider.yml
+++ b/extra_playbooks/migrate_openstack_provider.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-node:kube_control_plane
+- hosts: kube_node:kube_control_plane
   tasks:
     - name: Remove old cloud provider config
       file:
diff --git a/extra_playbooks/upgrade-only-k8s.yml b/extra_playbooks/upgrade-only-k8s.yml
index 5bdbd012d926ec89222f9182c3a35fe8e5818876..13ebcc4bde5b48acfe6aee777cfc8b887f2f30bb 100644
--- a/extra_playbooks/upgrade-only-k8s.yml
+++ b/extra_playbooks/upgrade-only-k8s.yml
@@ -16,7 +16,7 @@
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
-- hosts: k8s-cluster:etcd:calico-rr
+- hosts: k8s_cluster:etcd:calico_rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
   vars:
@@ -27,7 +27,7 @@
     - { role: kubespray-defaults}
     - { role: bootstrap-os, tags: bootstrap-os}
 
-- hosts: k8s-cluster:etcd:calico-rr
+- hosts: k8s_cluster:etcd:calico_rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
@@ -47,7 +47,7 @@
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
 - name: Finally handle worker upgrades, based on given batch size
-  hosts: kube-node:!kube_control_plane
+  hosts: kube_node:!kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
diff --git a/facts.yml b/facts.yml
index e0281ee40e01b30ca4e66d3c405c27a4cb5ba344..fae86eb30b0cd417d0bccc9e1b71e94050edf2f8 100644
--- a/facts.yml
+++ b/facts.yml
@@ -1,6 +1,6 @@
 ---
 - name: Gather facts
-  hosts: k8s-cluster:etcd:calico-rr
+  hosts: k8s_cluster:etcd:calico_rr
   gather_facts: False
   tasks:
     - name: Gather minimal facts
diff --git a/inventory/local/hosts.ini b/inventory/local/hosts.ini
index 7c3bc95599a0d0a5756a916802e1af8bce34f410..4a6197e49b7ffc1c3d705e65b777c93550bf225d 100644
--- a/inventory/local/hosts.ini
+++ b/inventory/local/hosts.ini
@@ -6,9 +6,9 @@ node1
 [etcd]
 node1
 
-[kube-node]
+[kube_node]
 node1
 
-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
diff --git a/inventory/sample/group_vars/k8s-cluster/addons.yml b/inventory/sample/group_vars/k8s_cluster/addons.yml
similarity index 100%
rename from inventory/sample/group_vars/k8s-cluster/addons.yml
rename to inventory/sample/group_vars/k8s_cluster/addons.yml
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
similarity index 100%
rename from inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
rename to inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml
similarity index 100%
rename from inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml
rename to inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-canal.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-canal.yml
similarity index 100%
rename from inventory/sample/group_vars/k8s-cluster/k8s-net-canal.yml
rename to inventory/sample/group_vars/k8s_cluster/k8s-net-canal.yml
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-cilium.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-cilium.yml
similarity index 100%
rename from inventory/sample/group_vars/k8s-cluster/k8s-net-cilium.yml
rename to inventory/sample/group_vars/k8s_cluster/k8s-net-cilium.yml
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-flannel.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml
similarity index 100%
rename from inventory/sample/group_vars/k8s-cluster/k8s-net-flannel.yml
rename to inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-kube-router.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-router.yml
similarity index 100%
rename from inventory/sample/group_vars/k8s-cluster/k8s-net-kube-router.yml
rename to inventory/sample/group_vars/k8s_cluster/k8s-net-kube-router.yml
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-macvlan.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-macvlan.yml
similarity index 100%
rename from inventory/sample/group_vars/k8s-cluster/k8s-net-macvlan.yml
rename to inventory/sample/group_vars/k8s_cluster/k8s-net-macvlan.yml
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-weave.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-weave.yml
similarity index 100%
rename from inventory/sample/group_vars/k8s-cluster/k8s-net-weave.yml
rename to inventory/sample/group_vars/k8s_cluster/k8s-net-weave.yml
diff --git a/inventory/sample/inventory.ini b/inventory/sample/inventory.ini
index b450bc068df6c8ff6405616e6f861b7191a13011..99a630922372bfba602d500d34abf73cec144674 100644
--- a/inventory/sample/inventory.ini
+++ b/inventory/sample/inventory.ini
@@ -23,16 +23,16 @@
 # node2
 # node3
 
-[kube-node]
+[kube_node]
 # node2
 # node3
 # node4
 # node5
 # node6
 
-[calico-rr]
+[calico_rr]
 
-[k8s-cluster:children]
+[k8s_cluster:children]
 kube_control_plane
-kube-node
-calico-rr
+kube_node
+calico_rr
diff --git a/legacy_groups.yml b/legacy_groups.yml
new file mode 100644
index 0000000000000000000000000000000000000000..85e6f9ccbfc1f50f67e7d444729e2211f5289d76
--- /dev/null
+++ b/legacy_groups.yml
@@ -0,0 +1,42 @@
+---
+# This playbook keeps compatibility with inventories that still use the old hyphenated group names
+
+- name: Add kube-master nodes to kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
+- name: Add kube-node nodes to kube_node
+  hosts: kube-node
+  gather_facts: false
+  tasks:
+    - name: add nodes to kube_node group
+      group_by:
+        key: 'kube_node'
+
+- name: Add k8s-cluster nodes to k8s_cluster
+  hosts: k8s-cluster
+  gather_facts: false
+  tasks:
+    - name: add nodes to k8s_cluster group
+      group_by:
+        key: 'k8s_cluster'
+
+- name: Add calico-rr nodes to calico_rr
+  hosts: calico-rr
+  gather_facts: false
+  tasks:
+    - name: add nodes to calico_rr group
+      group_by:
+        key: 'calico_rr'
+
+- name: Add no-floating nodes to no_floating
+  hosts: no-floating
+  gather_facts: false
+  tasks:
+    - name: add nodes to no_floating group
+      group_by:
+        key: 'no_floating'
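
As a sketch of what this shim buys you: a legacy inventory such as the hypothetical one below keeps working unchanged, because each old hyphenated group is mapped onto its underscore equivalent at runtime.

```ini
[kube-master]
node1

[etcd]
node1

[kube-node]
node1

[k8s-cluster:children]
kube-master
kube-node
```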
diff --git a/recover-control-plane.yml b/recover-control-plane.yml
index c2b28d093ce016b30cdecb7f5141a163eeac3eb1..03d573d3bb0f0f37491f78bb57c22960daeec388 100644
--- a/recover-control-plane.yml
+++ b/recover-control-plane.yml
@@ -2,14 +2,8 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
-- name: Add kube-master nodes to kube_control_plane
-  # This is for old inventory which contains kube-master instead of kube_control_plane
-  hosts: kube-master
-  gather_facts: false
-  tasks:
-    - name: add nodes to kube_control_plane group
-      group_by:
-        key: 'kube_control_plane'
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
 
 - hosts: bastion[0]
   gather_facts: False
diff --git a/remove-node.yml b/remove-node.yml
index 27c886035bea116cc0e3429102dc03fd167b6eec..ddf56614e5bb90bda29dac806c5681e381ef73c1 100644
--- a/remove-node.yml
+++ b/remove-node.yml
@@ -2,16 +2,10 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
-- name: Add kube-master nodes to kube_control_plane
-  # This is for old inventory which contains kube-master instead of kube_control_plane
-  hosts: kube-master
-  gather_facts: false
-  tasks:
-    - name: add nodes to kube_control_plane group
-      group_by:
-        key: 'kube_control_plane'
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
 
-- hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}"
+- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   vars_prompt:
@@ -34,7 +28,7 @@
     - { role: bootstrap-os, tags: bootstrap-os }
     - { role: remove-node/pre-remove, tags: pre-remove }
 
-- hosts: "{{ node | default('kube-node') }}"
+- hosts: "{{ node | default('kube_node') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:
diff --git a/reset.yml b/reset.yml
index 81f2389d4935affb23d53a31ee9d01efcf58a350..80d8f158cc631a2330c10c74716bf8fac9720145 100644
--- a/reset.yml
+++ b/reset.yml
@@ -2,14 +2,8 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
-- name: Add kube-master nodes to kube_control_plane
-  # This is for old inventory which contains kube-master instead of kube_control_plane
-  hosts: kube-master
-  gather_facts: false
-  tasks:
-    - name: add nodes to kube_control_plane group
-      group_by:
-        key: 'kube_control_plane'
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
 
 - hosts: bastion[0]
   gather_facts: False
@@ -21,7 +15,7 @@
 - name: Gather facts
   import_playbook: facts.yml
 
-- hosts: etcd:k8s-cluster:calico-rr
+- hosts: etcd:k8s_cluster:calico_rr
   gather_facts: False
   vars_prompt:
     name: "reset_confirmation"
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 7d50050c836794b0e5f246c04822e9649fee8885..066ee3e4a430da4e8ac101cd223a25a27133cdfe 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -548,7 +548,7 @@ downloads:
     tag: "{{ netcheck_server_image_tag }}"
     sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   netcheck_agent:
     enabled: "{{ deploy_netchecker }}"
@@ -557,7 +557,7 @@ downloads:
     tag: "{{ netcheck_agent_image_tag }}"
     sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   etcd:
     container: "{{ etcd_deployment_type != 'host' }}"
@@ -588,7 +588,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   kubeadm:
     enabled: true
@@ -601,7 +601,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   kubelet:
     enabled: true
@@ -614,7 +614,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   kubectl:
     enabled: true
@@ -640,7 +640,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   crun:
     file: true
@@ -653,7 +653,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   kata_containers:
     enabled: "{{ kata_containers_enabled }}"
@@ -666,7 +666,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   nerdctl:
     file: true
@@ -679,7 +679,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   cilium:
     enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
@@ -688,7 +688,7 @@ downloads:
     tag: "{{ cilium_image_tag }}"
     sha256: "{{ cilium_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   cilium_init:
     enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
@@ -697,7 +697,7 @@ downloads:
     tag: "{{ cilium_init_image_tag }}"
     sha256: "{{ cilium_init_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   cilium_operator:
     enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
@@ -706,7 +706,7 @@ downloads:
     tag: "{{ cilium_operator_image_tag }}"
     sha256: "{{ cilium_operator_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   multus:
     enabled: "{{ kube_network_plugin_multus }}"
@@ -715,7 +715,7 @@ downloads:
     tag: "{{ multus_image_tag }}"
     sha256: "{{ multus_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   flannel:
     enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
@@ -724,7 +724,7 @@ downloads:
     tag: "{{ flannel_image_tag }}"
     sha256: "{{ flannel_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   calicoctl:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
@@ -737,7 +737,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   calico_node:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
@@ -746,7 +746,7 @@ downloads:
     tag: "{{ calico_node_image_tag }}"
     sha256: "{{ calico_node_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   calico_cni:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
@@ -755,7 +755,7 @@ downloads:
     tag: "{{ calico_cni_image_tag }}"
     sha256: "{{ calico_cni_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   calico_policy:
     enabled: "{{ enable_network_policy and kube_network_plugin in ['calico', 'canal'] }}"
@@ -764,7 +764,7 @@ downloads:
     tag: "{{ calico_policy_image_tag }}"
     sha256: "{{ calico_policy_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   calico_typha:
     enabled: "{{ typha_enabled }}"
@@ -773,7 +773,7 @@ downloads:
     tag: "{{ calico_typha_image_tag }}"
     sha256: "{{ calico_typha_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   calico_crds:
     file: true
@@ -799,7 +799,7 @@ downloads:
     tag: "{{ weave_kube_image_tag }}"
     sha256: "{{ weave_kube_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   weave_npc:
     enabled: "{{ kube_network_plugin == 'weave' }}"
@@ -808,7 +808,7 @@ downloads:
     tag: "{{ weave_npc_image_tag }}"
     sha256: "{{ weave_npc_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   ovn4nfv:
     enabled: "{{ kube_network_plugin == 'ovn4nfv' }}"
@@ -817,7 +817,7 @@ downloads:
     tag: "{{ ovn4nfv_k8s_plugin_image_tag }}"
     sha256: "{{ ovn4nfv_k8s_plugin_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   kube_ovn:
     enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
@@ -826,7 +826,7 @@ downloads:
     tag: "{{ kube_ovn_container_image_tag }}"
     sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   kube_router:
     enabled: "{{ kube_network_plugin == 'kube-router' }}"
@@ -835,7 +835,7 @@ downloads:
     tag: "{{ kube_router_image_tag }}"
     sha256: "{{ kube_router_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   pod_infra:
     enabled: true
@@ -844,7 +844,7 @@ downloads:
     tag: "{{ pod_infra_image_tag }}"
     sha256: "{{ pod_infra_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   install_socat:
     enabled: "{{ ansible_os_family in ['Flatcar Container Linux by Kinvolk'] }}"
@@ -853,7 +853,7 @@ downloads:
     tag: "{{ install_socat_image_tag }}"
     sha256: "{{ install_socat_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   nginx:
     enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' }}"
@@ -862,7 +862,7 @@ downloads:
     tag: "{{ nginx_image_tag }}"
     sha256: "{{ nginx_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   haproxy:
     enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy' }}"
@@ -871,7 +871,7 @@ downloads:
     tag: "{{ haproxy_image_tag }}"
     sha256: "{{ haproxy_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   coredns:
     enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
@@ -889,7 +889,7 @@ downloads:
     tag: "{{ nodelocaldns_image_tag }}"
     sha256: "{{ nodelocaldns_digest_checksum|default(None) }}"
     groups:
-    - k8s-cluster
+    - k8s_cluster
 
   dnsautoscaler:
     enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
@@ -927,7 +927,7 @@ downloads:
     tag: "{{ registry_image_tag }}"
     sha256: "{{ registry_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   registry_proxy:
     enabled: "{{ registry_enabled }}"
@@ -936,7 +936,7 @@ downloads:
     tag: "{{ registry_proxy_image_tag }}"
     sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   metrics_server:
     enabled: "{{ metrics_server_enabled }}"
@@ -964,7 +964,7 @@ downloads:
     tag: "{{ local_volume_provisioner_image_tag }}"
     sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   cephfs_provisioner:
     enabled: "{{ cephfs_provisioner_enabled }}"
@@ -973,7 +973,7 @@ downloads:
     tag: "{{ cephfs_provisioner_image_tag }}"
     sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   rbd_provisioner:
     enabled: "{{ rbd_provisioner_enabled }}"
@@ -982,7 +982,7 @@ downloads:
     tag: "{{ rbd_provisioner_image_tag }}"
     sha256: "{{ rbd_provisioner_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   local_path_provisioner:
     enabled: "{{ local_path_provisioner_enabled }}"
@@ -991,7 +991,7 @@ downloads:
     tag: "{{ local_path_provisioner_image_tag }}"
     sha256: "{{ local_path_provisioner_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   ingress_nginx_controller:
     enabled: "{{ ingress_nginx_enabled }}"
@@ -1000,7 +1000,7 @@ downloads:
     tag: "{{ ingress_nginx_controller_image_tag }}"
     sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   ingress_ambassador_controller:
     enabled: "{{ ingress_ambassador_enabled }}"
@@ -1009,7 +1009,7 @@ downloads:
     tag: "{{ ingress_ambassador_image_tag }}"
     sha256: "{{ ingress_ambassador_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   ingress_alb_controller:
     enabled: "{{ ingress_alb_enabled }}"
@@ -1018,7 +1018,7 @@ downloads:
     tag: "{{ alb_ingress_image_tag }}"
     sha256: "{{ ingress_alb_controller_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   cert_manager_controller:
     enabled: "{{ cert_manager_enabled }}"
@@ -1027,7 +1027,7 @@ downloads:
     tag: "{{ cert_manager_controller_image_tag }}"
     sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   cert_manager_cainjector:
     enabled: "{{ cert_manager_enabled }}"
@@ -1036,7 +1036,7 @@ downloads:
     tag: "{{ cert_manager_cainjector_image_tag }}"
     sha256: "{{ cert_manager_cainjector_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   cert_manager_webhook:
     enabled: "{{ cert_manager_enabled }}"
@@ -1045,7 +1045,7 @@ downloads:
     tag: "{{ cert_manager_webhook_image_tag }}"
     sha256: "{{ cert_manager_webhook_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   csi_attacher:
     enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
@@ -1054,7 +1054,7 @@ downloads:
     tag: "{{ csi_attacher_image_tag }}"
     sha256: "{{ csi_attacher_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   csi_provisioner:
     enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
@@ -1063,7 +1063,7 @@ downloads:
     tag: "{{ csi_provisioner_image_tag }}"
     sha256: "{{ csi_provisioner_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   csi_snapshotter:
     enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
@@ -1072,7 +1072,7 @@ downloads:
     tag: "{{ csi_snapshotter_image_tag }}"
     sha256: "{{ csi_snapshotter_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   snapshot_controller:
     enabled: "{{ cinder_csi_enabled }}"
@@ -1081,7 +1081,7 @@ downloads:
     tag: "{{ snapshot_controller_image_tag }}"
     sha256: "{{ snapshot_controller_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   csi_resizer:
     enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
@@ -1090,7 +1090,7 @@ downloads:
     tag: "{{ csi_resizer_image_tag }}"
     sha256: "{{ csi_resizer_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   csi_node_driver_registrar:
     enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
@@ -1099,7 +1099,7 @@ downloads:
     tag: "{{ csi_node_driver_registrar_image_tag }}"
     sha256: "{{ csi_node_driver_registrar_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   cinder_csi_plugin:
     enabled: "{{ cinder_csi_enabled }}"
@@ -1108,7 +1108,7 @@ downloads:
     tag: "{{ cinder_csi_plugin_image_tag }}"
     sha256: "{{ cinder_csi_plugin_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   aws_ebs_csi_plugin:
     enabled: "{{ aws_ebs_csi_enabled }}"
@@ -1117,7 +1117,7 @@ downloads:
     tag: "{{ aws_ebs_csi_plugin_image_tag }}"
     sha256: "{{ aws_ebs_csi_plugin_digest_checksum|default(None) }}"
     groups:
-    - kube-node
+    - kube_node
 
   dashboard:
     enabled: "{{ dashboard_enabled }}"
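
For orientation, every entry in this `downloads` dict shares the same shape, and the `groups` list decides which inventory groups fetch the artifact. A sketch with placeholder values (not a real entry):

```yaml
example_image:
  enabled: true
  container: true
  repo: "registry.example.com/example"
  tag: "v1.0.0"
  sha256: ""
  groups:
  - k8s_cluster   # pulled on every cluster member, not only kube_node
```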
diff --git a/roles/download/tasks/prep_kubeadm_images.yml b/roles/download/tasks/prep_kubeadm_images.yml
index fa829e8f0eafa55b03b0f5cdac76a722d1dcec08..c520a9416f70703c5a1abccb80318093df874264 100644
--- a/roles/download/tasks/prep_kubeadm_images.yml
+++ b/roles/download/tasks/prep_kubeadm_images.yml
@@ -55,7 +55,7 @@
         container: true
         repo: "{{ item | regex_replace('^(.*):.*$','\\1') }}"
         tag: "{{ item | regex_replace('^.*:(.*)$','\\1') }}"
-        groups: k8s-cluster
+        groups: k8s_cluster
   loop: "{{ kubeadm_images_list | flatten(levels=1) }}"
   register: kubeadm_images_cooked
   run_once: true
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 6415a56183d70adf5351c635bc52bb5a6cd4b052..c11758e957940f1397337274cf306287b2a29506 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -55,7 +55,7 @@ etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %
 
 etcd_blkio_weight: 1000
 
-etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}"
+etcd_node_cert_hosts: "{{ groups['k8s_cluster'] | union(groups.get('calico_rr', [])) }}"
 
 etcd_compaction_retention: "8"
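
To illustrate the union pattern used for etcd_node_cert_hosts above, here is a toy case (group contents assumed):

```yaml
# k8s_cluster = [node1, node2], calico_rr = [rr1]
#   groups['k8s_cluster'] | union(groups.get('calico_rr', []))  ->  [node1, node2, rr1]
# If calico_rr is absent from the inventory, groups.get('calico_rr', []) yields []
# and the expression degrades gracefully to the k8s_cluster members alone.
```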
 
diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml
index 611026b48c31199150414daeecac1ff4161af132..ed0580b55a385c32a98201e8667186b49a349859 100644
--- a/roles/etcd/tasks/check_certs.yml
+++ b/roles/etcd/tasks/check_certs.yml
@@ -33,8 +33,8 @@
   stat:
     path: "{{ etcd_cert_dir }}/{{ item }}"
   register: etcd_node_certs
-  when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
-        inventory_hostname in groups['k8s-cluster'])
+  when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
+        inventory_hostname in groups['k8s_cluster'])
   with_items:
     - ca.pem
     - node-{{ inventory_hostname }}.pem
@@ -56,7 +56,7 @@
         '{{ etcd_cert_dir }}/member-{{ host }}.pem',
         '{{ etcd_cert_dir }}/member-{{ host }}-key.pem',
       {% endfor %}
-      {% set k8s_nodes = groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort %}
+      {% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort %}
       {% for host in k8s_nodes %}
         '{{ etcd_cert_dir }}/node-{{ host }}.pem',
         '{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
@@ -89,7 +89,7 @@
   set_fact:
     gen_node_certs: |-
       {
-      {% set k8s_nodes = groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort -%}
+      {% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort -%}
       {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
       {% for host in k8s_nodes -%}
         {% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %}
@@ -125,8 +125,8 @@
   set_fact:
     kubernetes_host_requires_sync: true
   when:
-    - (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
-      inventory_hostname in groups['k8s-cluster']) and
+    - (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
+      inventory_hostname in groups['k8s_cluster']) and
       inventory_hostname not in groups['etcd']
     - (not etcd_node_certs.results[0].stat.exists|default(false)) or
       (not etcd_node_certs.results[1].stat.exists|default(false)) or
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 893e61c1944d9ea744a8dd6432ca58286c0300ac..1f438f98665971606f7470655540b388094c2b6e 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -59,7 +59,7 @@
                     {{ m }}
                   {% endif %}
                 {% endfor %}"
-    - HOSTS: "{% for h in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
+    - HOSTS: "{% for h in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %}
                 {% if gen_node_certs[h] %}
                     {{ h }}
                 {% endif %}
@@ -109,7 +109,7 @@
     src: "{{ item }}"
   register: etcd_master_node_certs
   with_items:
-    - "[{% for node in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
+    - "[{% for node in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %}
         '{{ etcd_cert_dir }}/node-{{ node }}.pem',
         '{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
         {% endfor %}]"
@@ -144,8 +144,8 @@
 - name: "Check_certs | Set 'sync_certs' to true on nodes"
   set_fact:
     sync_certs: true
-  when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
-        inventory_hostname in groups['k8s-cluster']) and
+  when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
+        inventory_hostname in groups['k8s_cluster']) and
         inventory_hostname not in groups['etcd']
   with_items:
     - "{{ my_etcd_node_certs }}"
@@ -159,8 +159,8 @@
   register: etcd_node_certs
   check_mode: no
   delegate_to: "{{ groups['etcd'][0] }}"
-  when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
-        inventory_hostname in groups['k8s-cluster']) and
+  when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
+        inventory_hostname in groups['k8s_cluster']) and
         sync_certs|default(false) and inventory_hostname not in groups['etcd']
 
 - name: Gen_certs | Copy certs on nodes
@@ -170,8 +170,8 @@
   no_log: true
   changed_when: false
   check_mode: no
-  when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
-        inventory_hostname in groups['k8s-cluster']) and
+  when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
+        inventory_hostname in groups['k8s_cluster']) and
         sync_certs|default(false) and inventory_hostname not in groups['etcd']
   notify: set etcd_secret_changed
 
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 966c555d54a29e1a7d1f0030fd1d78ff5931d549..98890e23871c8ee7c6fc430a6df45223dfde4fca 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -20,7 +20,7 @@
   register: "etcd_client_cert_serial_result"
   changed_when: false
   when:
-    - inventory_hostname in groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort
+    - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
   tags:
     - master
     - network
@@ -29,7 +29,7 @@
   set_fact:
     etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}"
   when:
-    - inventory_hostname in groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort
+    - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
   tags:
     - master
     - network
diff --git a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
index 47ba0a1d0d7522a6cac3104bf4ed4417ffd79b7f..a62a9db3912a0fa9b0bf5bcdaadb455d750a29f7 100644
--- a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml
@@ -5,11 +5,11 @@
 - name: Cinder CSI Driver | Write cacert file
   include_tasks: cinder-write-cacert.yml
   run_once: true
-  loop: "{{ groups['k8s-cluster'] }}"
+  loop: "{{ groups['k8s_cluster'] }}"
   loop_control:
     loop_var: delegate_host_to_write_cacert
   when:
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - cinder_cacert is defined
     - cinder_cacert | length > 0
   tags: cinder-csi-driver
diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml
index 7c89fdbdffde351ebfa7e94d9370d1928618c89b..dd3528094b7ad5e8378e05e6d9e2f7f2c8111523 100644
--- a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml
@@ -5,11 +5,11 @@
 - name: External OpenStack Cloud Controller | Write cacert file
   include_tasks: openstack-write-cacert.yml
   run_once: true
-  loop: "{{ groups['k8s-cluster'] }}"
+  loop: "{{ groups['k8s_cluster'] }}"
   loop_control:
     loop_var: delegate_host_to_write_cacert
   when:
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - external_openstack_cacert is defined
     - external_openstack_cacert | length > 0
   tags: external-openstack
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
index 88a1788252b22a2240688f46a4006615286df8f7..404aee389f22572d4a534bb6b362f0ebf601e63e 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml
@@ -4,7 +4,7 @@
   include_tasks: basedirs.yml
   loop_control:
     loop_var: delegate_host_base_dir
-  loop: "{{ groups['k8s-cluster'] | product(local_volume_provisioner_storage_classes.keys()) | list }}"
+  loop: "{{ groups['k8s_cluster'] | product(local_volume_provisioner_storage_classes.keys()) | list }}"
 
 - name: Local Volume Provisioner | Create addon dir
   file:
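
The `product` filter in the loop above crosses every host with every storage class; a toy illustration (names assumed):

```yaml
# k8s_cluster = [node1, node2]; storage classes = {fast-disks: ...}
#   groups['k8s_cluster'] | product(local_volume_provisioner_storage_classes.keys()) | list
#   -> [[node1, fast-disks], [node2, fast-disks]]
# basedirs.yml therefore runs once per (host, storage class) pair.
```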
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/README.md b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md
index 47969d5f413d09b7edb045025708690c697e5085..4d7c957c64f2c6f06f111c85f3824b5f4754aced 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/README.md
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md
@@ -33,7 +33,7 @@ LS0tLS1CRUdJTiBSU0Eg...
 
 For further information, read the official [Cert-Manager CA Configuration](https://cert-manager.io/docs/configuration/ca/) doc.
 
-Once the base64 encoded values have been added to `templates\secret-cert-manager.yml.j2`, cert-manager can now be enabled by editing your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s-cluster\addons.yml` and setting `cert_manager_enabled` to true.
+Once the base64 encoded values have been added to `templates/secret-cert-manager.yml.j2`, cert-manager can then be enabled by editing your K8s cluster addons inventory, e.g. `inventory/sample/group_vars/k8s_cluster/addons.yml`, and setting `cert_manager_enabled` to true.
 
 ```ini
 # Cert manager deployment
@@ -46,7 +46,7 @@ If you don't have a TLS Root CA certificate and key available, you can create th
 
 A common use-case for cert-manager is requesting TLS signed certificates to secure your ingress resources. This can be done by simply adding annotations to your Ingress resources and cert-manager will facilitate creating the Certificate resource for you. A small sub-component of cert-manager, ingress-shim, is responsible for this.
 
-To enable the Nginx Ingress controller as part of your Kubespray deployment, simply edit your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s-cluster\addons.yml` and set `ingress_nginx_enabled` to true.
+To enable the Nginx Ingress controller as part of your Kubespray deployment, edit your K8s cluster addons inventory, e.g. `inventory/sample/group_vars/k8s_cluster/addons.yml`, and set `ingress_nginx_enabled` to true.
 
 ```ini
 # Nginx ingress controller deployment
diff --git a/roles/kubernetes-apps/metallb/README.md b/roles/kubernetes-apps/metallb/README.md
index a898d096c889ade42525e4118d56fc01fe4cc8f2..1456a6e8a7348c303f6d16f355981b0bbb3de3fd 100644
--- a/roles/kubernetes-apps/metallb/README.md
+++ b/roles/kubernetes-apps/metallb/README.md
@@ -11,7 +11,7 @@ It deploys MetalLB into Kubernetes and sets up a layer 2 or BGP load-balancer.
 
 By default, MetalLB is not deployed into your Kubernetes cluster.
 You can override the defaults by copying the contents of roles/kubernetes-apps/metallb/defaults/main.yml
-to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/groups_vars/k8s-cluster/addons.yml
+to somewhere in inventory/mycluster/group_vars, such as inventory/mycluster/group_vars/k8s_cluster/addons.yml,
 and setting the metallb_enabled option to `true`.
 In addition, you need to update the metallb_ip_range option in addons.yml to suit your network
 environment, because MetalLB allocates external IP addresses from this range.
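
A minimal sketch of the two options mentioned above, with an illustrative address range (pick one from your own network):

```yaml
# inventory/mycluster/group_vars/k8s_cluster/addons.yml
metallb_enabled: true
metallb_ip_range:
  - "10.5.0.50-10.5.0.99"
```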
diff --git a/roles/kubernetes-apps/metrics_server/tasks/main.yml b/roles/kubernetes-apps/metrics_server/tasks/main.yml
index c3be4b8308d4296dc36aa32124071e79070ec32c..fdc9fc1e93c1dc989240cbd7b4fbcb625b3474de 100644
--- a/roles/kubernetes-apps/metrics_server/tasks/main.yml
+++ b/roles/kubernetes-apps/metrics_server/tasks/main.yml
@@ -2,7 +2,7 @@
 # If all masters have node role, there are no tainted master and toleration should not be specified.
 - name: Check all masters are node or not
   set_fact:
-    masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
+    masters_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
 
 - name: Metrics Server | Delete addon dir
   file:
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index 2c60fa7eec9a41a1709276848d05c95107f53402..b362a2a4908e1d1bf938096abd6c566631915f62 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -192,5 +192,5 @@
   with_items:
     - "node-role.kubernetes.io/master:NoSchedule-"
     - "node-role.kubernetes.io/control-plane:NoSchedule-"
-  when: inventory_hostname in groups['kube-node']
+  when: inventory_hostname in groups['kube_node']
   failed_when: false
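
The task above is roughly equivalent to running kubectl by hand; a sketch (node name hypothetical):

```ShellSession
# Allow scheduling on a control-plane host that is also in kube_node
kubectl taint nodes node1 node-role.kubernetes.io/master:NoSchedule-
kubectl taint nodes node1 node-role.kubernetes.io/control-plane:NoSchedule-
```

`failed_when: false` mirrors the fact that kubectl errors out when the taint is already absent.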
diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2
index 2be6e860c8cec674b9852bf9c8335f58da5c9fc4..11c3e714b99ca99d24dea5dd164bf09c8fac3315 100644
--- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2
@@ -16,7 +16,7 @@ nodeRegistration:
 {% if kube_override_hostname|default('') %}
   name: {{ kube_override_hostname }}
 {% endif %}
-{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube-node'] %}
+{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %}
   taints:
   - effect: NoSchedule
     key: node-role.kubernetes.io/master
diff --git a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
index 787613e60659600ebc6d3f2ab83b20b875a8e40e..c87b840c164966f4ab1c30b204fefe2170c44251 100644
--- a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
+++ b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
@@ -50,7 +50,7 @@
   register: "etcd_client_cert_serial_result"
   changed_when: false
   when:
-    - inventory_hostname in groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort
+    - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
   tags:
     - network
 
diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta2.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta2.j2
index c92569ec1eb98729a8f430375ffa689321aebcfe..143a731ed597db2c7250a20af6c452d1a1d7b3a0 100644
--- a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta2.j2
+++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta2.j2
@@ -21,7 +21,7 @@ caCertPath: {{ kube_cert_dir }}/ca.crt
 nodeRegistration:
   name: {{ kube_override_hostname }}
   criSocket: {{ cri_socket }}
-{% if 'calico-rr' in group_names and 'kube-node' not in group_names %}
+{% if 'calico_rr' in group_names and 'kube_node' not in group_names %}
   taints:
   - effect: NoSchedule
     key: node-role.kubernetes.io/calico-rr
diff --git a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
index 868d1bc93a73cabb15a84e92d44f7bd92e8faff2..c11af118483cc5d0c30c6ee1b86d9e4af7e50142 100644
--- a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
+++ b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
@@ -81,7 +81,7 @@ resolvConf: "{{ kube_resolv_conf }}"
 {% if kubelet_config_extra_args %}
 {{ kubelet_config_extra_args | to_nice_yaml(indent=2) }}
 {% endif %}
-{% if inventory_hostname in groups['kube-node'] and kubelet_node_config_extra_args %}
+{% if inventory_hostname in groups['kube_node'] and kubelet_node_config_extra_args %}
 {{ kubelet_node_config_extra_args | to_nice_yaml(indent=2) }}
 {% endif %}
 {% if tls_min_version is defined %}
diff --git a/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2 b/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2
index 68c04fd36e65c1346a305a612970a8e39cef4f52..3ca1ffdd79f9148ad00cc083f3327b40858e197c 100644
--- a/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2
+++ b/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2
@@ -34,7 +34,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {%   endif %}
 {% endif %}
 
-KUBELET_ARGS="{{ kubelet_args_base }} {% if node_taints|default([]) %}--register-with-taints={{ node_taints | join(',') }} {% endif %} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube-node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
+KUBELET_ARGS="{{ kubelet_args_base }} {% if node_taints|default([]) %}--register-with-taints={{ node_taints | join(',') }} {% endif %} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube_node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
 {% if kubelet_flexvolumes_plugins_dir is defined %}
 KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
 {% endif %}
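
Because of the `groups['kube_node']` guard in KUBELET_ARGS above, node-only flags can be added without affecting dedicated control-plane hosts; a sketch (the flag choice is illustrative):

```yaml
# Applied only to hosts in the kube_node group
kubelet_node_custom_flags:
  - "--serialize-image-pulls=false"
```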
diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
index a232694e943e85e38d22288dfa8825aa34b25ebc..720fa55b8c242b88cacbc13790f0deae5d7b21e9 100644
--- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
@@ -1,10 +1,10 @@
 ---
-- name: Stop if either kube_control_plane or kube-node group is empty
+- name: Stop if either kube_control_plane or kube_node group is empty
   assert:
     that: "groups.get('{{ item }}')"
   with_items:
     - kube_control_plane
-    - kube-node
+    - kube_node
   run_once: true
   when: not ignore_assert_errors
 
@@ -86,7 +86,7 @@
     that: ansible_memtotal_mb >= minimal_node_memory_mb
   when:
     - not ignore_assert_errors
-    - inventory_hostname in groups['kube-node']
+    - inventory_hostname in groups['kube_node']
 
 # This assertion will fail on the safe side: One can indeed schedule more pods
 # on a node than the CIDR-range has space for when additional pods use the host
@@ -99,7 +99,7 @@
     msg: "Do not schedule more pods on a node than inet addresses are available."
   when:
     - not ignore_assert_errors
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - kube_network_node_prefix is defined
     - kube_network_plugin != 'calico'
 
@@ -207,14 +207,14 @@
     - inventory_hostname == groups['kube_control_plane'][0]
   run_once: yes
 
-- name: "Check that calico_rr nodes are in k8s-cluster group"
+- name: "Check that calico_rr nodes are in k8s_cluster group"
   assert:
     that:
-      - '"k8s-cluster" in group_names'
-    msg: "calico-rr must be a child group of k8s-cluster group"
+      - '"k8s_cluster" in group_names'
+    msg: "calico_rr must be a child group of k8s_cluster group"
   when:
     - kube_network_plugin == 'calico'
-    - '"calico-rr" in group_names'
+    - '"calico_rr" in group_names'
 
 - name: "Check that kube_service_addresses is a network range"
   assert:
diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
index 4020406743b57e2455a6144193612b16b211edf6..f184670ab7a08e27df58dffc91f902dfa4c9acd3 100644
--- a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
+++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
@@ -4,7 +4,7 @@
     path: "{{ item }}"
     state: directory
     owner: kube
-  when: inventory_hostname in groups['k8s-cluster']
+  when: inventory_hostname in groups['k8s_cluster']
   become: true
   tags:
     - kubelet
@@ -28,7 +28,7 @@
     path: "{{ item }}"
     state: directory
     owner: root
-  when: inventory_hostname in groups['k8s-cluster']
+  when: inventory_hostname in groups['k8s_cluster']
   become: true
   tags:
     - kubelet
@@ -51,7 +51,7 @@
     get_mime: no
   register: kube_cert_compat_dir_check
   when:
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - kube_cert_dir != kube_cert_compat_dir
 
 - name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498)
@@ -60,7 +60,7 @@
     dest: "{{ kube_cert_compat_dir }}"
     state: link
   when:
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - kube_cert_dir != kube_cert_compat_dir
     - not kube_cert_compat_dir_check.stat.exists
 
@@ -75,7 +75,7 @@
     - "/var/lib/calico"
   when:
     - kube_network_plugin in ["calico", "weave", "canal", "flannel", "cilium", "kube-ovn", "ovn4nfv", "kube-router", "macvlan"]
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
   tags:
     - network
     - cilium
@@ -96,7 +96,7 @@
     mode: "{{ local_volume_provisioner_directory_mode }}"
   with_items: "{{ local_volume_provisioner_storage_classes.keys() | list }}"
   when:
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - local_volume_provisioner_enabled
   tags:
     - persistent_volumes
diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
index b011fd57b91f58a8e2bcda4ef5f286721967f6ac..95bc711dcdad26c5e8c8d09aa776963825a6ceb1 100644
--- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
+++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
@@ -2,7 +2,7 @@
 - name: Hosts | create list from inventory
   set_fact:
     etc_hosts_inventory_block: |-
-      {% for item in (groups['k8s-cluster'] + groups['etcd']|default([]) + groups['calico-rr']|default([]))|unique -%}
+      {% for item in (groups['k8s_cluster'] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%}
       {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%}
       {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}
       {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }}{% endif %} {{ item }}.{{ dns_domain }} {{ item }}
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index 40d4910d28a9ab30b6a250859eed9a1a13b16d64..aa1cf214a5f5f3b4585207e5d1bbf598e401ccd6 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -27,7 +27,7 @@
     TOKEN_DIR: "{{ kube_token_dir }}"
   with_nested:
     - [ 'system:kubelet' ]
-    - "{{ groups['kube-node'] }}"
+    - "{{ groups['kube_node'] }}"
   register: gentoken_node
   changed_when: "'Added' in gentoken_node.stdout"
   run_once: yes
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 6ca0b9844805a2a2fe5cd4c92cf2c31e14d81025..edafbe56745e6621c4b7be38646185985061d533 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -147,8 +147,8 @@ kube_log_level: 2
 kube_network_plugin: calico
 kube_network_plugin_multus: false
 
-# Determines if calico-rr group exists
-peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
+# Determines if calico_rr group exists
+peer_with_calico_rr: "{{ 'calico_rr' in groups and groups['calico_rr']|length > 0 }}"
 
 # Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
 calico_datastore: "kdd"
diff --git a/roles/kubespray-defaults/tasks/fallback_ips.yml b/roles/kubespray-defaults/tasks/fallback_ips.yml
index 291bd3fccfe7e4a7edc6457f7718d6ee982d79d3..acca31c0cb6b248240610d8b772d71d161435d5a 100644
--- a/roles/kubespray-defaults/tasks/fallback_ips.yml
+++ b/roles/kubespray-defaults/tasks/fallback_ips.yml
@@ -7,7 +7,7 @@
   tags: always
   include_tasks: fallback_ips_gather.yml
   when: hostvars[delegate_host_to_gather_facts].ansible_default_ipv4 is not defined
-  loop: "{{ groups['k8s-cluster']|default([]) + groups['etcd']|default([]) + groups['calico-rr']|default([]) }}"
+  loop: "{{ groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]) }}"
   loop_control:
     loop_var: delegate_host_to_gather_facts
   run_once: yes
@@ -16,7 +16,7 @@
   set_fact:
     fallback_ips_base: |
       ---
-      {% for item in (groups['k8s-cluster']|default([]) + groups['etcd']|default([]) + groups['calico-rr']|default([]))|unique %}
+      {% for item in (groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique %}
       {% set found = hostvars[item].get('ansible_default_ipv4') %}
       {{ item }}: "{{ found.get('address', '127.0.0.1') }}"
       {% endfor %}
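
The template above renders a plain host-to-address mapping; illustrative output for a two-host inventory (addresses assumed):

```yaml
# fallback_ips_base after templating:
# node1: "192.168.121.10"
# node2: "127.0.0.1"   # no ansible_default_ipv4 address could be found
```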
diff --git a/roles/kubespray-defaults/tasks/no_proxy.yml b/roles/kubespray-defaults/tasks/no_proxy.yml
index 984bb50a282d46eb30b9da397b989cb65ca6c7c1..6e6a5c9bbc7b02a55eeec541523cd1b78472c967 100644
--- a/roles/kubespray-defaults/tasks/no_proxy.yml
+++ b/roles/kubespray-defaults/tasks/no_proxy.yml
@@ -9,9 +9,9 @@
       {%- if no_proxy_exclude_workers | default(false) -%}
       {% set cluster_or_master = 'kube_control_plane' %}
       {%- else -%}
-      {% set cluster_or_master = 'k8s-cluster' %}
+      {% set cluster_or_master = 'k8s_cluster' %}
       {%- endif -%}
-      {%- for item in (groups[cluster_or_master] + groups['etcd']|default([]) + groups['calico-rr']|default([]))|unique -%}
+      {%- for item in (groups[cluster_or_master] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%}
       {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }},
       {%-   if item != hostvars[item].get('ansible_hostname', '') -%}
       {{ hostvars[item]['ansible_hostname'] }},
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index 3dabd56cad2749d82e77b0c799e4d70a43557310..fb202611f3c504e4dcdd475a548d99dd416361c3 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -193,7 +193,7 @@
     nodeToNodeMeshEnabled: "false"
   when:
     - peer_with_router|default(false) or peer_with_calico_rr|default(false)
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
   run_once: yes
 
 - name: Calico | Set up BGP Configuration
@@ -264,7 +264,7 @@
   until: output.rc == 0
   delay: "{{ retry_stagger | random + 3 }}"
   with_items:
-    - "{{ groups['calico-rr'] | default([]) }}"
+    - "{{ groups['calico_rr'] | default([]) }}"
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
     - peer_with_calico_rr|default(false)
@@ -290,7 +290,7 @@
   until: output.rc == 0
   delay: "{{ retry_stagger | random + 3 }}"
   with_items:
-    - "{{ groups['calico-rr'] | default([]) }}"
+    - "{{ groups['calico_rr'] | default([]) }}"
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
     - peer_with_calico_rr|default(false)
@@ -368,9 +368,9 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when:
     - peer_with_router|default(false)
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - local_as is defined
-    - groups['calico-rr'] | default([]) | length == 0
+    - groups['calico_rr'] | default([]) | length == 0
 
 - name: Calico | Configure peering with router(s) at node scope
   command:
@@ -396,4 +396,4 @@
     - "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}"
   when:
     - peer_with_router|default(false)
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2
index f13576ffc4d12ea0a06478c203211d16d7f02a9a..b3645d2d60639e232371f1f312595de727bc39fb 100644
--- a/roles/network_plugin/calico/templates/calico-config.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-config.yml.j2
@@ -22,6 +22,6 @@ data:
   cluster_type: "kubespray,bgp"
   calico_backend: "bird"
 {% endif %}
-{% if inventory_hostname in groups['k8s-cluster'] and peer_with_router|default(false) %}
+{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router|default(false) %}
   as: "{{ local_as|default(global_as_num) }}"
 {% endif -%}
diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml
index 6be517bc45082c26325b3891d2b6a14042ee959a..30190124d13e0fc240a2a5811c5401ec1b570f72 100644
--- a/roles/network_plugin/kube-router/tasks/annotate.yml
+++ b/roles/network_plugin/kube-router/tasks/annotate.yml
@@ -6,16 +6,16 @@
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane']
 
-- name: kube-router | Add annotations on kube-node
+- name: kube-router | Add annotations on kube_node
   command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_node }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when: kube_router_annotations_node is defined and inventory_hostname in groups['kube-node']
+  when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node']
 
 - name: kube-router | Add common annotations on all servers
   command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
   - "{{ kube_router_annotations_all }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s-cluster']
+  when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s_cluster']
diff --git a/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2
index 51b9ff51f757cc543320bac54d1c48f91a32124d..60400dd49172e4d827b5b38965d6007a4e45bbca 100644
--- a/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2
+++ b/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2
@@ -1,4 +1,4 @@
-{% for host in groups['kube-node'] %}
+{% for host in groups['kube_node'] %}
 {% if hostvars[host]['access_ip'] is defined  %}
 {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr  %}
 {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }}
diff --git a/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2
index ea96cb4048e2e3ea0fa47c18f756e9b90cb7f577..696eba50110769e8629818c3e9be1589a86db048 100644
--- a/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2
+++ b/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2
@@ -4,7 +4,7 @@ Name=mac0
 [Network]
 Address={{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }}/{{ node_pod_cidr|ipaddr('prefix') }}
 
-{% for host in groups['kube-node'] %}
+{% for host in groups['kube_node'] %}
 {% if hostvars[host]['access_ip'] is defined  %}
 {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr  %}
 [Route]
diff --git a/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2
index 0f2cbc15f2aca447e8187e84b379f3c630c47218..9edd6d1577d4548bf966f64f83b0a03edceedbda 100644
--- a/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2
+++ b/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2
@@ -5,7 +5,7 @@ iface mac0 inet static
     netmask {{ node_pod_cidr|ipaddr('netmask') }}
     broadcast {{ node_pod_cidr|ipaddr('broadcast') }}
     pre-up ip link add link {{ macvlan_interface }} mac0 type macvlan mode bridge
-{% for host in groups['kube-node'] %}
+{% for host in groups['kube_node'] %}
 {% if hostvars[host]['access_ip'] is defined  %}
 {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr  %}
     post-up ip route add {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }}
@@ -15,7 +15,7 @@ iface mac0 inet static
 {% if enable_nat_default_gateway %}
     post-up iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE
 {% endif %}
-{% for host in groups['kube-node'] %}
+{% for host in groups['kube_node'] %}
 {% if hostvars[host]['access_ip'] is defined  %}
 {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr  %}
     post-down ip route del {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }}
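
All three macvlan templates iterate the renamed kube_node group to program static routes toward every other node's pod CIDR. For a hypothetical peer with access_ip 192.168.1.12 and node_pod_cidr 10.233.65.0/24, each loop iteration renders a route like `10.233.65.0/24 via 192.168.1.12` (plus the matching `post-down` removal on Debian); hosts whose `node_pod_cidr` equals the local one are skipped by the inner condition.
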
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index 541d1b4414f5855fee8a7b2d2e85db77ee56244f..2cafaeb7fef53c6dfde10f5f74d2fe609c8fac9e 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -15,7 +15,7 @@
       --grace-period {{ drain_grace_period }}
       --timeout {{ drain_timeout }}
       --delete-local-data {{ hostvars[item]['kube_override_hostname']|default(item) }}
-  loop: "{{ node.split(',') | default(groups['kube-node']) }}"
+  loop: "{{ node.split(',') | default(groups['kube_node']) }}"
   # ignore servers that are not nodes
   when: hostvars[item]['kube_override_hostname']|default(item) in nodes.stdout_lines
   register: result
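
The drain loop honours an explicit comma-separated `node` extra-var and otherwise falls back to every member of kube_node; a typical invocation (hostnames are placeholders) would be `ansible-playbook -i inventory/mycluster/hosts.yaml -b remove-node.yml -e node=node4,node5`. Hosts that are not registered as nodes are skipped by the `when` guard.
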
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index d216cd5ceb4dcbb4a0c03aee3497b25388a2c5b8..735dd4c320b62906e17814cd6ab55d2cd53f0cb2 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -207,7 +207,7 @@
 - name: Clear IPVS virtual server table
   command: "ipvsadm -C"
   when:
-    - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster']
+    - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster']
 
 - name: reset | check kube-ipvs0 network device
   stat:
diff --git a/scale.yml b/scale.yml
index caecfef704907cb6665282a36c4ea7628053ea8e..5e218791a5e6e5e3fced0342c34b49e28b8dcf91 100644
--- a/scale.yml
+++ b/scale.yml
@@ -2,14 +2,8 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
-- name: Add kube-master nodes to kube_control_plane
-  # This is for old inventory which contains kube-master instead of kube_control_plane
-  hosts: kube-master
-  gather_facts: false
-  tasks:
-    - name: add nodes to kube_control_plane group
-      group_by:
-        key: 'kube_control_plane'
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
 
 - hosts: bastion[0]
   gather_facts: False
@@ -19,7 +13,7 @@
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
 
 - name: Bootstrap any new workers
-  hosts: kube-node
+  hosts: kube_node
   strategy: linear
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
@@ -52,7 +46,7 @@
     - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(engine)
-  hosts: kube-node
+  hosts: kube_node
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -64,7 +58,7 @@
     - { role: etcd, tags: etcd, etcd_cluster_setup: false, when: "not etcd_kubeadm_enabled|default(false)" }
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(node)
-  hosts: kube-node
+  hosts: kube_node
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -95,7 +89,7 @@
       when: kubeadm_certificate_key is not defined
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(network)
-  hosts: kube-node
+  hosts: kube_node
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
diff --git a/tests/cloud_playbooks/delete-aws.yml b/tests/cloud_playbooks/delete-aws.yml
index b72caf0ee75c045aa493cf99797ced272b33d4ef..02f9b06c7ef70b49263306081e87c78c433820c5 100644
--- a/tests/cloud_playbooks/delete-aws.yml
+++ b/tests/cloud_playbooks/delete-aws.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-node
+- hosts: kube_node
   become: False
 
   tasks:
diff --git a/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 b/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2
index 4158788c944957bfe22e28c13adef3333415de03..1ead107a036ba8f4bd46b6605cdd038540a586d3 100644
--- a/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2
+++ b/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2
@@ -11,9 +11,13 @@ instance-1
 [kube-master]
 instance-1
 
+# TODO(cristicalin): Remove kube-node,k8s-cluster groups from this file after releasing v2.16.
 [kube-node]
 instance-2
 
+[kube_node]
+instance-2
+
 [etcd]
 instance-3
 {% elif mode is defined and mode in ["ha", "ha-scale"] %}
@@ -28,6 +32,9 @@ instance-2
 [kube-node]
 instance-3
 
+[kube_node]
+instance-3
+
 [etcd]
 instance-1
 instance-2
@@ -42,6 +49,9 @@ instance-1
 [kube-node]
 instance-2
 
+[kube_node]
+instance-2
+
 [etcd]
 instance-1
 {% elif mode == "aio" %}
@@ -54,6 +64,9 @@ instance-1
 [kube-node]
 instance-1
 
+[kube_node]
+instance-1
+
 [etcd]
 instance-1
 {% elif mode == "ha-recover" %}
@@ -68,6 +81,9 @@ instance-2
 [kube-node]
 instance-3
 
+[kube_node]
+instance-3
+
 [etcd]
 instance-3
 instance-1
@@ -92,6 +108,9 @@ instance-2
 [kube-node]
 instance-3
 
+[kube_node]
+instance-3
+
 [etcd]
 instance-3
 instance-1
@@ -111,6 +130,13 @@ kube-node
 kube-master
 calico-rr
 
 [calico-rr]
+
+[k8s_cluster:children]
+kube_node
+kube_control_plane
+calico_rr
+
+[calico_rr]
 
 [fake_hosts]
diff --git a/tests/templates/inventory-aws.j2 b/tests/templates/inventory-aws.j2
index 538f1bdaca6e7c45747ca7d3c1366a74838dfe1c..e3c5373f2d34e07fff7d36db1824ca9252bf98e9 100644
--- a/tests/templates/inventory-aws.j2
+++ b/tests/templates/inventory-aws.j2
@@ -6,7 +6,7 @@ node3 ansible_ssh_host={{ec2.instances[2].public_ip}} ansible_ssh_user={{ssh_use
 node1
 node2
 
-[kube-node]
+[kube_node]
 node1
 node2
 node3
@@ -15,12 +15,12 @@ node3
 node1
 node2
 
-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
-calico-rr
+calico_rr
 
-[calico-rr]
+[calico_rr]
 
 [broken_kube_control_plane]
 node2
diff --git a/tests/templates/inventory-do.j2 b/tests/templates/inventory-do.j2
index c24d40180bb1cea53419edcdd8abb503a0b17da2..fb543612358e812c411d1270eb30ae6a196c679c 100644
--- a/tests/templates/inventory-do.j2
+++ b/tests/templates/inventory-do.j2
@@ -6,7 +6,7 @@
 [kube_control_plane]
 {{droplets.results[0].droplet.name}}
 
-[kube-node]
+[kube_node]
 {{droplets.results[1].droplet.name}}
 
 [etcd]
@@ -16,7 +16,7 @@
 {{droplets.results[0].droplet.name}}
 {{droplets.results[1].droplet.name}}
 
-[kube-node]
+[kube_node]
 {{droplets.results[2].droplet.name}}
 
 [etcd]
@@ -32,16 +32,16 @@
 [kube_control_plane]
 {{droplets.results[0].droplet.name}}
 
-[kube-node]
+[kube_node]
 {{droplets.results[1].droplet.name}}
 
 [etcd]
 {{droplets.results[0].droplet.name}}
 {% endif %}
 
-[calico-rr]
+[calico_rr]
 
-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
-calico-rr
+calico_rr
diff --git a/tests/templates/inventory-gce.j2 b/tests/templates/inventory-gce.j2
index e1e0bc451fe4d765458c2ad867ca3a0cbacb404c..33e9bbc733f41bd70b6318d286286a3b3610596e 100644
--- a/tests/templates/inventory-gce.j2
+++ b/tests/templates/inventory-gce.j2
@@ -12,7 +12,7 @@
 [kube_control_plane]
 {{node1}}
 
-[kube-node]
+[kube_node]
 {{node2}}
 
 [etcd]
@@ -23,7 +23,7 @@
 {{node1}}
 {{node2}}
 
-[kube-node]
+[kube_node]
 {{node3}}
 
 [etcd]
@@ -41,7 +41,7 @@
 [kube_control_plane]
 {{node1}}
 
-[kube-node]
+[kube_node]
 {{node2}}
 
 [etcd]
@@ -50,24 +50,24 @@
 [kube_control_plane]
 {{node1}}
 
-[kube-node]
+[kube_node]
 {{node1}}
 
 [etcd]
 {{node1}}
 {% endif %}
 
-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
-calico-rr
+calico_rr
 
-[calico-rr]
+[calico_rr]
 
 {% if mode is defined and mode in ["scale", "separate-scale", "ha-scale"] %}
 [fake_hosts]
 fake_scale_host[1:200]
 
-[kube-node:children]
+[kube_node:children]
 fake_hosts
 {% endif %}
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 174c9750c797ba8d17e983d993a0bad7c870792b..18cf6daf10004df0649e52dda99eb295772c74f3 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-node
+- hosts: kube_node
   tasks:
     - name: Test tunl0 routes
       shell: "set -o pipefail && ! /sbin/ip ro | grep '/26 via' | grep -v tunl0"
@@ -9,7 +9,7 @@
         - (ipip|default(true) or cloud_provider is defined)
         - kube_network_plugin|default('calico') == 'calico'
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   vars:
     agent_report_interval: 10
     netcheck_namespace: default
@@ -44,7 +44,7 @@
       args:
         executable: /bin/bash
       register: nca_pod
-      until: nca_pod.stdout_lines|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2
+      until: nca_pod.stdout_lines|length >= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2
       retries: 3
       delay: 10
       failed_when: false
@@ -76,7 +76,7 @@
       delay: "{{ agent_report_interval }}"
       until: agents.content|length > 0 and
         agents.content[0] == '{' and
-        agents.content|from_json|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2
+        agents.content|from_json|length >= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2
       failed_when: false
       no_log: true
 
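
The `* 2` factor in the two checks above encodes the expectation of two netchecker agents per cluster node: the test deploys both a pod-network and a host-network agent DaemonSet, so a play covering three k8s_cluster hosts waits for at least six agent reports before proceeding.
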
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 6fd30537b260377d1edcf832d38bee7b31a2f6d8..5b6d7b207882d87f37db5207d6be35e6b5eacfa7 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -2,14 +2,8 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
-- name: Add kube-master nodes to kube_control_plane
-  # This is for old inventory which contains kube-master instead of kube_control_plane
-  hosts: kube-master
-  gather_facts: false
-  tasks:
-    - name: add nodes to kube_control_plane group
-      group_by:
-        key: 'kube_control_plane'
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
 
 - hosts: bastion[0]
   gather_facts: False
@@ -18,7 +12,7 @@
     - { role: kubespray-defaults }
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
 
-- hosts: k8s-cluster:etcd:calico-rr
+- hosts: k8s_cluster:etcd:calico_rr
   strategy: linear
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
@@ -46,7 +40,7 @@
     - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }
 
 - name: Prepare nodes for upgrade
-  hosts: k8s-cluster:etcd:calico-rr
+  hosts: k8s_cluster:etcd:calico_rr
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -56,7 +50,7 @@
     - { role: download, tags: download, when: "not skip_downloads" }
 
 - name: Upgrade container engine on non-cluster nodes
-  hosts: etcd:calico-rr:!k8s-cluster
+  hosts: etcd:calico_rr:!k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -78,7 +72,7 @@
         etcd_events_cluster_setup: false
       when: not etcd_kubeadm_enabled | default(false)
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -110,7 +104,7 @@
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
 - name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
-  hosts: kube_control_plane:calico-rr:kube-node
+  hosts: kube_control_plane:calico_rr:kube_node
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
@@ -123,7 +117,7 @@
     - { role: kubernetes-apps/policy_controller, tags: policy-controller }
 
 - name: Finally handle worker upgrades, based on given batch size
-  hosts: kube-node:calico-rr:!kube_control_plane
+  hosts: kube_node:calico_rr:!kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -145,7 +139,7 @@
     - { role: kubespray-defaults }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
-- hosts: calico-rr
+- hosts: calico_rr
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -161,7 +155,7 @@
     - { role: kubespray-defaults }
     - { role: kubernetes-apps, tags: apps }
 
-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"