diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md
index 5e88849f570116766917ae3bf6c211046607b3ad..cf5391e96a3e617d50fa6b99287eff04b20b7823 100644
--- a/contrib/terraform/openstack/README.md
+++ b/contrib/terraform/openstack/README.md
@@ -60,17 +60,17 @@ You can create many different kubernetes topologies by setting the number of
 different classes of hosts. For each class there are options for allocating
 floating IP addresses or not.
 
-- Master nodes with etcd
-- Master nodes without etcd
+- Control plane nodes with etcd
+- Control plane nodes without etcd
 - Standalone etcd hosts
 - Kubernetes worker nodes
 
 Note that the Ansible script will report an invalid configuration if you wind up
 with an even number of etcd instances since that is not a valid configuration. This
 restriction includes standalone etcd nodes that are deployed in a cluster along with
-master nodes with etcd replicas. As an example, if you have three master nodes with
-etcd replicas and three standalone etcd nodes, the script will fail since there are
-now six total etcd replicas.
+control plane nodes with etcd replicas. As an example, if you have three control plane
+nodes with etcd replicas and three standalone etcd nodes, the script will fail since
+there are now six total etcd replicas.
 
 ### GlusterFS shared file system
 
diff --git a/docs/ansible/ansible.md b/docs/ansible/ansible.md
index 40d52f9c3345e626a31573a016268712147e2c4b..7871300ff2606ded700214bfd8150123f568f224 100644
--- a/docs/ansible/ansible.md
+++ b/docs/ansible/ansible.md
@@ -155,6 +155,7 @@ The following tags are defined in playbooks:
 | container_engine_accelerator   | Enable nvidia accelerator for runtimes                |
 | container-engine               | Configuring container engines                         |
 | container-runtimes             | Configuring container runtimes                        |
+| control-plane                  | Configuring K8s control plane node role               |
 | coredns                        | Configuring coredns deployment                        |
 | crio                           | Configuring crio container engine for hosts           |
 | crun                           | Configuring crun runtime                              |
@@ -199,7 +200,7 @@ The following tags are defined in playbooks:
 | local-path-provisioner         | Configure External provisioner: local-path            |
 | local-volume-provisioner       | Configure External provisioner: local-volume          |
 | macvlan                        | Network plugin macvlan                                |
-| master                         | Configuring K8s master node role                      |
+| master (DEPRECATED)            | Deprecated - see `control-plane`                      |
 | metallb                        | Installing and configuring metallb                    |
 | metrics_server                 | Configuring metrics_server                            |
 | netchecker                     | Installing netchecker K8s app                         |
@@ -210,7 +211,7 @@ The following tags are defined in playbooks:
 | node                           | Configuring K8s minion (compute) node role            |
 | nodelocaldns                   | Configuring nodelocaldns daemonset                    |
 | node-label                     | Tasks linked to labeling of nodes                     |
-| node-webhook                   | Tasks linked to webhook (grating access to resources) |
+| node-webhook                   | Tasks linked to webhook (granting access to resources)|
 | nvidia_gpu                     | Enable nvidia accelerator for runtimes                |
 | oci                            | Cloud provider: oci                                   |
 | persistent_volumes             | Configure csi volumes                                 |
diff --git a/docs/operations/etcd.md b/docs/operations/etcd.md
index 2efc85ce88f31feecf0deb4bd0f578ae89cb833f..94434ebeb2e026980b9af31aa580d9724ebc5875 100644
--- a/docs/operations/etcd.md
+++ b/docs/operations/etcd.md
@@ -14,7 +14,7 @@ Installs docker in etcd group members and runs etcd on docker containers. Only u
 
 ### Kubeadm
 
-This deployment method is experimental and is only available for new deployments. This deploys etcd as a static pod in master hosts.
+This deployment method is experimental and is only available for new deployments. This deploys etcd as a static pod on control plane hosts.
 
 ## Metrics
 
diff --git a/extra_playbooks/migrate_openstack_provider.yml b/extra_playbooks/migrate_openstack_provider.yml
index bba3d918c1f8deb8338e1ab092b4188118041cf7..002fa715597f9a78ead2d3e8eed716fa6232121f 100644
--- a/extra_playbooks/migrate_openstack_provider.yml
+++ b/extra_playbooks/migrate_openstack_provider.yml
@@ -13,7 +13,7 @@
   tasks:
     - name: Include kubespray-default variables
       include_vars: ../roles/kubespray-defaults/defaults/main/main.yml
-    - name: Copy get_cinder_pvs.sh to master
+    - name: Copy get_cinder_pvs.sh to the first control plane node
       copy:
         src: get_cinder_pvs.sh
         dest: /tmp
diff --git a/extra_playbooks/upgrade-only-k8s.yml b/extra_playbooks/upgrade-only-k8s.yml
index 5f396fa28ebf64554eb033f7676a9ab1eac6efa2..5556f2674830bc243500339c357013636990f633 100644
--- a/extra_playbooks/upgrade-only-k8s.yml
+++ b/extra_playbooks/upgrade-only-k8s.yml
@@ -36,7 +36,7 @@
     - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, tags: preinstall }
 
-- name: Handle upgrades to master components first to maintain backwards compat.
+- name: Handle upgrades to control plane components first to maintain backwards compat.
   hosts: kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml
index c7f762870027f4422ef2800c7faba2faece3082c..5ada833c109a05659dba9b4ede3dc4645b86e2b2 100644
--- a/inventory/sample/group_vars/all/all.yml
+++ b/inventory/sample/group_vars/all/all.yml
@@ -75,8 +75,8 @@ loadbalancer_apiserver_healthcheck_port: 8081
 # skip_http_proxy_on_os_packages: false
 
 ## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
-## pods will restart) when adding or removing workers.  To override this behaviour by only including master nodes in the
-## no_proxy variable, set below to true:
+## pods will restart) when adding or removing workers.  To override this behaviour and only include control plane
+## nodes in the no_proxy variable, set below to true:
 no_proxy_exclude_workers: false
 
 ## Certificate Management
diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
index a352e4cf6836cd3b14697a0bd533501f3a0436c7..522ddc589032470146af9379f3e4eea127eeeefe 100644
--- a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
@@ -272,7 +272,7 @@ default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
 # kube_cpu_reserved: 100m
 # kube_ephemeral_storage_reserved: 2Gi
 # kube_pid_reserved: "1000"
-# Reservation for master hosts
+# Reservation for control plane hosts
 # kube_master_memory_reserved: 512Mi
 # kube_master_cpu_reserved: 200m
 # kube_master_ephemeral_storage_reserved: 2Gi
diff --git a/playbooks/remove_node.yml b/playbooks/remove_node.yml
index f994dae43bb50283b00b81590d9beba5d04066d2..469b326325ac443c26c2250e3352852305c3ffb9 100644
--- a/playbooks/remove_node.yml
+++ b/playbooks/remove_node.yml
@@ -33,7 +33,7 @@
     - { role: remove-node/remove-etcd-node }
     - { role: reset, tags: reset, when: reset_nodes | default(True) | bool }
 
-# Currently cannot remove first master or etcd
+# Currently cannot remove first control plane node or first etcd node
 - name: Post node removal
   hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
   gather_facts: false
diff --git a/playbooks/upgrade_cluster.yml b/playbooks/upgrade_cluster.yml
index 99511a8206f78dd8281ecf1553f1ad214a873f4d..861c026f7d6eee3411eb4089411c90849f03f40e 100644
--- a/playbooks/upgrade_cluster.yml
+++ b/playbooks/upgrade_cluster.yml
@@ -38,7 +38,7 @@
 - name: Install etcd
   import_playbook: install_etcd.yml
 
-- name: Handle upgrades to master components first to maintain backwards compat.
+- name: Handle upgrades to control plane components first to maintain backwards compat.
   gather_facts: false
   hosts: kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -60,7 +60,7 @@
     - { role: kubernetes-apps, tags: csi-driver }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
-- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
+- name: Upgrade calico and external cloud provider on all control plane nodes, calico-rrs, and nodes
   hosts: kube_control_plane:calico_rr:kube_node
   gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index 9c8b8a82f165d86c86216ba26389e5eb75888fc6..62c56dee81e16a2a37b05f66af487386b1435c71 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -13,19 +13,19 @@
   service:
     name: etcd
     state: restarted
-  when: is_etcd_master
+  when: ('etcd' in group_names)
   listen: Restart etcd
 
 - name: Reload etcd-events
   service:
     name: etcd-events
     state: restarted
-  when: is_etcd_master
+  when: ('etcd' in group_names)
   listen: Restart etcd-events
 
 - name: Wait for etcd up
   uri:
-    url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
+    url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
     validate_certs: false
     client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
     client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
@@ -40,7 +40,7 @@
 
 - name: Wait for etcd-events up
   uri:
-    url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
+    url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
     validate_certs: false
     client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
     client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index b7b943f0d10696d984818d7af0a948d11f0dd162..58383fa1fe96125a3e0fc8a8b05caec019f1e27e 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -9,7 +9,7 @@
   check_mode: false
   run_once: true
   when:
-    - is_etcd_master
+    - ('etcd' in group_names)
     - etcd_cluster_setup
   tags:
     - facts
@@ -30,7 +30,7 @@
   check_mode: false
   run_once: true
   when:
-    - is_etcd_master
+    - ('etcd' in group_names)
     - etcd_events_cluster_setup
   tags:
     - facts
@@ -43,7 +43,7 @@
 
 - name: Configure | Refresh etcd config
   include_tasks: refresh_config.yml
-  when: is_etcd_master
+  when: ('etcd' in group_names)
 
 - name: Configure | Copy etcd.service systemd file
   template:
@@ -54,7 +54,9 @@
     # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
     # Remove once we drop support for systemd < 250
     validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:etcd-{{ etcd_deployment_type }}.service'"
-  when: is_etcd_master and etcd_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_cluster_setup
 
 - name: Configure | Copy etcd-events.service systemd file
   template:
@@ -65,12 +67,14 @@
     validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:etcd-events-{{ etcd_deployment_type }}.service'"
     # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
     # Remove once we drop support for systemd < 250
-  when: is_etcd_master and etcd_events_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_events_cluster_setup
 
 - name: Configure | reload systemd
   systemd_service:
     daemon_reload: true
-  when: is_etcd_master
+  when: ('etcd' in group_names)
 
 # when scaling new etcd will fail to start
 - name: Configure | Ensure etcd is running
@@ -79,7 +83,9 @@
     state: started
     enabled: true
   ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}"  # noqa ignore-errors
-  when: is_etcd_master and etcd_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_cluster_setup
 
 # when scaling new etcd will fail to start
 - name: Configure | Ensure etcd-events is running
@@ -88,7 +94,9 @@
     state: started
     enabled: true
   ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}"  # noqa ignore-errors
-  when: is_etcd_master and etcd_events_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_events_cluster_setup
 
 - name: Configure | Wait for etcd cluster to be healthy
   shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null"
@@ -102,7 +110,7 @@
   check_mode: false
   run_once: true
   when:
-    - is_etcd_master
+    - ('etcd' in group_names)
     - etcd_cluster_setup
   tags:
     - facts
@@ -125,7 +133,7 @@
   check_mode: false
   run_once: true
   when:
-    - is_etcd_master
+    - ('etcd' in group_names)
     - etcd_events_cluster_setup
   tags:
     - facts
@@ -142,7 +150,9 @@
   ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: false
-  when: is_etcd_master and etcd_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_cluster_setup
   tags:
     - facts
   environment:
@@ -158,7 +168,9 @@
   ignore_errors: true  # noqa ignore-errors
   changed_when: false
   check_mode: false
-  when: is_etcd_master and etcd_events_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_events_cluster_setup
   tags:
     - facts
   environment:
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 74d5f16d3992c6644cb34ed09fb2f8e2060d6e6f..b6e3658840e623e2cbad6a55bbc49cfab11bd2c1 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -16,7 +16,7 @@
 - name: Trust etcd CA
   include_tasks: upd_ca_trust.yml
   when:
-    - inventory_hostname in groups['etcd'] | union(groups['kube_control_plane']) | unique | sort
+    - ('etcd' in group_names) or ('kube_control_plane' in group_names)
   tags:
     - etcd-secrets
 
@@ -39,7 +39,8 @@
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
     - inventory_hostname in groups['k8s_cluster']
   tags:
-    - master
+    - master    # master tag is deprecated and replaced by control-plane
+    - control-plane
     - network
 
 - name: Set etcd_client_cert_serial
@@ -50,7 +51,8 @@
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
     - inventory_hostname in groups['k8s_cluster']
   tags:
-    - master
+    - master    # master tag is deprecated and replaced by control-plane
+    - control-plane
     - network
 
 - name: Install etcdctl and etcdutl binary
@@ -61,36 +63,42 @@
     - etcdutl
     - upgrade
   when:
-    - inventory_hostname in groups['etcd']
+    - ('etcd' in group_names)
     - etcd_cluster_setup
 
 - name: Install etcd
   include_tasks: "install_{{ etcd_deployment_type }}.yml"
-  when: is_etcd_master
+  when: ('etcd' in group_names)
   tags:
     - upgrade
 
 - name: Configure etcd
   include_tasks: configure.yml
-  when: is_etcd_master
+  when: ('etcd' in group_names)
 
 - name: Refresh etcd config
   include_tasks: refresh_config.yml
-  when: is_etcd_master
+  when: ('etcd' in group_names)
 
 - name: Restart etcd if certs changed
   command: /bin/true
   notify: Restart etcd
-  when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed | default(false)
+  when:
+    - ('etcd' in group_names)
+    - etcd_cluster_setup
+    - etcd_secret_changed | default(false)
 
 - name: Restart etcd-events if certs changed
   command: /bin/true
   notify: Restart etcd
-  when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed | default(false)
+  when:
+    - ('etcd' in group_names)
+    - etcd_events_cluster_setup
+    - etcd_secret_changed | default(false)
 
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state instead of `new`.
 - name: Refresh etcd config again for idempotency
   include_tasks: refresh_config.yml
-  when: is_etcd_master
+  when: ('etcd' in group_names)
diff --git a/roles/etcd/tasks/refresh_config.yml b/roles/etcd/tasks/refresh_config.yml
index effebbddbef4dcf04348af4a690f3f76621f50e4..ff0f8a622c3c189c991218df679a6760bd6edfb2 100644
--- a/roles/etcd/tasks/refresh_config.yml
+++ b/roles/etcd/tasks/refresh_config.yml
@@ -5,7 +5,9 @@
     dest: /etc/etcd.env
     mode: "0640"
   notify: Restart etcd
-  when: is_etcd_master and etcd_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_cluster_setup
 
 - name: Refresh config | Create etcd-events config file
   template:
@@ -13,4 +15,6 @@
     dest: /etc/etcd-events.env
     mode: "0640"
   notify: Restart etcd-events
-  when: is_etcd_master and etcd_events_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_events_cluster_setup
diff --git a/roles/kubernetes-apps/metrics_server/tasks/main.yml b/roles/kubernetes-apps/metrics_server/tasks/main.yml
index 3517686cb7e319aa987b2140831a36480d666f68..9c4e7cb7ca1e80601ab94a19fd35c499e1cef95c 100644
--- a/roles/kubernetes-apps/metrics_server/tasks/main.yml
+++ b/roles/kubernetes-apps/metrics_server/tasks/main.yml
@@ -1,8 +1,8 @@
 ---
-# If all masters have node role, there are no tainted master and toleration should not be specified.
-- name: Check all masters are node or not
+# If all control plane nodes also have the node role, none of them are tainted, so the toleration should not be specified.
+- name: Check all control plane nodes are node or not
   set_fact:
-    masters_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
+    control_plane_nodes_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
 
 - name: Metrics Server | Delete addon dir
   file:
diff --git a/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 b/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2
index ce121fe154e89e7cb5be6a22c5502090ff0d06c3..746af17ad172edf6405224bebd75c732b0040f66 100644
--- a/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2
+++ b/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2
@@ -85,9 +85,9 @@ spec:
       volumes:
         - name: tmp
           emptyDir: {}
-{% if not masters_are_not_tainted or metrics_server_extra_tolerations is defined %}
+{% if not control_plane_nodes_are_not_tainted or metrics_server_extra_tolerations is defined %}
       tolerations:
-{% if not masters_are_not_tainted %}
+{% if not control_plane_nodes_are_not_tainted %}
         - key: node-role.kubernetes.io/control-plane
           effect: NoSchedule
 {% endif %}
diff --git a/roles/kubernetes/control-plane/defaults/main/main.yml b/roles/kubernetes/control-plane/defaults/main/main.yml
index df92c419be745732e580a2639b4f6a721e5c8ffc..30b71b14971f9ccddd484f28db98a3a89168a901 100644
--- a/roles/kubernetes/control-plane/defaults/main/main.yml
+++ b/roles/kubernetes/control-plane/defaults/main/main.yml
@@ -5,7 +5,7 @@ upgrade_cluster_setup: false
 # By default the external API listens on all interfaces, this can be changed to
 # listen on a specific address/interface.
 # NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost
-# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
+# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
 kube_apiserver_bind_address: 0.0.0.0
 
 # A port range to reserve for services with NodePort visibility.
@@ -38,7 +38,7 @@ kube_controller_manager_leader_elect_renew_deadline: 10s
 # discovery_timeout modifies the discovery timeout
 discovery_timeout: 5m0s
 
-# Instruct first master to refresh kubeadm token
+# Instruct first control plane node to refresh kubeadm token
 kubeadm_refresh_token: true
 
 # Scale down coredns replicas to 0 if not using coredns dns_mode
diff --git a/roles/kubernetes/control-plane/handlers/main.yml b/roles/kubernetes/control-plane/handlers/main.yml
index 3d7f3e074096ce1d6d09c45442b957496dda58fe..ef554a238e44b754c067305716791b6858bfe836 100644
--- a/roles/kubernetes/control-plane/handlers/main.yml
+++ b/roles/kubernetes/control-plane/handlers/main.yml
@@ -1,16 +1,16 @@
 ---
-- name: Master | reload systemd
+- name: Control plane | reload systemd
   systemd_service:
     daemon_reload: true
-  listen: Master | restart kubelet
+  listen: Control plane | restart kubelet
 
-- name: Master | reload kubelet
+- name: Control plane | reload kubelet
   service:
     name: kubelet
     state: restarted
-  listen: Master | restart kubelet
+  listen: Control plane | restart kubelet
 
-- name: Master | Remove apiserver container docker
+- name: Control plane | Remove apiserver container docker
   shell: "set -o pipefail && docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f"
   args:
     executable: /bin/bash
@@ -19,9 +19,9 @@
   until: remove_apiserver_container.rc == 0
   delay: 1
   when: container_manager == "docker"
-  listen: Master | Restart apiserver
+  listen: Control plane | Restart apiserver
 
-- name: Master | Remove apiserver container containerd/crio
+- name: Control plane | Remove apiserver container containerd/crio
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
   args:
     executable: /bin/bash
@@ -30,9 +30,9 @@
   until: remove_apiserver_container.rc == 0
   delay: 1
   when: container_manager in ['containerd', 'crio']
-  listen: Master | Restart apiserver
+  listen: Control plane | Restart apiserver
 
-- name: Master | Remove scheduler container docker
+- name: Control plane | Remove scheduler container docker
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
   args:
     executable: /bin/bash
@@ -41,9 +41,9 @@
   until: remove_scheduler_container.rc == 0
   delay: 1
   when: container_manager == "docker"
-  listen: Master | Restart kube-scheduler
+  listen: Control plane | Restart kube-scheduler
 
-- name: Master | Remove scheduler container containerd/crio
+- name: Control plane | Remove scheduler container containerd/crio
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
   args:
     executable: /bin/bash
@@ -52,9 +52,9 @@
   until: remove_scheduler_container.rc == 0
   delay: 1
   when: container_manager in ['containerd', 'crio']
-  listen: Master | Restart kube-scheduler
+  listen: Control plane | Restart kube-scheduler
 
-- name: Master | Remove controller manager container docker
+- name: Control plane | Remove controller manager container docker
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
   args:
     executable: /bin/bash
@@ -63,9 +63,9 @@
   until: remove_cm_container.rc == 0
   delay: 1
   when: container_manager == "docker"
-  listen: Master | Restart kube-controller-manager
+  listen: Control plane | Restart kube-controller-manager
 
-- name: Master | Remove controller manager container containerd/crio
+- name: Control plane | Remove controller manager container containerd/crio
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
   args:
     executable: /bin/bash
@@ -74,9 +74,9 @@
   until: remove_cm_container.rc == 0
   delay: 1
   when: container_manager in ['containerd', 'crio']
-  listen: Master | Restart kube-controller-manager
+  listen: Control plane | Restart kube-controller-manager
 
-- name: Master | wait for kube-scheduler
+- name: Control plane | wait for kube-scheduler
   vars:
     endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
   uri:
@@ -87,10 +87,10 @@
   retries: 60
   delay: 1
   listen:
-    - Master | restart kubelet
-    - Master | Restart kube-scheduler
+    - Control plane | restart kubelet
+    - Control plane | Restart kube-scheduler
 
-- name: Master | wait for kube-controller-manager
+- name: Control plane | wait for kube-controller-manager
   vars:
     endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
   uri:
@@ -101,10 +101,10 @@
   retries: 60
   delay: 1
   listen:
-    - Master | restart kubelet
-    - Master | Restart kube-controller-manager
+    - Control plane | restart kubelet
+    - Control plane | Restart kube-controller-manager
 
-- name: Master | wait for the apiserver to be running
+- name: Control plane | wait for the apiserver to be running
   uri:
     url: "{{ kube_apiserver_endpoint }}/healthz"
     validate_certs: false
@@ -113,5 +113,5 @@
   retries: 60
   delay: 1
   listen:
-    - Master | restart kubelet
-    - Master | Restart apiserver
+    - Control plane | restart kubelet
+    - Control plane | Restart apiserver
diff --git a/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml b/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml
index 2950c76e27d8adb1a910647056ef36a9b2e22009..15b5496e54c4faccac7434c1d6f203ae2dd95f2d 100644
--- a/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml
+++ b/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml
@@ -23,7 +23,7 @@
     kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}"
   when: secrets_encryption_file.stat.exists
 
-- name: Set kube_encrypt_token across master nodes
+- name: Set kube_encrypt_token across control plane nodes
   set_fact:
     kube_encrypt_token: "{{ kube_encrypt_token_extracted }}"
   delegate_to: "{{ item }}"
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml b/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml
index e47f571d339e96a3288941ff174ba7b12bd4ae9e..919e8b64fe72ce0d607bab5071b09505a6549472 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml
@@ -12,6 +12,6 @@
     - kubelet.conf
     - scheduler.conf
   notify:
-    - "Master | Restart kube-controller-manager"
-    - "Master | Restart kube-scheduler"
-    - "Master | reload kubelet"
+    - "Control plane | Restart kube-controller-manager"
+    - "Control plane | Restart kube-scheduler"
+    - "Control plane | reload kubelet"
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index 52700af2e5927b0e87ae68453f33f0303a3fb8f5..18bf2ec0f7d8996d69fdff86467879db36768afb 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -189,7 +189,7 @@
     mode: "0644"
   when: kubeadm_patches is defined and kubeadm_patches.enabled
 
-- name: Kubeadm | Initialize first master
+- name: Kubeadm | Initialize first control plane node
   command: >-
     timeout -k {{ kubeadm_init_timeout }} {{ kubeadm_init_timeout }}
     {{ bin_dir }}/kubeadm init
@@ -205,7 +205,7 @@
   failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
   environment:
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
-  notify: Master | restart kubelet
+  notify: Control plane | restart kubelet
 
 - name: Set kubeadm certificate key
   set_fact:
@@ -250,7 +250,7 @@
   tags:
     - kubeadm_token
 
-- name: Kubeadm | Join other masters
+- name: Kubeadm | Join other control plane nodes
   include_tasks: kubeadm-secondary.yml
 
 - name: Kubeadm | upgrade kubernetes cluster
@@ -260,7 +260,7 @@
     - kubeadm_already_run.stat.exists
 
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
-- name: Kubeadm | Remove taint for master with node role
+- name: Kubeadm | Remove taint for control plane node with node role
   command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
   delegate_to: "{{ first_kube_control_plane }}"
   with_items:
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
index f88921e98ac8ab04eeee8ce5cc60557e5d86cd90..343724c473c7875c4d2ddefb28c149caf535079f 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
@@ -9,7 +9,7 @@
   delay: 5
   until: _result.status == 200
 
-- name: Kubeadm | Upgrade first master
+- name: Kubeadm | Upgrade first control plane node
   command: >-
     timeout -k 600s 600s
     {{ bin_dir }}/kubeadm
@@ -28,9 +28,9 @@
   failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
   environment:
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
-  notify: Master | restart kubelet
+  notify: Control plane | restart kubelet
 
-- name: Kubeadm | Upgrade other masters
+- name: Kubeadm | Upgrade other control plane nodes
   command: >-
     timeout -k 600s 600s
     {{ bin_dir }}/kubeadm
@@ -49,7 +49,7 @@
   failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
   environment:
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
-  notify: Master | restart kubelet
+  notify: Control plane | restart kubelet
 
 - name: Kubeadm | Remove binding to anonymous user
   command: "{{ kubectl }} -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo --ignore-not-found"
diff --git a/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml b/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml
index 409ecb043a089a5619818c12c9ac4cdbc40afc6d..16fec4e4aa917c5b0a1690a320c75f1c29f0e83c 100644
--- a/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml
+++ b/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml
@@ -6,7 +6,7 @@
     line: '    client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem'
     backup: true
   notify:
-    - "Master | reload kubelet"
+    - "Control plane | reload kubelet"
 
 - name: Fixup kubelet client cert rotation 2/2
   lineinfile:
@@ -15,4 +15,4 @@
     line: '    client-key: /var/lib/kubelet/pki/kubelet-client-current.pem'
     backup: true
   notify:
-    - "Master | reload kubelet"
+    - "Control plane | reload kubelet"
diff --git a/roles/kubernetes/control-plane/tasks/pre-upgrade.yml b/roles/kubernetes/control-plane/tasks/pre-upgrade.yml
index 2d7dce5bd0db557b7470dac661b1c5dbf06ca6b4..72534da07acbee632e48eb457300e9498a8e2506 100644
--- a/roles/kubernetes/control-plane/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/control-plane/tasks/pre-upgrade.yml
@@ -1,5 +1,5 @@
 ---
-- name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
+- name: "Pre-upgrade | Delete control plane manifests if etcd secrets changed"
   file:
     path: "/etc/kubernetes/manifests/{{ item }}.manifest"
     state: absent
@@ -8,14 +8,14 @@
   register: kube_apiserver_manifest_replaced
   when: etcd_secret_changed | default(false)
 
-- name: "Pre-upgrade | Delete master containers forcefully"  # noqa no-handler
+- name: "Pre-upgrade | Delete control plane containers forcefully"  # noqa no-handler
   shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
   args:
     executable: /bin/bash
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when: kube_apiserver_manifest_replaced.changed
-  register: remove_master_container
+  register: remove_control_plane_container
   retries: 10
-  until: remove_master_container.rc == 0
+  until: remove_control_plane_container.rc == 0
   delay: 1
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index ad6ac36beafe7592517163f5ae349dcc3e001483..9e01f5fe5fa4077d3c41bb31241b69606ab3db1b 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -71,7 +71,7 @@
     owner: "root"
     mode: "0644"
   when:
-    - not is_kube_master
+    - ('kube_control_plane' not in group_names)
     - not kubelet_conf.stat.exists
     - kubeadm_use_file_discovery
 
@@ -81,7 +81,7 @@
     dest: "{{ kube_config_dir }}/kubeadm-client.conf"
     backup: true
     mode: "0640"
-  when: not is_kube_master
+  when: ('kube_control_plane' not in group_names)
 
 - name: Kubeadm | Create directory to store kubeadm patches
   file:
@@ -101,7 +101,9 @@
 - name: Join to cluster if needed
   environment:
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}:/sbin"
-  when: not is_kube_master and (not kubelet_conf.stat.exists)
+  when:
+    - ('kube_control_plane' not in group_names)
+    - not kubelet_conf.stat.exists
   block:
 
     - name: Join to cluster
@@ -143,7 +145,7 @@
     backup: true
   when:
     - kubeadm_config_api_fqdn is not defined
-    - not is_kube_master
+    - ('kube_control_plane' not in group_names)
     - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
   notify: Kubeadm | restart kubelet
 
@@ -154,7 +156,7 @@
     line: '    server: {{ kube_apiserver_endpoint }}'
     backup: true
   when:
-    - not is_kube_master
+    - ('kube_control_plane' not in group_names)
     - loadbalancer_apiserver is defined
   notify: Kubeadm | restart kubelet
 
@@ -169,8 +171,8 @@
   tags:
     - kube-proxy
 
-# FIXME(mattymo): Need to point to localhost, otherwise masters will all point
-#                 incorrectly to first master, creating SPoF.
+# FIXME(mattymo): Need to point to localhost, otherwise control plane nodes will all point
+#                 incorrectly to first control plane node, creating SPoF.
 - name: Update server field in kube-proxy kubeconfig
   shell: >-
     set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 7b8438e9bba8b385b6e46d857f8564844b7dad18..4d4b011a7df70cb7b2b62ed49eae25aff31b9e4e 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -42,7 +42,7 @@ kube_memory_reserved: 256Mi
 kube_cpu_reserved: 100m
 # kube_ephemeral_storage_reserved: 2Gi
 # kube_pid_reserved: "1000"
-# Reservation for master hosts
+# Reservation for control plane hosts
 kube_master_memory_reserved: 512Mi
 kube_master_cpu_reserved: 200m
 # kube_master_ephemeral_storage_reserved: 2Gi
@@ -56,7 +56,7 @@ system_memory_reserved: 512Mi
 system_cpu_reserved: 500m
 # system_ephemeral_storage_reserved: 2Gi
 # system_pid_reserved: "1000"
-# Reservation for master hosts
+# Reservation for control plane hosts
 system_master_memory_reserved: 256Mi
 system_master_cpu_reserved: 250m
 # system_master_ephemeral_storage_reserved: 2Gi
@@ -136,7 +136,7 @@ kubelet_config_extra_args_cgroupfs:
   systemCgroups: /system.slice
   cgroupRoot: /
 
-## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not masters
+## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not control plane nodes
 kubelet_node_config_extra_args: {}
 
 # Maximum number of container log files that can be present for a container.
@@ -148,7 +148,7 @@ kubelet_logfiles_max_size: 10Mi
 ## Support custom flags to be passed to kubelet
 kubelet_custom_flags: []
 
-## Support custom flags to be passed to kubelet only on nodes, not masters
+## Support custom flags to be passed to kubelet only on nodes, not control plane nodes
 kubelet_node_custom_flags: []
 
 # If non-empty, will use this string as identification instead of the actual hostname
@@ -216,7 +216,7 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK') | default(''
 # azure_vmtype: standard
 # Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
 azure_loadbalancer_sku: basic
-# excludes master nodes from standard load balancer.
+# excludes control plane nodes from standard load balancer.
 azure_exclude_master_from_standard_lb: true
 # disables the outbound SNAT for public load balancer rules
 azure_disable_outbound_snat: false
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 56117bc3a99d08ec2972820414d115f2679a5a25..572850ba0507d9f144a5459a0bf972783fd57773 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -24,7 +24,7 @@
 - name: Install kube-vip
   import_tasks: loadbalancer/kube-vip.yml
   when:
-    - is_kube_master
+    - ('kube_control_plane' in group_names)
     - kube_vip_enabled
   tags:
     - kube-vip
@@ -32,7 +32,7 @@
 - name: Install nginx-proxy
   import_tasks: loadbalancer/nginx-proxy.yml
   when:
-    - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
+    - ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
     - loadbalancer_apiserver_localhost
     - loadbalancer_apiserver_type == 'nginx'
   tags:
@@ -41,7 +41,7 @@
 - name: Install haproxy
   import_tasks: loadbalancer/haproxy.yml
   when:
-    - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
+    - ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
     - loadbalancer_apiserver_localhost
     - loadbalancer_apiserver_type == 'haproxy'
   tags:
diff --git a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
index bc59f03d2c8fa2b8cde555255e4b9182acc0c216..870383c041b21c500e2cdf51071a49fc5896759c 100644
--- a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
+++ b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
@@ -64,7 +64,7 @@ clusterDNS:
 kubeReservedCgroup: {{ kube_reserved_cgroups }}
 {% endif %}
 kubeReserved:
-{% if is_kube_master | bool %}
+{% if 'kube_control_plane' in group_names %}
   cpu: "{{ kube_master_cpu_reserved }}"
   memory: {{ kube_master_memory_reserved }}
 {% if kube_master_ephemeral_storage_reserved is defined %}
@@ -86,7 +86,7 @@ kubeReserved:
 {% if system_reserved | bool %}
 systemReservedCgroup: {{ system_reserved_cgroups }}
 systemReserved:
-{% if is_kube_master | bool %}
+{% if 'kube_control_plane' in group_names %}
   cpu: "{{ system_master_cpu_reserved }}"
   memory: {{ system_master_memory_reserved }}
 {% if system_master_ephemeral_storage_reserved is defined %}
@@ -106,10 +106,10 @@ systemReserved:
 {% endif %}
 {% endif %}
 {% endif %}
-{% if is_kube_master | bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %}
+{% if ('kube_control_plane' in group_names) and (eviction_hard_control_plane is defined) and eviction_hard_control_plane %}
 evictionHard:
   {{ eviction_hard_control_plane | to_nice_yaml(indent=2) | indent(2) }}
-{% elif not is_kube_master | bool and eviction_hard is defined and eviction_hard %}
+{% elif ('kube_control_plane' not in group_names) and (eviction_hard is defined) and eviction_hard %}
 evictionHard:
   {{ eviction_hard | to_nice_yaml(indent=2) | indent(2) }}
 {% endif %}
diff --git a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
index af9ca0674deeac6be83f946e128e9968106463db..51dae8f9dda33ae1858a28bc316bb540921d47aa 100644
--- a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
@@ -60,7 +60,7 @@
     - not ignore_assert_errors
     - inventory_hostname in groups.get('etcd',[])
 
-- name: Stop if memory is too small for masters
+- name: Stop if memory is too small for control plane nodes
   assert:
     that: ansible_memtotal_mb >= minimal_master_memory_mb
   when:
diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
index 507a72d7817a0203db0306c2e74620a0e3b2946e..80d873f891bfb9ce10ae5958f55bc6c4f87bc89c 100644
--- a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
+++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
@@ -15,7 +15,8 @@
     - bootstrap-os
     - apps
     - network
-    - master
+    - master    # master tag is deprecated and replaced by control-plane
+    - control-plane
     - node
   with_items:
     - "{{ kube_config_dir }}"
@@ -39,7 +40,8 @@
     - bootstrap-os
     - apps
     - network
-    - master
+    - master    # master tag is deprecated and replaced by control-plane
+    - control-plane
     - node
   with_items:
     - "{{ kube_cert_dir }}"
diff --git a/roles/kubernetes/tokens/tasks/check-tokens.yml b/roles/kubernetes/tokens/tasks/check-tokens.yml
index d8bb203e91ce5348b095b5a1eff63499feceb250..baa0c9f03168d94a06fe64b094057f6e7f7eba2f 100644
--- a/roles/kubernetes/tokens/tasks/check-tokens.yml
+++ b/roles/kubernetes/tokens/tasks/check-tokens.yml
@@ -1,12 +1,12 @@
 ---
-- name: "Check_tokens | check if the tokens have already been generated on first master"
+- name: "Check_tokens | check if the tokens have already been generated on first control plane node"
   stat:
     path: "{{ kube_token_dir }}/known_tokens.csv"
     get_attributes: false
     get_checksum: true
     get_mime: false
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  register: known_tokens_master
+  register: known_tokens_control_plane
   run_once: true
 
 - name: "Check_tokens | Set default value for 'sync_tokens' and 'gen_tokens' to false"
@@ -17,7 +17,7 @@
 - name: "Check_tokens | Set 'sync_tokens' and 'gen_tokens' to true"
   set_fact:
     gen_tokens: true
-  when: not known_tokens_master.stat.exists and kube_token_auth | default(true)
+  when: not known_tokens_control_plane.stat.exists and kube_token_auth | default(true)
   run_once: true
 
 - name: "Check tokens | check if a cert already exists"
@@ -34,7 +34,7 @@
       {%- set tokens = {'sync': False} -%}
       {%- for server in groups['kube_control_plane'] | intersect(ansible_play_batch)
         if (not hostvars[server].known_tokens.stat.exists) or
-        (hostvars[server].known_tokens.stat.checksum | default('') != known_tokens_master.stat.checksum | default('')) -%}
+        (hostvars[server].known_tokens.stat.checksum | default('') != known_tokens_control_plane.stat.checksum | default('')) -%}
         {%- set _ = tokens.update({'sync': True}) -%}
       {%- endfor -%}
       {{ tokens.sync }}
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index a64aea9e022f4fb0256cbe4ea33f94cc131ed787..c85886a808b26ce91ad374a1d02246c998ec5741 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -8,15 +8,15 @@
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens | default(false)
 
-- name: Gen_tokens | generate tokens for master components
+- name: Gen_tokens | generate tokens for control plane components
   command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
   environment:
     TOKEN_DIR: "{{ kube_token_dir }}"
   with_nested:
     - [ "system:kubectl" ]
     - "{{ groups['kube_control_plane'] }}"
-  register: gentoken_master
-  changed_when: "'Added' in gentoken_master.stdout"
+  register: gentoken_control_plane
+  changed_when: "'Added' in gentoken_control_plane.stdout"
   run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens | default(false)
@@ -34,7 +34,7 @@
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens | default(false)
 
-- name: Gen_tokens | Get list of tokens from first master
+- name: Gen_tokens | Get list of tokens from first control plane node
   command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
   register: tokens_list
   check_mode: false
@@ -52,7 +52,7 @@
   run_once: true
   when: sync_tokens | default(false)
 
-- name: Gen_tokens | Copy tokens on masters
+- name: Gen_tokens | Copy tokens on control plane nodes
   shell: "set -o pipefail && echo '{{ tokens_data.stdout | quote }}' | base64 -d | tar xz -C /"
   args:
     executable: /bin/bash
diff --git a/roles/kubespray-defaults/defaults/main/main.yml b/roles/kubespray-defaults/defaults/main/main.yml
index 4f1dfd2b92a23cafd37694e0e5845842aba4109b..e2d0ec22e612f6d3b33ce54483edad90548fa3d7 100644
--- a/roles/kubespray-defaults/defaults/main/main.yml
+++ b/roles/kubespray-defaults/defaults/main/main.yml
@@ -243,7 +243,7 @@ kube_network_node_prefix_ipv6: 120
 kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
 
 # NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost
-# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
+# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
 kube_apiserver_bind_address: 0.0.0.0
 
 # https
@@ -531,7 +531,6 @@ ssl_ca_dirs: |-
   ]
 
 # Vars for pointing to kubernetes api endpoints
-is_kube_master: "{{ inventory_hostname in groups['kube_control_plane'] }}"
 kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}"
 kube_apiserver_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}"
 kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
@@ -551,9 +550,9 @@ kube_apiserver_global_endpoint: |-
 kube_apiserver_endpoint: |-
   {% if loadbalancer_apiserver is defined -%}
       https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
-  {%- elif not is_kube_master and loadbalancer_apiserver_localhost -%}
+  {%- elif ('kube_control_plane' not in group_names) and loadbalancer_apiserver_localhost -%}
       https://localhost:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }}
-  {%- elif is_kube_master -%}
+  {%- elif 'kube_control_plane' in group_names -%}
       https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0', '127.0.0.1') }}:{{ kube_apiserver_port }}
   {%- else -%}
       https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
@@ -568,7 +567,6 @@ etcd_events_cluster_enabled: false
 etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}"
 
 # Vars for pointing to etcd endpoints
-is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
 etcd_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}"
 etcd_access_address: "{{ access_ip | default(etcd_address) }}"
 etcd_events_access_address: "{{ access_ip | default(etcd_address) }}"
diff --git a/roles/kubespray-defaults/tasks/no_proxy.yml b/roles/kubespray-defaults/tasks/no_proxy.yml
index adec886f4a3b99f45363247280a33f1038ae3c04..c686e655da85ea524ee2b5ccd019c65340592e64 100644
--- a/roles/kubespray-defaults/tasks/no_proxy.yml
+++ b/roles/kubespray-defaults/tasks/no_proxy.yml
@@ -8,11 +8,11 @@
       {{ loadbalancer_apiserver.address | default('') }},
       {%- endif -%}
       {%- if no_proxy_exclude_workers | default(false) -%}
-      {% set cluster_or_master = 'kube_control_plane' %}
+      {% set cluster_or_control_plane = 'kube_control_plane' %}
       {%- else -%}
-      {% set cluster_or_master = 'k8s_cluster' %}
+      {% set cluster_or_control_plane = 'k8s_cluster' %}
       {%- endif -%}
-      {%- for item in (groups[cluster_or_master] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
+      {%- for item in (groups[cluster_or_control_plane] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
       {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }},
       {%- if item != hostvars[item].get('ansible_hostname', '') -%}
       {{ hostvars[item]['ansible_hostname'] }},
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index ff85a5123df5d9f1b090b399b92c32da178dd024..fbaa9fe7eaced00409cd55747c3da963fa2029aa 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -1,7 +1,7 @@
 ---
 # This manifest installs the calico/node container, as well
 # as the Calico CNI plugins and network config on
-# each master and worker node in a Kubernetes cluster.
+# each control plane and worker node in a Kubernetes cluster.
 kind: DaemonSet
 apiVersion: apps/v1
 metadata:
diff --git a/roles/recover_control_plane/control-plane/tasks/main.yml b/roles/recover_control_plane/control-plane/tasks/main.yml
index ec50f3ffdbb106cfa9290747d6670e235bff2284..2dc8113216757e3653d789aabf6f8e616024c57b 100644
--- a/roles/recover_control_plane/control-plane/tasks/main.yml
+++ b/roles/recover_control_plane/control-plane/tasks/main.yml
@@ -15,14 +15,14 @@
   environment:
     KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube_control_plane'] }}"
-  register: delete_broken_kube_masters
+  register: delete_broken_kube_control_plane_nodes
   failed_when: false
   when: groups['broken_kube_control_plane']
 
 - name: Fail if unable to delete broken kube_control_plane nodes from cluster
   fail:
     msg: "Unable to delete broken kube_control_plane node: {{ item.item }}"
-  loop: "{{ delete_broken_kube_masters.results }}"
+  loop: "{{ delete_broken_kube_control_plane_nodes.results }}"
   changed_when: false
   when:
     - groups['broken_kube_control_plane']
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index bc8bfd6d6233a3397ca5debcfa0175dc9adbed1a..473e49f55ee297980dcaa04a59ba741da107ea6f 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -7,7 +7,7 @@
     # ignore servers that are not nodes
     - inventory_hostname in groups['k8s_cluster'] and kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines
   retries: "{{ delete_node_retries }}"
-  # Sometimes the api-server can have a short window of indisponibility when we delete a master node
+  # Sometimes the api-server can have a short window of unavailability when we delete a control plane node
   delay: "{{ delete_node_delay_seconds }}"
   register: result
   until: result is not failed
diff --git a/tests/scripts/testcases_run.sh b/tests/scripts/testcases_run.sh
index 6e01fb5bb4fd6dc80bc209ef4ce83f1ce1f3ef55..44ef4f04cc7773d38150403d4fa7997de9cc0fe9 100755
--- a/tests/scripts/testcases_run.sh
+++ b/tests/scripts/testcases_run.sh
@@ -122,7 +122,7 @@ EOF
 
 fi
 # Tests Cases
-## Test Master API
+## Test Control Plane API
 run_playbook tests/testcases/010_check-apiserver.yml
 run_playbook tests/testcases/015_check-nodes-ready.yml