diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 8161944e7a0b8b6b448e3c31574c9f81dfc02b0d..412f63d3c146ecf2b644d86e39550ec3ac67c138 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -30,7 +30,7 @@ variables:
   MITOGEN_ENABLE: "false"
   ANSIBLE_LOG_LEVEL: "-vv"
   RECOVER_CONTROL_PLANE_TEST: "false"
-  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
 
 before_script:
   - ./tests/scripts/rebase.sh
diff --git a/.gitlab-ci/packet.yml b/.gitlab-ci/packet.yml
index e5e9e4c27974943df9d929c58a12a858d5ad3e95..a349763604952083f4fabd48f3bbbbb53d2b3544 100644
--- a/.gitlab-ci/packet.yml
+++ b/.gitlab-ci/packet.yml
@@ -223,7 +223,7 @@ packet_ubuntu18-calico-ha-recover:
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
 
 packet_ubuntu18-calico-ha-recover-noquorum:
   stage: deploy-part3
@@ -231,4 +231,4 @@ packet_ubuntu18-calico-ha-recover-noquorum:
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
diff --git a/Vagrantfile b/Vagrantfile
index 1c43f280b46b7bb607b429ab7a1f40e8cb77ad79..5ee9e46374d96409ec5433a682b7b455b9e1e751 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -253,9 +253,9 @@ Vagrant.configure("2") do |config|
         #ansible.tags = ['download']
         ansible.groups = {
           "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
-          "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
+          "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
           "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
-          "k8s-cluster:children" => ["kube-master", "kube-node"],
+          "k8s-cluster:children" => ["kube_control_plane", "kube-node"],
         }
       end
     end
diff --git a/cluster.yml b/cluster.yml
index cf6942a6ed5dcc98b462511450151cfcaea3983b..6a169e9b07204640ad218550f65947a70ad7de12 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventory which contains kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
@@ -66,7 +75,7 @@
     - { role: kubespray-defaults }
     - { role: kubernetes/node, tags: node }
 
-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -94,7 +103,7 @@
     - { role: kubespray-defaults }
     - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -102,7 +111,7 @@
     - { role: kubespray-defaults }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -114,7 +123,7 @@
     - { role: kubespray-defaults }
     - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
     - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
 
-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
diff --git a/contrib/aws_inventory/kubespray-aws-inventory.py b/contrib/aws_inventory/kubespray-aws-inventory.py
index 91a848b62b36532050561902cf4c2b31e0d1d2be..46ad6a0631026ff231f733bc03f7e71707f43793 100755
--- a/contrib/aws_inventory/kubespray-aws-inventory.py
+++ b/contrib/aws_inventory/kubespray-aws-inventory.py
@@ -35,7 +35,7 @@ class SearchEC2Tags(object):
     hosts['_meta'] = { 'hostvars': {} }
 
     ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
-    for group in ["kube-master", "kube-node", "etcd"]:
+    for group in ["kube_control_plane", "kube-node", "etcd"]:
       hosts[group] = []
       tag_key = "kubespray-role"
       tag_value = ["*"+group+"*"]
@@ -70,7 +70,7 @@ class SearchEC2Tags(object):
       hosts[group].append(dns_name)
       hosts['_meta']['hostvars'][dns_name] = ansible_host
 
-    hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
+    hosts['k8s-cluster'] = {'children':['kube_control_plane', 'kube-node']}
     print(json.dumps(hosts, sort_keys=True, indent=2))
 
 SearchEC2Tags()
diff --git a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory/templates/inventory.j2
index cd93a2bb673e83f936a7d71179d44d198b6ae48a..8a13cc635f6cb4d95ca089e3884a575f5de0bc84 100644
--- a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2
+++ b/contrib/azurerm/roles/generate-inventory/templates/inventory.j2
@@ -7,9 +7,9 @@
 {% endif %}
 {% endfor %}
 
-[kube-master]
+[kube_control_plane]
 {% for vm in vm_list %}
-{% if 'kube-master' in vm.tags.roles %}
+{% if 'kube_control_plane' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}
@@ -30,4 +30,4 @@
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
diff --git a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
index 21f7bbf1c5cdedf319862364194395056b820e76..61183cd1d678b45ace8a5a6f4d2ca5e1b890e096 100644
--- a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
+++ b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
@@ -7,9 +7,9 @@
 {% endif %}
 {% endfor %}
 
-[kube-master]
+[kube_control_plane]
 {% for vm in vm_roles_list %}
-{% if 'kube-master' in vm.tags.roles %}
+{% if 'kube_control_plane' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}
@@ -30,5 +30,5 @@
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 
diff --git a/contrib/azurerm/roles/generate-templates/templates/masters.json b/contrib/azurerm/roles/generate-templates/templates/masters.json
index 69a42cb689eb986748fa87f0f21efb379a0ef30c..b299383a66efd1ab3a644b03fdd7d354814cf1d2 100644
--- a/contrib/azurerm/roles/generate-templates/templates/masters.json
+++ b/contrib/azurerm/roles/generate-templates/templates/masters.json
@@ -144,7 +144,7 @@
         "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
       ],
       "tags": {
-        "roles": "kube-master,etcd"
+        "roles": "kube_control_plane,etcd"
       },
       "apiVersion": "{{apiVersion}}",
       "properties": {
diff --git a/contrib/inventory_builder/inventory.py b/contrib/inventory_builder/inventory.py
index 66d6184741773cd04ede08afdf8997cff207e033..814085a73e9d1d418349f305df8c075289a931fd 100644
--- a/contrib/inventory_builder/inventory.py
+++ b/contrib/inventory_builder/inventory.py
@@ -44,7 +44,7 @@
 import re
 import subprocess
 import sys
-ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
+ROLES = ['all', 'kube_control_plane', 'kube-node', 'etcd', 'k8s-cluster',
          'calico-rr']
 PROTECTED_NAMES = ROLES
 AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
@@ -299,21 +299,23 @@ class KubesprayInventory(object):
 
     def set_kube_control_plane(self, hosts):
         for host in hosts:
-            self.add_host_to_group('kube-master', host)
+            self.add_host_to_group('kube_control_plane', host)
 
     def set_all(self, hosts):
         for host, opts in hosts.items():
             self.add_host_to_group('all', host, opts)
 
     def set_k8s_cluster(self):
-        k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
+        k8s_cluster = {'children': {'kube_control_plane': None,
+                                    'kube-node': None}}
         self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
 
     def set_calico_rr(self, hosts):
         for host in hosts:
-            if host in self.yaml_config['all']['children']['kube-master']:
+            if host in self.yaml_config['all']['children']['kube_control_plane']:  # noqa
                 self.debug("Not adding {0} to calico-rr group because it "
-                           "conflicts with kube-master group".format(host))
+                           "conflicts with kube_control_plane "
+                           "group".format(host))
                 continue
             if host in self.yaml_config['all']['children']['kube-node']:
                 self.debug("Not adding {0} to calico-rr group because it "
@@ -330,10 +332,10 @@ class KubesprayInventory(object):
                                "group.".format(host))
                     continue
             if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
-                if host in self.yaml_config['all']['children']['kube-master']['hosts']:  # noqa
+                if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
                     self.debug("Not adding {0} to kube-node group because of "
-                               "scale deployment and host is in kube-master "
-                               "group.".format(host))
+                               "scale deployment and host is in "
+                               "kube_control_plane group.".format(host))
                     continue
             self.add_host_to_group('kube-node', host)
 
diff --git a/contrib/inventory_builder/tests/test_inventory.py b/contrib/inventory_builder/tests/test_inventory.py
index afcbe75003d0ae900d2b993e4da29fbb220cf3ed..c76990240d7752af8ccb6066772c51c1ec5a57f1 100644
--- a/contrib/inventory_builder/tests/test_inventory.py
+++ b/contrib/inventory_builder/tests/test_inventory.py
@@ -223,7 +223,7 @@ class TestInventory(unittest.TestCase):
             None)
 
     def test_set_kube_control_plane(self):
-        group = 'kube-master'
+        group = 'kube_control_plane'
         host = 'node1'
 
         self.inv.set_kube_control_plane([host])
@@ -242,7 +242,7 @@ class TestInventory(unittest.TestCase):
 
     def test_set_k8s_cluster(self):
         group = 'k8s-cluster'
-        expected_hosts = ['kube-node', 'kube-master']
+        expected_hosts = ['kube-node', 'kube_control_plane']
 
         self.inv.set_k8s_cluster()
         for host in expected_hosts:
diff --git a/contrib/network-storage/glusterfs/glusterfs.yml b/contrib/network-storage/glusterfs/glusterfs.yml
index e5b6f1301be1b4f03dcfb9455de8193628b8ac3e..8146dfc06e377fa4aff94c235503863fb960bdea 100644
--- a/contrib/network-storage/glusterfs/glusterfs.yml
+++ b/contrib/network-storage/glusterfs/glusterfs.yml
@@ -19,6 +19,6 @@
   roles:
     - { role: glusterfs/client }
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   roles:
     - { role: kubernetes-pv }
diff --git a/contrib/network-storage/glusterfs/inventory.example b/contrib/network-storage/glusterfs/inventory.example
index 15fbad0a815509043a82b3f1d998b9fe1fdb4c65..dc77b4b0a6540e6b309a253300ab08e090eda5eb 100644
--- a/contrib/network-storage/glusterfs/inventory.example
+++ b/contrib/network-storage/glusterfs/inventory.example
@@ -14,7 +14,7 @@
 # gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
 # gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
 
-# [kube-master]
+# [kube_control_plane]
 # node1
 # node2
 
@@ -32,7 +32,7 @@
 
 # [k8s-cluster:children]
 # kube-node
-# kube-master
+# kube_control_plane
 
 # [gfs-cluster]
 # gfs_node1
diff --git a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
index baf8356b6d8a4e9f8d49b0b89aa8e9f2f100a03b..5ed8f69449dc9f32e33798a9edf104b98655e92d 100644
--- a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
+++ b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
@@ -8,7 +8,7 @@
     - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
     - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
   register: gluster_pv
-  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
+  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
 
 - name: Kubernetes Apps | Set GlusterFS endpoint and PV
   kube:
@@ -19,4 +19,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ gluster_pv.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
+  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
diff --git a/contrib/network-storage/heketi/heketi-tear-down.yml b/contrib/network-storage/heketi/heketi-tear-down.yml
index 92b9f92d64e27e25bcd698abd95ea2d7c9a82f1a..9e2d1f45a857356038d1c118e1c35c77216ddb05 100644
--- a/contrib/network-storage/heketi/heketi-tear-down.yml
+++ b/contrib/network-storage/heketi/heketi-tear-down.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   roles:
     - { role: tear-down }
 
diff --git a/contrib/network-storage/heketi/heketi.yml b/contrib/network-storage/heketi/heketi.yml
index 3ec719e95b7fa4f44207713466c945990f576209..2309267b1ab8fb7bf15f4ca20d796b7b6cbf7711 100644
--- a/contrib/network-storage/heketi/heketi.yml
+++ b/contrib/network-storage/heketi/heketi.yml
@@ -3,7 +3,7 @@
   roles:
     - { role: prepare }
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   tags:
     - "provision"
   roles:
diff --git a/contrib/network-storage/heketi/inventory.yml.sample b/contrib/network-storage/heketi/inventory.yml.sample
index 7d488d1ba14833b12ea3f0be3e7e753ba6cb8505..46adbed44634c1303a5e46e168daf45fc8aa8dd7 100644
--- a/contrib/network-storage/heketi/inventory.yml.sample
+++ b/contrib/network-storage/heketi/inventory.yml.sample
@@ -7,7 +7,7 @@ all:
   vars:
     kubelet_fail_swap_on: false
   children:
-    kube-master:
+    kube_control_plane:
       hosts:
         node1:
     etcd:
diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md
index ea58de93e87beeae38f11b7f9c5c212afc7a19b9..993d2bb848f715032d6ecb9873def106707ade85 100644
--- a/contrib/terraform/aws/README.md
+++ b/contrib/terraform/aws/README.md
@@ -122,7 +122,7 @@ You can use the following set of commands to get the kubeconfig file from your n
 
 ```commandline
 # Get the controller's IP address.
-CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube-master\]" -A 1 | tail -n 1)
+CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
 CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)
 
 # Get the hostname of the load balancer.
diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf
index 72aa27c79d7e2116a0b518dde41cf98772012cce..1c7f036b7996df17ee998a505d45fc86b271a867 100644
--- a/contrib/terraform/aws/create-infrastructure.tf
+++ b/contrib/terraform/aws/create-infrastructure.tf
@@ -84,7 +84,7 @@ resource "aws_instance" "k8s-master" {
 
   vpc_security_group_ids = module.aws-vpc.aws_security_group
 
-  iam_instance_profile = module.aws-iam.kube-master-profile
+  iam_instance_profile = module.aws-iam.kube_control_plane-profile
   key_name = var.AWS_SSH_KEY_NAME
 
   tags = merge(var.default_tags, map(
diff --git a/contrib/terraform/aws/modules/iam/main.tf b/contrib/terraform/aws/modules/iam/main.tf
index 1dc3c3a6a86c5fc6eb37d440377f249831d017b2..a35afc7e5965d23891d9dcc59c134a41037fbb2a 100644
--- a/contrib/terraform/aws/modules/iam/main.tf
+++ b/contrib/terraform/aws/modules/iam/main.tf
@@ -1,6 +1,6 @@
 #Add AWS Roles for Kubernetes
 
-resource "aws_iam_role" "kube-master" {
+resource "aws_iam_role" "kube_control_plane" {
   name = "kubernetes-${var.aws_cluster_name}-master"
   assume_role_policy = <<EOF
 
@@ -40,9 +40,9 @@ EOF
 
 #Add AWS Policies for Kubernetes
 
-resource "aws_iam_role_policy" "kube-master" {
+resource "aws_iam_role_policy" "kube_control_plane" {
   name = "kubernetes-${var.aws_cluster_name}-master"
-  role = aws_iam_role.kube-master.id
+  role = aws_iam_role.kube_control_plane.id
   policy = <<EOF
 
 {
@@ -130,9 +130,9 @@ EOF
 
 #Create AWS Instance Profiles
 
-resource "aws_iam_instance_profile" "kube-master" {
+resource "aws_iam_instance_profile" "kube_control_plane" {
   name = "kube_${var.aws_cluster_name}_master_profile"
-  role = aws_iam_role.kube-master.name
+  role = aws_iam_role.kube_control_plane.name
 }
 
 resource "aws_iam_instance_profile" "kube-worker" {
diff --git a/contrib/terraform/aws/modules/iam/outputs.tf b/contrib/terraform/aws/modules/iam/outputs.tf
index e8a276617779d58f04eae95a77e15b2b45c88f6e..724e46cc832e91bc54a701affc054c80ec7ecb89 100644
--- a/contrib/terraform/aws/modules/iam/outputs.tf
+++ b/contrib/terraform/aws/modules/iam/outputs.tf
@@ -1,5 +1,5 @@
-output "kube-master-profile" {
-  value = aws_iam_instance_profile.kube-master.name
+output "kube_control_plane-profile" {
+  value = aws_iam_instance_profile.kube_control_plane.name
 }
 
 output "kube-worker-profile" {
diff --git a/contrib/terraform/aws/templates/inventory.tpl b/contrib/terraform/aws/templates/inventory.tpl
index beb4f76a903b6c84ba6370685a42ecab5fc4f7b5..d8fe2f995a0a2340c392a4e86fab2c91bc63de84 100644
--- a/contrib/terraform/aws/templates/inventory.tpl
+++ b/contrib/terraform/aws/templates/inventory.tpl
@@ -7,7 +7,7 @@ ${public_ip_address_bastion}
 [bastion]
 ${public_ip_address_bastion}
 
-[kube-master]
+[kube_control_plane]
 ${list_master}
 
 
@@ -21,7 +21,7 @@ ${list_etcd}
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 
 
 [k8s-cluster:vars]
diff --git a/contrib/terraform/exoscale/templates/inventory.tpl b/contrib/terraform/exoscale/templates/inventory.tpl
index fd9a03484db980b63a7d3750c09369602be973ff..27b9e60f36a670842d50d39ddb08f917907fb49f 100644
--- a/contrib/terraform/exoscale/templates/inventory.tpl
+++ b/contrib/terraform/exoscale/templates/inventory.tpl
@@ -2,10 +2,10 @@
 ${connection_strings_master}
 ${connection_strings_worker}
 
-[kube-master]
+[kube_control_plane]
 ${list_master}
 
-[kube-master:vars]
+[kube_control_plane:vars]
 supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]
 
 [etcd]
@@ -15,5 +15,5 @@ ${list_master}
 ${list_worker}
 
 [k8s-cluster:children]
-kube-master
+kube_control_plane
 kube-node
diff --git a/contrib/terraform/gcp/generate-inventory.sh b/contrib/terraform/gcp/generate-inventory.sh
index 36cbcd776f03c2235db7eafa8ab51f48401f08f8..d266b18992653c0cd997010bb5204fa3314a0fa6 100755
--- a/contrib/terraform/gcp/generate-inventory.sh
+++ b/contrib/terraform/gcp/generate-inventory.sh
@@ -50,13 +50,13 @@ for name in "${WORKER_NAMES[@]}"; do
 done
 
 echo ""
-echo "[kube-master]"
+echo "[kube_control_plane]"
 for name in "${MASTER_NAMES[@]}"; do
     echo "${name}"
 done
 
 echo ""
-echo "[kube-master:vars]"
+echo "[kube_control_plane:vars]"
 echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate
 echo ""
 echo "[etcd]"
@@ -72,5 +72,5 @@ done
 
 echo ""
 echo "[k8s-cluster:children]"
-echo "kube-master"
+echo "kube_control_plane"
 echo "kube-node"
diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf
index c8d9f4ff39ffc3f6775b5cb1e035d52c4011be9b..5084468f54d469239154ca4516fafdc6f4166a2b 100644
--- a/contrib/terraform/openstack/modules/compute/main.tf
+++ b/contrib/terraform/openstack/modules/compute/main.tf
@@ -245,7 +245,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
@@ -292,7 +292,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
@@ -379,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
@@ -421,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
 
   metadata = {
     ssh_user         = var.ssh_user
-    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
     depends_on       = var.network_id
     use_access_ip    = var.use_access_ip
   }
diff --git a/contrib/terraform/packet/kubespray.tf b/contrib/terraform/packet/kubespray.tf
index 568db0dd771782e40bf60b00c6fdcad4bab8afc2..00cf21ff07e538c8725afe5f2f3ad40d3a18bca2 100644
--- a/contrib/terraform/packet/kubespray.tf
+++ b/contrib/terraform/packet/kubespray.tf
@@ -19,7 +19,7 @@ resource "packet_device" "k8s_master" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"]
+  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane", "etcd", "kube-node"]
 }
 
 resource "packet_device" "k8s_master_no_etcd" {
@@ -32,7 +32,7 @@ resource "packet_device" "k8s_master_no_etcd" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master"]
+  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane"]
 }
 
 resource "packet_device" "k8s_etcd" {
diff --git a/contrib/terraform/upcloud/templates/inventory.tpl b/contrib/terraform/upcloud/templates/inventory.tpl
index 26d65e67ba6d3dadb87cb7ce7e71549c4dd1432a..cb453e3ea29c3dcdd758a01e6773aa28c909452b 100644
--- a/contrib/terraform/upcloud/templates/inventory.tpl
+++ b/contrib/terraform/upcloud/templates/inventory.tpl
@@ -3,7 +3,7 @@
 ${connection_strings_master}
 ${connection_strings_worker}
 
-[kube-master]
+[kube_control_plane]
 ${list_master}
 
 [etcd]
@@ -13,5 +13,5 @@ ${list_master}
 ${list_worker}
 
 [k8s-cluster:children]
-kube-master
+kube_control_plane
 kube-node
diff --git a/contrib/terraform/vsphere/templates/inventory.tpl b/contrib/terraform/vsphere/templates/inventory.tpl
index 26d65e67ba6d3dadb87cb7ce7e71549c4dd1432a..cb453e3ea29c3dcdd758a01e6773aa28c909452b 100644
--- a/contrib/terraform/vsphere/templates/inventory.tpl
+++ b/contrib/terraform/vsphere/templates/inventory.tpl
@@ -3,7 +3,7 @@
 ${connection_strings_master}
 ${connection_strings_worker}
 
-[kube-master]
+[kube_control_plane]
 ${list_master}
 
 [etcd]
@@ -13,5 +13,5 @@ ${list_master}
 ${list_worker}
 
 [k8s-cluster:children]
-kube-master
+kube_control_plane
 kube-node
diff --git a/contrib/vault/roles/kubernetes/vault-secrets/tasks/gen_certs_vault.yml b/contrib/vault/roles/kubernetes/vault-secrets/tasks/gen_certs_vault.yml
index 8a847b0025efed1f19a01181855b9b4ecde49b30..75f155060a6d1bafcc93f11760d0f46ee6d97af7 100644
--- a/contrib/vault/roles/kubernetes/vault-secrets/tasks/gen_certs_vault.yml
+++ b/contrib/vault/roles/kubernetes/vault-secrets/tasks/gen_certs_vault.yml
@@ -1,30 +1,30 @@
 ---
 - import_tasks: sync_kube_master_certs.yml
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
 
 - import_tasks: sync_kube_node_certs.yml
   when: inventory_hostname in groups['k8s-cluster']
 
-# Issue admin certs to kube-master hosts
+# Issue admin certs to kube_control_plane hosts
 - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
   vars:
     issue_cert_common_name: "admin"
     issue_cert_copy_ca: "{{ item == kube_admin_certs_needed|first }}"
     issue_cert_file_group: "{{ kube_cert_group }}"
     issue_cert_file_owner: kube
-    issue_cert_hosts: "{{ groups['kube-master'] }}"
+    issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
     issue_cert_path: "{{ item }}"
-    issue_cert_role: kube-master
+    issue_cert_role: kube_control_plane
     issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
    issue_cert_mount_path: "{{ kube_vault_mount_path }}"
   with_items: "{{ kube_admin_certs_needed|d([]) }}"
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
 
 - name: gen_certs_vault | Set fact about certificate alt names
   set_fact:
     kube_cert_alt_names: >-
       {{
-      groups['kube-master'] +
+      groups['kube_control_plane'] +
       ['kubernetes.default.svc.'+cluster_name, 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] +
       ['localhost']
       }}
@@ -36,18 +36,18 @@
   when: loadbalancer_apiserver is defined
   run_once: true
 
-# Issue master components certs to kube-master hosts
+# Issue master components certs to kube_control_plane hosts
 - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
   vars:
     issue_cert_common_name: "kubernetes"
     issue_cert_alt_names: "{{ kube_cert_alt_names }}"
     issue_cert_file_group: "{{ kube_cert_group }}"
     issue_cert_file_owner: kube
-    issue_cert_hosts: "{{ groups['kube-master'] }}"
+    issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
     issue_cert_run_once: true
     issue_cert_ip_sans: >-
       [
-      {%- for host in groups['kube-master'] -%}
+      {%- for host in groups['kube_control_plane'] -%}
       "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
       {%- if hostvars[host]['ip'] is defined -%}
       "{{ hostvars[host]['ip'] }}",
@@ -61,11 +61,11 @@
       "127.0.0.1","::1","{{ kube_apiserver_ip }}"
       ]
     issue_cert_path: "{{ item }}"
-    issue_cert_role: kube-master
+    issue_cert_role: kube_control_plane
     issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
     issue_cert_mount_path: "{{ kube_vault_mount_path }}"
   with_items: "{{ kube_master_components_certs_needed|d([]) }}"
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
   notify: set secret_changed
 
 # Issue node certs to k8s-cluster nodes
@@ -100,7 +100,7 @@
   with_items: "{{ kube_proxy_certs_needed|d([]) }}"
   when: inventory_hostname in groups['k8s-cluster']
 
-# Issue front proxy cert to kube-master hosts
+# Issue front proxy cert to kube_control_plane hosts
 - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
   vars:
     issue_cert_common_name: "front-proxy-client"
@@ -109,10 +109,10 @@
     issue_cert_alt_names: "{{ kube_cert_alt_names }}"
     issue_cert_file_group: "{{ kube_cert_group }}"
     issue_cert_file_owner: kube
-    issue_cert_hosts: "{{ groups['kube-master'] }}"
+    issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
     issue_cert_ip_sans: >-
       [
-      {%- for host in groups['kube-master'] -%}
+      {%- for host in groups['kube_control_plane'] -%}
       "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
       {%- if hostvars[host]['ip'] is defined -%}
       "{{ hostvars[host]['ip'] }}",
@@ -130,5 +130,5 @@
     issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
     issue_cert_mount_path: "{{ kube_vault_mount_path }}"
   with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}"
-  when: inventory_hostname in groups['kube-master']
+  when: inventory_hostname in groups['kube_control_plane']
   notify: set secret_changed
diff --git a/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_master_certs.yml b/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_master_certs.yml
index 50e1a01e784dde373beff14e9be5277690c12bbb..6db7c9ddfecc1d7f1f91cedb89628e457e80574c 100644
--- a/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_master_certs.yml
+++ b/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_master_certs.yml
@@ -29,7 +29,7 @@
     sync_file: "{{ item }}"
     sync_file_dir: "{{ kube_cert_dir }}"
     sync_file_group: "{{ kube_cert_group }}"
-    sync_file_hosts: "{{ groups['kube-master'] }}"
+    sync_file_hosts: "{{ groups['kube_control_plane'] }}"
     sync_file_is_cert: true
     sync_file_owner: kube
   with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"]
@@ -49,7 +49,7 @@
     sync_file: front-proxy-ca.pem
     sync_file_dir: "{{ kube_cert_dir }}"
     sync_file_group: "{{ kube_cert_group }}"
-    sync_file_hosts: "{{ groups['kube-master'] }}"
+    sync_file_hosts: "{{ groups['kube_control_plane'] }}"
     sync_file_owner: kube
 
 - name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem
@@ -61,7 +61,7 @@
     sync_file: "{{ item }}"
     sync_file_dir: "{{ kube_cert_dir }}"
     sync_file_group: "{{ kube_cert_group }}"
-    sync_file_hosts: "{{ groups['kube-master'] }}"
+    sync_file_hosts: "{{ groups['kube_control_plane'] }}"
     sync_file_is_cert: true
     sync_file_owner: kube
   with_items: ["front-proxy-client.pem"]
@@ -81,7 +81,7 @@
     sync_file: ca.pem
     sync_file_dir: "{{ kube_cert_dir }}"
     sync_file_group: "{{ kube_cert_group }}"
-    sync_file_hosts: "{{ groups['kube-master'] }}"
+    sync_file_hosts: "{{ groups['kube_control_plane'] }}"
     sync_file_owner: kube
 
 - name: sync_kube_master_certs | Unset sync_file_results after ca.pem
diff --git a/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_node_certs.yml b/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_node_certs.yml
index eecb4cfdf784e2c61669fd4ff2803d83aa44205e..059359a5842d116916cda5f7932c16b589b4a67a 100644
--- a/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_node_certs.yml
+++ b/contrib/vault/roles/kubernetes/vault-secrets/tasks/sync_kube_node_certs.yml
@@ -14,7 +14,7 @@
     sync_file_owner: kube
   with_items: "{{ kube_node_cert_list|default([]) }}"
 
-- name: sync_kube_node_certs | Set facts for kube-master sync_file results
+- name: sync_kube_node_certs | Set facts for kube_control_plane sync_file results
   set_fact:
     kube_node_certs_needed: "{{ kube_node_certs_needed|default([]) + [item.path] }}"
   with_items: "{{ sync_file_results|d([]) }}"
diff --git a/contrib/vault/roles/vault/defaults/main.yml b/contrib/vault/roles/vault/defaults/main.yml
index 0b27e03ff5befc5e3e84f48ac6f5574b101d119b..ececdc2ad42443217114d7697370ea499b4f73c0 100644
--- a/contrib/vault/roles/vault/defaults/main.yml
+++ b/contrib/vault/roles/vault/defaults/main.yml
@@ -166,16 +166,16 @@ vault_pki_mounts:
     description: "Kubernetes Root CA"
     cert_dir: "{{ kube_cert_dir }}"
     roles:
-      - name: kube-master
-        group: kube-master
-        password: "{{ lookup('password', credentials_dir + '/vault/kube-master.creds length=15') }}"
+      - name: kube_control_plane
+        group: kube_control_plane
+        password: "{{ lookup('password', credentials_dir + '/vault/kube_control_plane.creds length=15') }}"
         policy_rules: default
         role_options:
           allow_any_name: true
          enforce_hostnames: false
           organization: "system:masters"
       - name: front-proxy-client
-        group: kube-master
+        group: kube_control_plane
         password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
         policy_rules: default
         role_options:
diff --git a/contrib/vault/roles/vault/tasks/bootstrap/main.yml b/contrib/vault/roles/vault/tasks/bootstrap/main.yml
index e4e67d11fcd348d2640c6373cb3550eb79f7477c..419e22e1bd8660188f4a65b534213714f35d7835 100644
--- a/contrib/vault/roles/vault/tasks/bootstrap/main.yml
+++ b/contrib/vault/roles/vault/tasks/bootstrap/main.yml
@@ -51,7 +51,7 @@
     gen_ca_mount_path: "/{{ vault_pki_mounts.vault.name }}"
     gen_ca_vault_headers: "{{ vault_headers }}"
     gen_ca_vault_options: "{{ vault_ca_options.vault }}"
-    gen_ca_copy_group: "kube-master"
+    gen_ca_copy_group: "kube_control_plane"
   when: >-
     inventory_hostname in groups.vault
     and not vault_cluster_is_initialized
diff --git a/contrib/vault/roles/vault/tasks/bootstrap/sync_vault_certs.yml b/contrib/vault/roles/vault/tasks/bootstrap/sync_vault_certs.yml
index cf499099a3ba5171ee5334afa8c0ac76a6de2f5d..28c438ca7674bacccbb86a268e3c311613cd41db 100644
--- a/contrib/vault/roles/vault/tasks/bootstrap/sync_vault_certs.yml
+++ b/contrib/vault/roles/vault/tasks/bootstrap/sync_vault_certs.yml
@@ -21,7 +21,7 @@
   vars:
     sync_file: "ca.pem"
sync_file_dir: "{{ vault_cert_dir }}" - sync_file_hosts: "{{ groups['kube-master'] }}" + sync_file_hosts: "{{ groups['kube_control_plane'] }}" sync_file_owner: vault sync_file_group: root sync_file_is_cert: false diff --git a/contrib/vault/roles/vault/tasks/cluster/main.yml b/contrib/vault/roles/vault/tasks/cluster/main.yml index 3ed23b2cc2d372fa56de9d64ca9b9007cdf99a9c..74b399c250da2305ae85f59be6c97b5ada5401e6 100644 --- a/contrib/vault/roles/vault/tasks/cluster/main.yml +++ b/contrib/vault/roles/vault/tasks/cluster/main.yml @@ -35,7 +35,7 @@ gen_ca_mount_path: "/{{ vault_pki_mounts.kube.name }}" gen_ca_vault_headers: "{{ vault_headers }}" gen_ca_vault_options: "{{ vault_ca_options.kube }}" - gen_ca_copy_group: "kube-master" + gen_ca_copy_group: "kube_control_plane" when: inventory_hostname in groups.vault - include_tasks: ../shared/auth_backend.yml diff --git a/docs/ansible.md b/docs/ansible.md index 848862ff5aa7006e0e7c0976491228ce44f34d97..202a793332e2dd05e194a46922a234d1f10de65a 100644 --- a/docs/ansible.md +++ b/docs/ansible.md @@ -5,7 +5,7 @@ The inventory is composed of 3 groups: * **kube-node** : list of kubernetes nodes where the pods will run. -* **kube-master** : list of servers where kubernetes master components (apiserver, scheduler, controller) will run. +* **kube_control_plane** : list of servers where kubernetes control plane components (apiserver, scheduler, controller) will run. * **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose. Note: do not modify the children of _k8s-cluster_, like putting @@ -18,9 +18,9 @@ k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd When _kube-node_ contains _etcd_, you define your etcd cluster to be as well schedulable for Kubernetes workloads. If you want it a standalone, make sure those groups do not intersect. -If you want the server to act both as master and node, the server must be defined -on both groups _kube-master_ and _kube-node_. If you want a standalone and -unschedulable master, the server must be defined only in the _kube-master_ and +If you want the server to act both as control-plane and node, the server must be defined +on both groups _kube_control_plane_ and _kube-node_. If you want a standalone and +unschedulable master, the server must be defined only in the _kube_control_plane_ and not _kube-node_. There are also two special groups: @@ -40,7 +40,7 @@ node4 ansible_host=95.54.0.15 ip=10.3.0.4 node5 ansible_host=95.54.0.16 ip=10.3.0.5 node6 ansible_host=95.54.0.17 ip=10.3.0.6 -[kube-master] +[kube_control_plane] node1 node2 @@ -58,7 +58,7 @@ node6 [k8s-cluster:children] kube-node -kube-master +kube_control_plane ``` ## Group vars and overriding variables precedence diff --git a/docs/aws.md b/docs/aws.md index c76d1cfdf43f1ed7e0ec84408ca7ccbe6755119f..0e680e0d12dd7abd943e2614ca955376bc2c5fa0 100644 --- a/docs/aws.md +++ b/docs/aws.md @@ -35,11 +35,11 @@ This will produce an inventory that is passed into Ansible that looks like the f ], "k8s-cluster": { "children": [ - "kube-master", + "kube_control_plane", "kube-node" ] }, - "kube-master": [ + "kube_control_plane": [ "ip-172-31-3-xxx.us-east-2.compute.internal" ], "kube-node": [ @@ -51,7 +51,7 @@ This will produce an inventory that is passed into Ansible that looks like the f Guide: - Create instances in AWS as needed. -- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. 
You can also share roles like `kube-master, etcd` +- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube-node`. You can also share roles like `kube_control_plane, etcd` - Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory. - Set the following AWS credentials and info as environment variables in your terminal: diff --git a/docs/calico.md b/docs/calico.md index 7e5f865681a0470f10d767e780d139aa141066b8..45d1b0e909a0a752129dff91c8d53194501c53de 100644 --- a/docs/calico.md +++ b/docs/calico.md @@ -122,7 +122,7 @@ recommended here: You need to edit your inventory and add: * `calico-rr` group with nodes in it. `calico-rr` can be combined with - `kube-node` and/or `kube-master`. `calico-rr` group also must be a child + `kube-node` and/or `kube_control_plane`. `calico-rr` group also must be a child group of `k8s-cluster` group. * `cluster_id` by route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/)) @@ -138,7 +138,7 @@ node3 ansible_ssh_host=10.210.1.13 ip=10.210.1.13 node4 ansible_ssh_host=10.210.1.14 ip=10.210.1.14 node5 ansible_ssh_host=10.210.1.15 ip=10.210.1.15 -[kube-master] +[kube_control_plane] node2 node3 @@ -155,7 +155,7 @@ node5 [k8s-cluster:children] kube-node -kube-master +kube_control_plane calico-rr [calico-rr] diff --git a/docs/downloads.md b/docs/downloads.md index 781543add9aec66eb368bf2f5361494a3d6551de..4369120d4dce9e2743f54e0a88a9262194ad26a1 100644 --- a/docs/downloads.md +++ b/docs/downloads.md @@ -8,7 +8,7 @@ Kubespray supports several download/upload modes. The default is: There is also a "pull once, push many" mode as well: -* Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube-master`. +* Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube_control_plane`. * Set ``download_localhost: True`` to make localhost the download delegate. This can be useful if cluster nodes cannot access external addresses. To use this requires that docker is installed and running on the ansible master and that the current user is either in the docker group or can do passwordless sudo, to be able to access docker. NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the docker instance on that node, instead of just the images required for that node. diff --git a/docs/getting-started.md b/docs/getting-started.md index 6b3cd18825911e5af9cfdec207fe53a2ba5d35ca..18a50e017f678773ec7e8fdd8c57f596e7ee4040 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -76,16 +76,16 @@ var in inventory. ## Connecting to Kubernetes -By default, Kubespray configures kube-master hosts with insecure access to +By default, Kubespray configures kube_control_plane hosts with insecure access to kube-apiserver via port 8080. 
A kubeconfig file is not necessary in this case, because kubectl will use <http://localhost:8080> to connect. The kubeconfig files -generated will point to localhost (on kube-masters) and kube-node hosts will +generated will point to localhost (on kube_control_planes) and kube-node hosts will connect either to a localhost nginx proxy or to a loadbalancer if configured. More details on this process are in the [HA guide](/docs/ha-mode.md). Kubespray permits connecting to the cluster remotely on any IP of any -kube-master host on port 6443 by default. However, this requires -authentication. One can get a kubeconfig from kube-master hosts +kube_control_plane host on port 6443 by default. However, this requires +authentication. One can get a kubeconfig from kube_control_plane hosts (see [below](#accessing-kubernetes-api)) or connect with a [username and password](/docs/vars.md#user-accounts). For more information on kubeconfig and accessing a Kubernetes cluster, refer to @@ -119,7 +119,7 @@ kubectl proxy ## Accessing Kubernetes API -The main client of Kubernetes is `kubectl`. It is installed on each kube-master +The main client of Kubernetes is `kubectl`. It is installed on each kube_control_plane host and can optionally be configured on your ansible host by setting `kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration: diff --git a/docs/ha-mode.md b/docs/ha-mode.md index ddf330715a61a66762c875cccbb8438643c6e0f5..668558f17ac478023635b711517ff2db0f169287 100644 --- a/docs/ha-mode.md +++ b/docs/ha-mode.md @@ -32,7 +32,7 @@ If you choose to NOT use the local internal loadbalancer, you will need to configure your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to a user and is not covered by ansible roles in Kubespray. By default, it only configures a non-HA endpoint, which points to the -`access_ip` or IP address of the first server node in the `kube-master` group. +`access_ip` or IP address of the first server node in the `kube_control_plane` group. It can also configure clients to use endpoints for a given loadbalancer type. The following diagram shows how traffic to the apiserver is directed. @@ -102,16 +102,16 @@ exclusive to `loadbalancer_apiserver_localhost`. 
 
 Access API endpoints are evaluated automatically, as the following:
 
-| Endpoint type                | kube-master      | non-master              | external              |
-|------------------------------|------------------|-------------------------|-----------------------|
-| Local LB (default)           | `https://bip:sp` | `https://lc:nsp`        | `https://m[0].aip:sp` |
-| Local LB + Unmanaged here LB | `https://bip:sp` | `https://lc:nsp`        | `https://ext`         |
-| External LB, no internal     | `https://bip:sp` | `<https://lb:lp>`       | `https://lb:lp`       |
-| No ext/int LB                | `https://bip:sp` | `<https://m[0].aip:sp>` | `https://m[0].aip:sp` |
+| Endpoint type                | kube_control_plane | non-master              | external              |
+|------------------------------|--------------------|-------------------------|-----------------------|
+| Local LB (default)           | `https://bip:sp`   | `https://lc:nsp`        | `https://m[0].aip:sp` |
+| Local LB + Unmanaged here LB | `https://bip:sp`   | `https://lc:nsp`        | `https://ext`         |
+| External LB, no internal     | `https://bip:sp`   | `<https://lb:lp>`       | `https://lb:lp`       |
+| No ext/int LB                | `https://bip:sp`   | `<https://m[0].aip:sp>` | `https://m[0].aip:sp` |
 
 Where:
 
-* `m[0]` - the first node in the `kube-master` group;
+* `m[0]` - the first node in the `kube_control_plane` group;
 * `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
 * `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
 * `lc` - localhost;
diff --git a/docs/integration.md b/docs/integration.md
index 4eab2535bda9592670032f0733680414978c4ed8..09c044fa89c25855d4a01705b2bf43197addb0e0 100644
--- a/docs/integration.md
+++ b/docs/integration.md
@@ -62,7 +62,7 @@ You could rename *all.yml* config to something else, i.e. *kubespray.yml* and cr
      kubemaster
      kubemaster-ha
 
-    [kube-master:children]
+    [kube_control_plane:children]
      kubemaster
      kubemaster-ha
 
diff --git a/docs/large-deployments.md b/docs/large-deployments.md
index 8b8ebef4e8f21d64d1369314d587009ceb7858f2..130bcf0e79eca8cdd18180dc31409e9154e1e311 100644
--- a/docs/large-deployments.md
+++ b/docs/large-deployments.md
@@ -39,7 +39,7 @@ For a large scaled deployments, consider the following configuration changes:
 
 * Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
   from host/network interruption much quicker with calico-rr. Note that
-  calico-rr role must be on a host without kube-master or kube-node role (but
+  calico-rr role must be on a host without kube_control_plane or kube-node role (but
   etcd role is okay).
 
 * Check out the
diff --git a/docs/nodes.md b/docs/nodes.md
index 60844794da206636ec76a02b83460e78da594dd9..f369a5f3dd09b682fbbab2decebd1a45936d5932 100644
--- a/docs/nodes.md
+++ b/docs/nodes.md
@@ -2,9 +2,9 @@
 
 Modified from [comments in #3471](https://github.com/kubernetes-sigs/kubespray/issues/3471#issuecomment-530036084)
 
-## Limitation: Removal of first kube-master and etcd-master
+## Limitation: Removal of first kube_control_plane and etcd-master
 
-Currently you can't remove the first node in your kube-master and etcd-master list. If you still want to remove this node you have to:
+Currently you can't remove the first node in your kube_control_plane and etcd-master list. If you still want to remove this node you have to:
 
 ### 1) Change order of current masters
 
@@ -12,7 +12,7 @@ Modify the order of your master list by pushing your first entry to any other po
 
 ```yaml
   children:
-    kube-master:
+    kube_control_plane:
       hosts:
         node-1:
         node-2:
@@ -33,7 +33,7 @@ change your inventory to:
 
 ```yaml
   children:
-    kube-master:
+    kube_control_plane:
       hosts:
         node-2:
         node-3:
@@ -103,10 +103,10 @@ You need to make sure there are always an odd number of etcd nodes in the cluste
 
 ### 1) Add the new node running cluster.yml
 
-Update the inventory and run `cluster.yml` passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`.
+Update the inventory and run `cluster.yml` passing `--limit=etcd,kube_control_plane -e ignore_assert_errors=yes`.
 If the node you want to add as an etcd node is already a worker or master node in your cluster, you have to remove him first using `remove-node.yml`.
 
-Run `upgrade-cluster.yml` also passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster.
+Run `upgrade-cluster.yml` also passing `--limit=etcd,kube_control_plane -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster.
 
 At this point, you will have an even number of nodes.
 Everything should still be working, and you should only have problems if the cluster decides to elect a new etcd leader before you remove a node.
diff --git a/docs/recover-control-plane.md b/docs/recover-control-plane.md
index d24a4c73ab5e359e176ed119d213cf1f9bb879a8..b454310f0a3f88ef8f564eabb2d3ae3ad0cd9d55 100644
--- a/docs/recover-control-plane.md
+++ b/docs/recover-control-plane.md
@@ -5,8 +5,8 @@ To recover from broken nodes in the control plane use the "recover\-control\-pla
 
 * Backup what you can
 * Provision new nodes to replace the broken ones
-* Place the surviving nodes of the control plane first in the "etcd" and "kube-master" groups
-* Add the new nodes below the surviving control plane nodes in the "etcd" and "kube-master" groups
+* Place the surviving nodes of the control plane first in the "etcd" and "kube\_control\_plane" groups
+* Add the new nodes below the surviving control plane nodes in the "etcd" and "kube\_control\_plane" groups
 
 Examples of what broken means in this context:
 
@@ -20,9 +20,9 @@ __Note that you need at least one functional node to be able to recover using th
 
 ## Runbook
 
 * Move any broken etcd nodes into the "broken\_etcd" group, make sure the "etcd\_member\_name" variable is set.
-* Move any broken master nodes into the "broken\_kube-master" group.
+* Move any broken master nodes into the "broken\_kube\_control\_plane" group.
 
-Then run the playbook with ```--limit etcd,kube-master``` and increase the number of ETCD retries by setting ```-e etcd_retries=10``` or something even larger. The amount of retries required is difficult to predict.
+Then run the playbook with ```--limit etcd,kube_control_plane``` and increase the number of ETCD retries by setting ```-e etcd_retries=10``` or something even larger. The amount of retries required is difficult to predict.
 
 When finished you should have a fully working control plane again.
diff --git a/docs/test_cases.md b/docs/test_cases.md
index 3b572d8b87eb74519e68d6901fecaa0b73ed9936..2a8f5e92008950419c83dc9b77bef1e1d328a8cd 100644
--- a/docs/test_cases.md
+++ b/docs/test_cases.md
@@ -3,10 +3,10 @@ There are four node layout types: `default`, `separate`, `ha`, and `scale`.
 
 `default` is a non-HA two nodes setup with one separate `kube-node`
-and the `etcd` group merged with the `kube-master`.
+and the `etcd` group merged with the `kube_control_plane`.
 
 `separate` layout is when there is only node of each type, which includes
- a kube-master, kube-node, and etcd cluster member.
+ a kube_control_plane, kube-node, and etcd cluster member.
 
 `ha` layout consists of two etcd nodes, two masters and a single worker node,
 with role intersection.
 
diff --git a/docs/upgrades.md b/docs/upgrades.md
index b325b619fbf8341b64d734d0babc9907efb48075..0a3d6c779d0dd9d87744f58b94314408761ca512 100644
--- a/docs/upgrades.md
+++ b/docs/upgrades.md
@@ -41,7 +41,7 @@ The var ```-e upgrade_cluster_setup=true``` is needed to be set in order to migr
 Kubespray also supports cordon, drain and uncordoning of nodes when performing
 a cluster upgrade. There is a separate playbook used for this purpose. It is
 important to note that upgrade-cluster.yml can only be used for upgrading an
-existing cluster. That means there must be at least 1 kube-master already
+existing cluster. That means there must be at least 1 kube_control_plane already
 deployed.
 
 ```ShellSession
diff --git a/docs/vars.md b/docs/vars.md
index a97eee03513d781b128ea74fe25e07387a98c0a0..310f3f29fefce551d9a7945332e8080b2e4a5c12 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -36,7 +36,7 @@ Some variables of note include:
 * *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if
   ip and access_ip are undefined
 * *loadbalancer_apiserver* - If defined, all hosts will connect to this
-  address instead of localhost for kube-masters and kube-master[0] for
+  address instead of localhost for kube_control_planes and kube_control_plane[0] for
   kube-nodes. See more details in the
   [HA guide](/docs/ha-mode.md).
 * *loadbalancer_apiserver_localhost* - makes all hosts to connect to
diff --git a/extra_playbooks/migrate_openstack_provider.yml b/extra_playbooks/migrate_openstack_provider.yml
index 0e1584470d77d1c3b0f45dbece904e3d8fe0f16a..9d4adbaa948bed92fbca8a1c5aaa18eec3f8ec94 100644
--- a/extra_playbooks/migrate_openstack_provider.yml
+++ b/extra_playbooks/migrate_openstack_provider.yml
@@ -1,5 +1,5 @@
 ---
-- hosts: kube-node:kube-master
+- hosts: kube-node:kube_control_plane
   tasks:
     - name: Remove old cloud provider config
       file:
@@ -7,7 +7,7 @@
         state: absent
       with_items:
         - /etc/kubernetes/cloud_config
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   tasks:
     - name: Include kubespray-default variables
       include_vars: ../roles/kubespray-defaults/defaults/main.yaml
diff --git a/extra_playbooks/upgrade-only-k8s.yml b/extra_playbooks/upgrade-only-k8s.yml
index 89e8ed1dcf3d2d471455b4923f52fe5127a3c44c..5bdbd012d926ec89222f9182c3a35fe8e5818876 100644
--- a/extra_playbooks/upgrade-only-k8s.yml
+++ b/extra_playbooks/upgrade-only-k8s.yml
@@ -34,7 +34,7 @@
     - { role: kubernetes/preinstall, tags: preinstall }
 
 - name: Handle upgrades to master components first to maintain backwards compat.
-  hosts: kube-master
+  hosts: kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
   roles:
@@ -47,7 +47,7 @@
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
 - name: Finally handle worker upgrades, based on given batch size
-  hosts: kube-node:!kube-master
+  hosts: kube-node:!kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
diff --git a/inventory/local/hosts.ini b/inventory/local/hosts.ini
index 7834d27c0ea0a42978004554504316cba41214ad..551941080856e371c7b7ce0f2922e9aeb3fbf488 100644
--- a/inventory/local/hosts.ini
+++ b/inventory/local/hosts.ini
@@ -1,6 +1,6 @@
 node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases
 
-[kube-master]
+[kube_control_plane]
 node1
 
 [etcd]
@@ -11,5 +11,5 @@ node1
 
 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 calico-rr
diff --git a/inventory/sample/inventory.ini b/inventory/sample/inventory.ini
index 6babb4e9eed6bed8457e2cf99a27bd624adf52eb..b450bc068df6c8ff6405616e6f861b7191a13011 100644
--- a/inventory/sample/inventory.ini
+++ b/inventory/sample/inventory.ini
@@ -13,7 +13,7 @@
 # [bastion]
 # bastion ansible_host=x.x.x.x ansible_user=some_user
 
-[kube-master]
+[kube_control_plane]
 # node1
 # node2
 # node3
@@ -33,6 +33,6 @@
 [calico-rr]
 
 [k8s-cluster:children]
-kube-master
+kube_control_plane
 kube-node
 calico-rr
diff --git a/recover-control-plane.yml b/recover-control-plane.yml
index 26be30769ada910a546d1042c0a975e6c403640b..c2b28d093ce016b30cdecb7f5141a163eeac3eb1 100644
--- a/recover-control-plane.yml
+++ b/recover-control-plane.yml
@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventory which contains kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
@@ -15,7 +24,7 @@
     - { role: kubespray-defaults}
     - { role: recover_control_plane/etcd }
 
-- hosts: "{{ groups['kube-master'] | first }}"
+- hosts: "{{ groups['kube_control_plane'] | first }}"
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
@@ -23,7 +32,7 @@
 
 - include: cluster.yml
 
-- hosts: "{{ groups['kube-master'] }}"
+- hosts: "{{ groups['kube_control_plane'] }}"
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}
diff --git a/remove-node.yml b/remove-node.yml
index b78b71907393f6769c4c69caa637331e56d3a10e..27c886035bea116cc0e3429102dc03fd167b6eec 100644
--- a/remove-node.yml
+++ b/remove-node.yml
@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventory which contains kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
@@ -17,7 +26,7 @@
         msg: "Delete nodes confirmation failed"
       when: delete_nodes_confirmation != "yes"
 
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -35,7 +44,7 @@
     - { role: reset, tags: reset, when: reset_nodes|default(True)|bool }
 
 # Currently cannot remove first master or etcd
-- hosts: "{{ node | default('kube-master[1:]:etcd[1:]') }}"
+- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:
diff --git a/reset.yml b/reset.yml
index e053e101c8543e0d8852db094c98586200576eb1..81f2389d4935affb23d53a31ee9d01efcf58a350 100644
--- a/reset.yml
+++ b/reset.yml
@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventory which contains kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
diff --git a/roles/container-engine/containerd/molecule/default/molecule.yml b/roles/container-engine/containerd/molecule/default/molecule.yml
index b49d73ce0da23d876884226bcef058c004854feb..48f7b5dd027ec2266ca51eb25ae9a5098430caed 100644
--- a/roles/container-engine/containerd/molecule/default/molecule.yml
+++ b/roles/container-engine/containerd/molecule/default/molecule.yml
@@ -12,7 +12,7 @@ platforms:
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
 provisioner:
   name: ansible
   env:
diff --git a/roles/container-engine/cri-o/molecule/default/molecule.yml b/roles/container-engine/cri-o/molecule/default/molecule.yml
index a6c36acbac7848df900edc278794547404bcc540..574d491395f0dd6a2dd42b2a8f263d3f3c7201ea 100644
--- a/roles/container-engine/cri-o/molecule/default/molecule.yml
+++ b/roles/container-engine/cri-o/molecule/default/molecule.yml
@@ -12,25 +12,25 @@ platforms:
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
   - name: centos7
     box: centos/7
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
   - name: centos8
     box: centos/8
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
   - name: fedora
     box: fedora/33-cloud-base
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
 provisioner:
   name: ansible
   env:
diff --git a/roles/container-engine/kata-containers/molecule/default/molecule.yml b/roles/container-engine/kata-containers/molecule/default/molecule.yml
index 8cccf8dfc0d3c422f81931b398e38e9922686acb..164a4708391a5367191baa934c6592cf49b38ed7 100644
--- a/roles/container-engine/kata-containers/molecule/default/molecule.yml
+++ b/roles/container-engine/kata-containers/molecule/default/molecule.yml
@@ -15,14 +15,14 @@ platforms:
     memory: 1024
     nested: true
     groups:
-      - kube-master
+      - kube_control_plane
   - name: ubuntu20
     box: generic/ubuntu2004
     cpus: 1
     memory: 1024
     nested: true
     groups:
-      - kube-master
+      - kube_control_plane
 provisioner:
   name: ansible
   env:
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index e3571145951997104b6829d848b27aaaed2cf686..67450f446add5e2bd5c5989d4a088efadb8c4ebd 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -30,7 +30,7 @@ download_container: true
 # if this is set to true, uses the localhost for download_run_once mode
 # (requires docker and sudo to access docker). You may want this option for
 # local caching of docker images or for Flatcar Container Linux by Kinvolk cluster nodes.
-# Otherwise, uses the first node in the kube-master group to store images
+# Otherwise, uses the first node in the kube_control_plane group to store images
 # in the download_run_once mode.
download_localhost: false @@ -42,8 +42,8 @@ download_always_pull: false # SSL validation of get_url module. Note that kubespray will still be performing checksum validation. download_validate_certs: true -# Use the first kube-master if download_localhost is not set -download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube-master'][0] }}{% endif %}" +# Use the first kube_control_plane if download_localhost is not set +download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube_control_plane'][0] }}{% endif %}" # Arch of Docker images and needed packages image_arch: "{{host_architecture | default('amd64')}}" @@ -733,7 +733,7 @@ downloads: owner: "root" mode: "0755" groups: - - kube-master + - kube_control_plane crictl: file: true @@ -883,7 +883,7 @@ downloads: owner: "root" mode: "0755" groups: - - kube-master + - kube_control_plane weave_kube: enabled: "{{ kube_network_plugin == 'weave' }}" @@ -973,7 +973,7 @@ downloads: tag: "{{ coredns_image_tag }}" sha256: "{{ coredns_digest_checksum|default(None) }}" groups: - - kube-master + - kube_control_plane nodelocaldns: enabled: "{{ enable_nodelocaldns }}" @@ -991,7 +991,7 @@ downloads: tag: "{{ dnsautoscaler_image_tag }}" sha256: "{{ dnsautoscaler_digest_checksum|default(None) }}" groups: - - kube-master + - kube_control_plane testbox: enabled: false @@ -1011,7 +1011,7 @@ downloads: owner: "root" mode: "0755" groups: - - kube-master + - kube_control_plane registry: enabled: "{{ registry_enabled }}" @@ -1038,7 +1038,7 @@ downloads: tag: "{{ metrics_server_image_tag }}" sha256: "{{ metrics_server_digest_checksum|default(None) }}" groups: - - kube-master + - kube_control_plane addon_resizer: # Currently addon_resizer is only used by metrics server @@ -1048,7 +1048,7 @@ downloads: tag: "{{ addon_resizer_image_tag }}" sha256: "{{ addon_resizer_digest_checksum|default(None) }}" groups: - - kube-master + - kube_control_plane local_volume_provisioner: enabled: "{{ local_volume_provisioner_enabled }}" @@ -1219,7 +1219,7 @@ downloads: tag: "{{ dashboard_image_tag }}" sha256: "{{ dashboard_digest_checksum|default(None) }}" groups: - - kube-master + - kube_control_plane dashboard_metrics_scrapper: enabled: "{{ dashboard_enabled }}" @@ -1228,7 +1228,7 @@ downloads: tag: "{{ dashboard_metrics_scraper_tag }}" sha256: "{{ dashboard_digest_checksum|default(None) }}" groups: - - kube-master + - kube_control_plane download_defaults: container: false diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 2fa45929f30f215790a4a7415f5ec72bfdf453f9..3942283547e49aeb9bf3e6db1d42310799868dcc 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -18,7 +18,7 @@ include_tasks: prep_kubeadm_images.yml when: - not skip_downloads|default(false) - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] tags: - download - upload diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml index 8d30208757d64da22e2177db7f420b42918daff7..2f774cfcd8162c79711a390cc078f70597049350 100644 --- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml +++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml @@ -6,7 +6,7 @@ ignore_errors: true when: - dns_mode in ['coredns', 'coredns_dual'] - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Apps | Delete kubeadm CoreDNS kube: @@ -17,7 +17,7 @@ state: absent when: - 
dns_mode in ['coredns', 'coredns_dual'] - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - createdby_annotation.stdout != 'kubespray' - name: Kubernetes Apps | Delete kubeadm Kube-DNS service @@ -29,4 +29,4 @@ state: absent when: - dns_mode in ['coredns', 'coredns_dual'] - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/ansible/tasks/coredns.yml b/roles/kubernetes-apps/ansible/tasks/coredns.yml index bb959966b7a0d8ce6db2b167c1ff7313437d6140..0bbb269a0a89c7e05c4f813b4ec7951b30c53631 100644 --- a/roles/kubernetes-apps/ansible/tasks/coredns.yml +++ b/roles/kubernetes-apps/ansible/tasks/coredns.yml @@ -20,7 +20,7 @@ clusterIP: "{{ skydns_server }}" when: - dns_mode in ['coredns', 'coredns_dual'] - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - coredns @@ -38,6 +38,6 @@ coredns_ordinal_suffix: "-secondary" when: - dns_mode == 'coredns_dual' - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - coredns diff --git a/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/roles/kubernetes-apps/ansible/tasks/dashboard.yml index ba6c13b2b71761a5bb3b1d10f9b5c0f1c2b78509..94c041d14ff74c035f9bf0bfb99888c6a0c9e9bf 100644 --- a/roles/kubernetes-apps/ansible/tasks/dashboard.yml +++ b/roles/kubernetes-apps/ansible/tasks/dashboard.yml @@ -6,7 +6,7 @@ with_items: - { file: dashboard.yml, type: deploy, name: kubernetes-dashboard } register: manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Apps | Start dashboard kube: @@ -17,4 +17,4 @@ filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 25ffa72703b9155df40d15613439dbe09ed8ca95..75ee477b0aa9c4eb35d7adc5c6caceef99c99999 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -9,12 +9,12 @@ until: result.status == 200 retries: 20 delay: 1 - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Apps | Cleanup DNS import_tasks: cleanup_dns.yml when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - upgrade - coredns @@ -24,7 +24,7 @@ import_tasks: "coredns.yml" when: - dns_mode in ['coredns', 'coredns_dual'] - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - coredns @@ -32,7 +32,7 @@ import_tasks: "nodelocaldns.yml" when: - enable_nodelocaldns - - inventory_hostname == groups['kube-master'] | first + - inventory_hostname == groups['kube_control_plane'] | first tags: - nodelocaldns @@ -50,7 +50,7 @@ - "{{ nodelocaldns_manifests.results | default({}) }}" when: - dns_mode != 'none' - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped register: resource_result until: resource_result is succeeded diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml 
b/roles/kubernetes-apps/ansible/tasks/netchecker.yml index 81121c53bc1794d8f4771d4ed12f00aab7f002ea..46252929a103d8313acaa2a402e8a6e5de2445f1 100644 --- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml +++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -28,7 +28,7 @@ with_items: "{{ netchecker_templates }}" register: manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Apps | Start Netchecker Resources kube: @@ -39,4 +39,4 @@ filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] and not item is skipped + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml index 378dbc92f2e08e08f0e8b30c1ee7dd711a06a87d..ce79ceed4b80e81a49293ed2f3a75106c910278c 100644 --- a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml +++ b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml @@ -10,7 +10,7 @@ secondaryclusterIP: "{{ skydns_server_secondary }}" when: - enable_nodelocaldns - - inventory_hostname == groups['kube-master'] | first + - inventory_hostname == groups['kube_control_plane'] | first tags: - nodelocaldns - coredns @@ -39,7 +39,7 @@ {%- endif -%} when: - enable_nodelocaldns - - inventory_hostname == groups['kube-master'] | first + - inventory_hostname == groups['kube_control_plane'] | first tags: - nodelocaldns - coredns diff --git a/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml index cec7deaca8a42d5b97ffa5ead2cd5faf08cb9638..ecf6f511d8fdbf0261db8b439e560dbbe308d005 100644 --- a/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml +++ b/roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml @@ -7,7 +7,7 @@ template: src: controller-manager-config.yml.j2 dest: "{{ kube_config_dir }}/controller-manager-config.yml" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: oci - name: "OCI Cloud Controller | Slurp Configuration" @@ -18,14 +18,14 @@ - name: "OCI Cloud Controller | Encode Configuration" set_fact: controller_manager_config_base64: "{{ controller_manager_config.content }}" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: oci - name: "OCI Cloud Controller | Generate Manifests" template: src: oci-cloud-provider.yml.j2 dest: "{{ kube_config_dir }}/oci-cloud-provider.yml" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: oci - name: "OCI Cloud Controller | Apply Manifests" @@ -33,5 +33,5 @@ kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/oci-cloud-provider.yml" state: latest - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: oci diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index 91955ea8adca0871e20e4e44544c0694e414e0be..2f5f110af62d1432897e0b69fd7ee2703c9aa92c 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -9,14 +9,14 @@ until: result.status == 200 retries: 10 delay: 6 - when: inventory_hostname == 
groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Apps | Check AppArmor status command: which apparmor_parser register: apparmor_status when: - podsecuritypolicy_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] failed_when: false - name: Kubernetes Apps | Set apparmor_enabled @@ -24,7 +24,7 @@ apparmor_enabled: "{{ apparmor_status.rc == 0 }}" when: - podsecuritypolicy_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Apps | Render templates for PodSecurityPolicy template: @@ -37,7 +37,7 @@ - {file: psp-crb.yml, type: rolebinding, name: psp-crb} when: - podsecuritypolicy_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Apps | Add policies, roles, bindings for PodSecurityPolicy kube: @@ -52,7 +52,7 @@ delay: 6 with_items: "{{ psp_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped loop_control: label: "{{ item.item.file }}" @@ -64,7 +64,7 @@ register: node_crb_manifest when: - rbac_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Apply workaround to allow all nodes with cert O=system:nodes to register kube: @@ -80,7 +80,7 @@ when: - rbac_enabled - node_crb_manifest.changed - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet template: @@ -90,7 +90,7 @@ when: - rbac_enabled - kubelet_authorization_mode_webhook - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: node-webhook - name: Apply webhook ClusterRole @@ -104,7 +104,7 @@ - rbac_enabled - kubelet_authorization_mode_webhook - node_webhook_cr_manifest.changed - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: node-webhook - name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole @@ -115,7 +115,7 @@ when: - rbac_enabled - kubelet_authorization_mode_webhook - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: node-webhook - name: Grant system:nodes the webhook ClusterRole @@ -129,7 +129,7 @@ - rbac_enabled - kubelet_authorization_mode_webhook - node_webhook_crb_manifest.changed - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: node-webhook - include_tasks: oci.yml @@ -140,7 +140,7 @@ - name: PriorityClass | Copy k8s-cluster-critical-pc.yml file copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml - when: inventory_hostname == groups['kube-master']|last + when: inventory_hostname == groups['kube_control_plane']|last - name: PriorityClass | Create k8s-cluster-critical kube: @@ -149,4 +149,4 @@ resource: "PriorityClass" filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml" state: latest - when: inventory_hostname == groups['kube-master']|last + when: inventory_hostname == groups['kube_control_plane']|last diff --git a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml 
b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml index 22b39b3d407b238c7f449add021aa38b030ab7dc..72142eae63d6c85ff8a610e88219dd42e3189879 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml @@ -6,7 +6,7 @@ when: - cloud_provider is defined - cloud_provider == 'oci' - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Apply OCI RBAC kube: @@ -15,4 +15,4 @@ when: - cloud_provider is defined - cloud_provider == 'oci' - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml index fd3ea42fa67b04eb4b2f322035befaffc9ffc67f..75a0b8a10a1cd94bc01cae2f149bd01510756d46 100644 --- a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml +++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml @@ -38,7 +38,7 @@ - { name: k8s-device-plugin-nvidia-daemonset, file: k8s-device-plugin-nvidia-daemonset.yml, type: daemonset } register: container_engine_accelerator_manifests when: - - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container + - inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container - name: Container Engine Acceleration Nvidia GPU | Apply manifests for nvidia accelerators kube: @@ -51,4 +51,4 @@ with_items: - "{{ container_engine_accelerator_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container and nvidia_driver_install_supported + - inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container and nvidia_driver_install_supported diff --git a/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml b/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml index 637d7beefebc6ef063c46df1f8adf76768d208b1..46384d281c5f56024176d9821ee562e0e38ad1b1 100644 --- a/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml +++ b/roles/kubernetes-apps/container_runtimes/crun/tasks/main.yaml @@ -6,7 +6,7 @@ dest: "{{ kube_config_dir }}/runtimeclass-crun.yml" mode: "0664" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: crun | Apply manifests kube: @@ -16,4 +16,4 @@ filename: "{{ kube_config_dir }}/runtimeclass-crun.yml" state: "latest" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml b/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml index 34478b9900ca672378bb02e2143aca964109786a..3fb059fe65a370d27d7b3211d3079f03febf7c2b 100644 --- a/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml +++ b/roles/kubernetes-apps/container_runtimes/kata_containers/tasks/main.yaml @@ -20,7 +20,7 @@ with_items: "{{ kata_containers_templates }}" register: kata_containers_manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kata Containers | Apply manifests kube: @@ -31,4 +31,4 @@ state: "latest" with_items: "{{ kata_containers_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname 
== groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml b/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml index 04bb9fd4d3796c5cbc00b3b9b5d741ce857613a8..7b2f41a4cc38ef90a0550d267e708feb3ea765a9 100644 --- a/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml +++ b/roles/kubernetes-apps/csi_driver/aws_ebs/tasks/main.yml @@ -9,7 +9,7 @@ - {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice.yml} - {name: aws-ebs-csi-nodeservice, file: aws-ebs-csi-nodeservice.yml} register: aws_csi_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: aws-ebs-csi-driver - name: AWS CSI Driver | Apply Manifests @@ -20,7 +20,7 @@ with_items: - "{{ aws_csi_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped loop_control: label: "{{ item.item.file }}" diff --git a/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml b/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml index e33ca292f01d8aba3111895bfb7ff096a73681ca..b8bbd7113e513adca3dbafc09f9b246c30bb4bb3 100644 --- a/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml +++ b/roles/kubernetes-apps/csi_driver/azuredisk/tasks/main.yml @@ -8,14 +8,14 @@ dest: "{{ kube_config_dir }}/azure_csi_cloud_config" group: "{{ kube_cert_group }}" mode: 0640 - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: azure-csi-driver - name: Azure CSI Driver | Get base64 cloud-config slurp: src: "{{ kube_config_dir }}/azure_csi_cloud_config" register: cloud_config_secret - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: azure-csi-driver - name: Azure CSI Driver | Generate Manifests @@ -30,7 +30,7 @@ - {name: azure-csi-azuredisk-node, file: azure-csi-azuredisk-node.yml} - {name: azure-csi-node-info-crd.yml.j2, file: azure-csi-node-info-crd.yml} register: azure_csi_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: azure-csi-driver - name: Azure CSI Driver | Apply Manifests @@ -41,7 +41,7 @@ with_items: - "{{ azure_csi_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped loop_control: label: "{{ item.item.file }}" diff --git a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml index 14b8275135ef2d470d7c59ad8d29fc111630da9b..47ba0a1d0d7522a6cac3104bf4ed4417ffd79b7f 100644 --- a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml +++ b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml @@ -20,14 +20,14 @@ dest: "{{ kube_config_dir }}/cinder_cloud_config" group: "{{ kube_cert_group }}" mode: 0640 - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: cinder-csi-driver - name: Cinder CSI Driver | Get base64 cloud-config slurp: src: "{{ kube_config_dir }}/cinder_cloud_config" register: cloud_config_secret - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: cinder-csi-driver - name: Cinder CSI Driver | Generate Manifests @@ -43,7 +43,7 @@ - {name: cinder-csi-nodeplugin, file: 
cinder-csi-nodeplugin.yml} - {name: cinder-csi-poddisruptionbudget, file: cinder-csi-poddisruptionbudget.yml} register: cinder_csi_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: cinder-csi-driver - name: Cinder CSI Driver | Apply Manifests @@ -54,7 +54,7 @@ with_items: - "{{ cinder_csi_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped loop_control: label: "{{ item.item.file }}" diff --git a/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml b/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml index 85d637efbd25e4ab028e57e3e43d98dc39306548..029d7ffe50dba19fc65cb5b488f9480c56895938 100644 --- a/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml +++ b/roles/kubernetes-apps/csi_driver/csi_crd/tasks/main.yml @@ -8,7 +8,7 @@ - {name: volumesnapshotcontents, file: volumesnapshotcontents.yml} - {name: volumesnapshots, file: volumesnapshots.yml} register: csi_crd_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: csi-driver - name: CSI CRD | Apply Manifests @@ -20,7 +20,7 @@ with_items: - "{{ csi_crd_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped loop_control: label: "{{ item.item.file }}" diff --git a/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml b/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml index 7e43158541baa7e7221a2a5dd55eb384c691b434..05961ef56d2e041564cc10f175541ff75d9c516b 100644 --- a/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml +++ b/roles/kubernetes-apps/csi_driver/gcp_pd/tasks/main.yml @@ -11,14 +11,14 @@ dest: "{{ kube_config_dir }}/cloud-sa.json" group: "{{ kube_cert_group }}" mode: 0640 - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: gcp-pd-csi-driver - name: GCP PD CSI Driver | Get base64 cloud-sa.json slurp: src: "{{ kube_config_dir }}/cloud-sa.json" register: gcp_cred_secret - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: gcp-pd-csi-driver - name: GCP PD CSI Driver | Generate Manifests @@ -31,7 +31,7 @@ - {name: gcp-pd-csi-controller, file: gcp-pd-csi-controller.yml} - {name: gcp-pd-csi-node, file: gcp-pd-csi-node.yml} register: gcp_pd_csi_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: gcp-pd-csi-driver - name: GCP PD CSI Driver | Apply Manifests @@ -42,7 +42,7 @@ with_items: - "{{ gcp_pd_csi_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped loop_control: label: "{{ item.item.file }}" diff --git a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml index 4e341b2aff05cb89265bafa2eca924c585331c26..26e8751ac76a2bfdc1a4d7d0f1cb871f840e8fe2 100644 --- a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml +++ b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml @@ -9,7 +9,7 @@ mode: 0640 with_items: - vsphere-csi-cloud-config - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: 
vsphere-csi-driver - name: vSphere CSI Driver | Generate Manifests @@ -21,13 +21,13 @@ - vsphere-csi-controller-ss.yml - vsphere-csi-node.yml register: vsphere_csi_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: vsphere-csi-driver - name: vSphere CSI Driver | Generate a CSI secret manifest command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml" register: vsphere_csi_secret_manifest - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] no_log: true tags: vsphere-csi-driver @@ -35,7 +35,7 @@ command: cmd: "{{ bin_dir }}/kubectl apply -f -" stdin: "{{ vsphere_csi_secret_manifest.stdout }}" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] no_log: true tags: vsphere-csi-driver @@ -47,7 +47,7 @@ with_items: - "{{ vsphere_csi_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped loop_control: label: "{{ item.item }}" diff --git a/roles/kubernetes-apps/external_cloud_controller/meta/main.yml b/roles/kubernetes-apps/external_cloud_controller/meta/main.yml index b7d1cc6986ccfcf2130f09a68b341a0a1f79ee6d..a75a42995feb6ad048130356a3875ea0965bf9b7 100644 --- a/roles/kubernetes-apps/external_cloud_controller/meta/main.yml +++ b/roles/kubernetes-apps/external_cloud_controller/meta/main.yml @@ -6,7 +6,7 @@ dependencies: - cloud_provider == "external" - external_cloud_provider is defined - external_cloud_provider == "openstack" - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - external-cloud-controller - external-openstack @@ -16,7 +16,7 @@ dependencies: - cloud_provider == "external" - external_cloud_provider is defined - external_cloud_provider == "vsphere" - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - external-cloud-controller - external-vsphere diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml index 220d3916857abe32a9dc2bdf8f11e1daa4c0be41..7c89fdbdffde351ebfa7e94d9370d1928618c89b 100644 --- a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml +++ b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml @@ -20,14 +20,14 @@ dest: "{{ kube_config_dir }}/external_openstack_cloud_config" group: "{{ kube_cert_group }}" mode: 0640 - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: external-openstack - name: External OpenStack Cloud Controller | Get base64 cloud-config slurp: src: "{{ kube_config_dir }}/external_openstack_cloud_config" register: external_openstack_cloud_config_secret - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: external-openstack - name: External OpenStack Cloud Controller | Generate Manifests @@ -42,7 +42,7 @@ - {name: external-openstack-cloud-controller-manager-role-bindings, file: external-openstack-cloud-controller-manager-role-bindings.yml} - {name: external-openstack-cloud-controller-manager-ds, file: 
external-openstack-cloud-controller-manager-ds.yml} register: external_openstack_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: external-openstack - name: External OpenStack Cloud Controller | Apply Manifests @@ -53,7 +53,7 @@ with_items: - "{{ external_openstack_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped loop_control: label: "{{ item.item.file }}" diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml b/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml index 0dbf3f7dca4652ca217ab1d943159b9ded6229aa..86e16dbe7bec474f65b1763b5727e67d995ccc77 100644 --- a/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml +++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/tasks/main.yml @@ -9,7 +9,7 @@ mode: 0640 with_items: - external-vsphere-cpi-cloud-config - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: external-vsphere - name: External vSphere Cloud Controller | Generate Manifests @@ -22,20 +22,20 @@ - external-vsphere-cloud-controller-manager-role-bindings.yml - external-vsphere-cloud-controller-manager-ds.yml register: external_vsphere_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: external-vsphere - name: External vSphere Cloud Provider Interface | Create a CPI configMap manifest command: "{{ bin_dir }}/kubectl create configmap cloud-config --from-file=vsphere.conf={{ kube_config_dir }}/external-vsphere-cpi-cloud-config -n kube-system --dry-run --save-config -o yaml" register: external_vsphere_configmap_manifest - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: external-vsphere - name: External vSphere Cloud Provider Interface | Apply a CPI configMap manifest command: cmd: "{{ bin_dir }}/kubectl apply -f -" stdin: "{{ external_vsphere_configmap_manifest.stdout }}" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: external-vsphere - name: External vSphere Cloud Controller | Apply Manifests @@ -46,7 +46,7 @@ with_items: - "{{ external_vsphere_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped loop_control: label: "{{ item.item }}" diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml index c93ecfde79b35bf8e7e4016dd59cd67f3f442e2b..15b2ecf2b0eedc661846ebfffeaecf1bf6246221 100644 --- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml @@ -5,7 +5,7 @@ path: "{{ kube_config_dir }}/addons/cephfs_provisioner" state: absent when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - upgrade @@ -14,7 +14,7 @@ {{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }} ignore_errors: yes when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - upgrade @@ -23,7 +23,7 
@@ {{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }} ignore_errors: yes when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - upgrade @@ -35,7 +35,7 @@ group: root mode: 0755 when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: CephFS Provisioner | Templates list set_fact: @@ -65,7 +65,7 @@ dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}" with_items: "{{ cephfs_provisioner_templates }}" register: cephfs_provisioner_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: CephFS Provisioner | Apply manifests kube: @@ -76,4 +76,4 @@ filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}" state: "latest" with_items: "{{ cephfs_provisioner_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml index a723d24f8ebe0cdf5ada01e10113fb9a8ce5365f..1c3606882b9b8623562efb429108a25336bbe4b8 100644 --- a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml @@ -7,7 +7,7 @@ group: root mode: 0755 when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Local Path Provisioner | Create claim root dir file: @@ -42,7 +42,7 @@ dest: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.file }}" with_items: "{{ local_path_provisioner_templates }}" register: local_path_provisioner_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Local Path Provisioner | Apply manifests kube: @@ -53,4 +53,4 @@ filename: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.item.file }}" state: "latest" with_items: "{{ local_path_provisioner_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml index b4c4f68eb963a7777acff17d79d2128e2774ab02..88a1788252b22a2240688f46a4006615286df8f7 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml @@ -42,7 +42,7 @@ dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}" with_items: "{{ local_volume_provisioner_templates }}" register: local_volume_provisioner_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Local Volume Provisioner | Apply manifests kube: @@ -53,6 +53,6 @@ filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}" state: "latest" with_items: "{{ local_volume_provisioner_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] loop_control: label: "{{ 
item.item.file }}" diff --git a/roles/kubernetes-apps/external_provisioner/meta/main.yml b/roles/kubernetes-apps/external_provisioner/meta/main.yml index 19fe8ba4876021c6201003ea0c87b71655a56f54..13bc8b6e8e91a41be5b496fb361deac438ac8688 100644 --- a/roles/kubernetes-apps/external_provisioner/meta/main.yml +++ b/roles/kubernetes-apps/external_provisioner/meta/main.yml @@ -3,7 +3,7 @@ dependencies: - role: kubernetes-apps/external_provisioner/local_volume_provisioner when: - local_volume_provisioner_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - apps - local-volume-provisioner diff --git a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml index 7c09168b2b77350bf883846cea893770504c81f8..e25e0b1437346f81a31d34fc90e254ae811428ca 100644 --- a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml +++ b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml @@ -5,7 +5,7 @@ path: "{{ kube_config_dir }}/addons/rbd_provisioner" state: absent when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - upgrade @@ -14,7 +14,7 @@ {{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }} ignore_errors: yes when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - upgrade @@ -23,7 +23,7 @@ {{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }} ignore_errors: yes when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - upgrade @@ -35,7 +35,7 @@ group: root mode: 0755 when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: RBD Provisioner | Templates list set_fact: @@ -65,7 +65,7 @@ dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}" with_items: "{{ rbd_provisioner_templates }}" register: rbd_provisioner_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: RBD Provisioner | Apply manifests kube: @@ -76,4 +76,4 @@ filename: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.item.file }}" state: "latest" with_items: "{{ rbd_provisioner_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml index 77f3df4e0211430036ab4394acdff6fdd255cb09..2e8b2f89f4d1dd755a75913af3087956c0a702ea 100644 --- a/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml +++ b/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/tasks/main.yml @@ -20,7 +20,7 @@ - { name: alb-ingress-deploy, file: alb-ingress-deploy.yml, type: deploy } register: alb_ingress_manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: ALB Ingress Controller | Apply manifests kube: @@ -32,4 +32,4 @@ state: "latest" with_items: "{{ alb_ingress_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] diff --git 
a/roles/kubernetes-apps/ingress_controller/ambassador/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/ambassador/tasks/main.yml index 91524dea208207776369732cc0093ead1e4e9774..e4cbc8bcc415a3a7845caf7ecde7fd71e571f546 100644 --- a/roles/kubernetes-apps/ingress_controller/ambassador/tasks/main.yml +++ b/roles/kubernetes-apps/ingress_controller/ambassador/tasks/main.yml @@ -8,7 +8,7 @@ group: root mode: 0755 when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Ambassador | Templates list set_fact: @@ -29,7 +29,7 @@ loop: "{{ ingress_ambassador_templates }}" register: ingress_ambassador_manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Ambassador | Apply manifests kube: @@ -41,7 +41,7 @@ state: "latest" loop: "{{ ingress_ambassador_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] # load the AmbassadorInstallation _after_ the CustomResourceDefinition has been loaded @@ -57,7 +57,7 @@ loop: "{{ ingress_ambassador_cr_templates }}" register: ingress_ambassador_cr_manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Ambassador | Apply AmbassadorInstallation kube: @@ -69,4 +69,4 @@ state: "latest" loop: "{{ ingress_ambassador_cr_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml index c8fdce8f16eff240cd67c862db5b587e932e76fe..42112b0d5ea1574d88f15357c51dbd516cd8c7c5 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml @@ -5,7 +5,7 @@ path: "{{ kube_config_dir }}/addons/cert_manager" state: absent when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - upgrade @@ -14,7 +14,7 @@ {{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }} ignore_errors: yes when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - upgrade @@ -26,7 +26,7 @@ group: root mode: 0755 when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Cert Manager | Templates list set_fact: @@ -54,7 +54,7 @@ with_items: "{{ cert_manager_templates }}" register: cert_manager_manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Cert Manager | Apply manifests kube: @@ -65,12 +65,12 @@ state: "latest" with_items: "{{ cert_manager_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Cert Manager | Wait for Webhook pods become ready command: "{{ bin_dir }}/kubectl wait po --namespace={{ cert_manager_namespace }} --selector app=webhook --for=condition=Ready --timeout=600s" register: cert_manager_webhook_pods_ready - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Cert Manager | Create ClusterIssuer manifest template: @@ -78,7 +78,7 @@ 
dest: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml" register: cert_manager_clusterissuer_manifest when: - - inventory_hostname == groups['kube-master'][0] and cert_manager_webhook_pods_ready is succeeded + - inventory_hostname == groups['kube_control_plane'][0] and cert_manager_webhook_pods_ready is succeeded - name: Cert Manager | Apply ClusterIssuer manifest kube: @@ -86,4 +86,4 @@ kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml" state: "latest" - when: inventory_hostname == groups['kube-master'][0] and cert_manager_clusterissuer_manifest is succeeded + when: inventory_hostname == groups['kube_control_plane'][0] and cert_manager_clusterissuer_manifest is succeeded diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml index b8c575817e96957ec336d4ac310f4f08e82190e8..05d35b3ac179b18b1775a9f5ab021c7b9d6b551d 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml @@ -8,7 +8,7 @@ group: root mode: 0755 when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: NGINX Ingress Controller | Templates list set_fact: @@ -38,7 +38,7 @@ with_items: "{{ ingress_nginx_templates }}" register: ingress_nginx_manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: NGINX Ingress Controller | Apply manifests kube: @@ -50,4 +50,4 @@ state: "latest" with_items: "{{ ingress_nginx_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml index e0f8e9a12e0cbaa1676cfc4cdd47f7d1e67b86f2..a3b1f1dfe9125d9ce37f422e8b5482caff387de7 100644 --- a/roles/kubernetes-apps/meta/main.yml +++ b/roles/kubernetes-apps/meta/main.yml @@ -2,7 +2,7 @@ dependencies: - role: kubernetes-apps/ansible when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - role: kubernetes-apps/helm when: @@ -13,21 +13,21 @@ dependencies: - role: kubernetes-apps/registry when: - registry_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - registry - role: kubernetes-apps/metrics_server when: - metrics_server_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - metrics_server - role: kubernetes-apps/csi_driver/csi_crd when: - cinder_csi_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - csi-driver @@ -69,19 +69,19 @@ dependencies: - role: kubernetes-apps/persistent_volumes when: - persistent_volumes_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - persistent_volumes - role: kubernetes-apps/snapshots - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: - snapshots - csi-driver - role: kubernetes-apps/container_runtimes when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - 
container-runtimes @@ -94,13 +94,13 @@ dependencies: when: - cloud_provider is defined - cloud_provider == "oci" - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - oci - role: kubernetes-apps/metallb when: - metallb_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - metallb diff --git a/roles/kubernetes-apps/metallb/tasks/main.yml b/roles/kubernetes-apps/metallb/tasks/main.yml index 5d3c58d6e4b8b3ffe9c5e1720b7583b10705d205..990500c2837e5d9ed3936de0536aabc30972d2c4 100644 --- a/roles/kubernetes-apps/metallb/tasks/main.yml +++ b/roles/kubernetes-apps/metallb/tasks/main.yml @@ -22,7 +22,7 @@ register: apparmor_status when: - podsecuritypolicy_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] failed_when: false - name: Kubernetes Apps | Set apparmor_enabled @@ -30,7 +30,7 @@ apparmor_enabled: "{{ apparmor_status.rc == 0 }}" when: - podsecuritypolicy_enabled - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: "Kubernetes Apps | Lay Down MetalLB" become: true @@ -38,7 +38,7 @@ with_items: ["metallb.yml", "metallb-config.yml"] register: "rendering" when: - - "inventory_hostname == groups['kube-master'][0]" + - "inventory_hostname == groups['kube_control_plane'][0]" - name: "Kubernetes Apps | Install and configure MetalLB" kube: @@ -49,7 +49,7 @@ become: true with_items: "{{ rendering.results }}" when: - - "inventory_hostname == groups['kube-master'][0]" + - "inventory_hostname == groups['kube_control_plane'][0]" - name: Kubernetes Apps | Check existing secret of MetalLB command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist" @@ -57,18 +57,18 @@ become: true ignore_errors: yes when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Apps | Create random bytes for MetalLB command: "openssl rand -base64 32" register: metallb_rand when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - metallb_secret.rc != 0 - name: Kubernetes Apps | Install secret of MetalLB if not existing command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system create secret generic memberlist --from-literal=secretkey={{ metallb_rand.stdout }}" become: true when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - metallb_secret.rc != 0 diff --git a/roles/kubernetes-apps/metrics_server/tasks/main.yml b/roles/kubernetes-apps/metrics_server/tasks/main.yml index d7dc45443a2f3b73530db3479a8093cd89c06272..c3be4b8308d4296dc36aa32124071e79070ec32c 100644 --- a/roles/kubernetes-apps/metrics_server/tasks/main.yml +++ b/roles/kubernetes-apps/metrics_server/tasks/main.yml @@ -2,14 +2,14 @@ # If all masters have node role, there are no tainted master and toleration should not be specified. 
- name: Check all masters are node or not set_fact: - masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube-master']) == groups['kube-master'] }}" + masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}" - name: Metrics Server | Delete addon dir file: path: "{{ kube_config_dir }}/addons/metrics_server" state: absent when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] tags: - upgrade @@ -21,7 +21,7 @@ group: root mode: 0755 when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Metrics Server | Templates list set_fact: @@ -43,7 +43,7 @@ with_items: "{{ metrics_server_templates }}" register: metrics_server_manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Metrics Server | Apply manifests kube: @@ -54,4 +54,4 @@ state: "latest" with_items: "{{ metrics_server_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml index b495106b199b807769ff66a3f77cba5c4e688d8d..db7e3f268f5ecd4d7733700f0c1b3ff421e35027 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -8,4 +8,4 @@ filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ canal_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] and not item is skipped + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml index 1baaa1ce634ea37dc91a28a7b1193c42cdb21730..d3d6ceec5357d136474e782b5b11e5d8bd286bc5 100644 --- a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml @@ -8,7 +8,7 @@ filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ cilium_node_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] and not item is skipped + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped - name: Cilium | Wait for pods to run command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 @@ -17,4 +17,4 @@ retries: 30 delay: 10 ignore_errors: yes - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml index 3ed49db810d6f904d42595585a4a5ec872c62806..ff56d246112927d6dad540a345a5813903041ce7 100644 --- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml @@ -8,7 +8,7 @@ filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ flannel_node_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] and not item is skipped + when: inventory_hostname == groups['kube_control_plane'][0] and not 
item is skipped - name: Flannel | Wait for flannel subnet.env file presence wait_for: diff --git a/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml index 56d21717c9c65c17c97b7f0cf9f9a97d4609b9c0..9f4250183ec00c54f4115a15f35e8c895de0962c 100644 --- a/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml @@ -6,4 +6,4 @@ filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ kube_ovn_node_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] and not item is skipped + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml index 8694e496f31e0debc0120868320b7c7cbe040bea..3e483bf7fa891d245b6dc741e11c2d90e8afc8c4 100644 --- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml @@ -8,7 +8,7 @@ resource: "ds" namespace: "kube-system" state: "latest" - delegate_to: "{{ groups['kube-master'] | first }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" run_once: true - name: kube-router | Wait for kube-router pods to be ready @@ -18,6 +18,6 @@ retries: 30 delay: 10 ignore_errors: yes - delegate_to: "{{ groups['kube-master'] | first }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" run_once: true changed_when: false diff --git a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml index eb496502848eab2b2ac0e5c8a4551c56c1a8458d..232d3e4034af4089c3c8a6046d3b7431f1323bbc 100644 --- a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml @@ -8,4 +8,4 @@ filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}" - when: inventory_hostname == groups['kube-master'][0] and not item is skipped + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/roles/kubernetes-apps/network_plugin/ovn4nfv/tasks/main.yml b/roles/kubernetes-apps/network_plugin/ovn4nfv/tasks/main.yml index 1262ee6b956d8ebfb5eff23a199cb381a83866cc..987ff2949b9613d314d97f6a88f94fc72333ee4a 100644 --- a/roles/kubernetes-apps/network_plugin/ovn4nfv/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/ovn4nfv/tasks/main.yml @@ -6,4 +6,4 @@ filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ ovn4nfv_node_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] and not item is skipped + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml index daeea97b07d76ae076c48a7aa2da4983a2af26c3..bc0f932d8d66cc7f084b74af215a2117243b8a08 100644 --- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml @@ -8,7 +8,7 @@ resource: "ds" namespace: "kube-system" state: "latest" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Weave | Wait for Weave to become available 
uri: @@ -18,4 +18,4 @@ retries: 180 delay: 5 until: "weave_status.status == 200 and 'Status: ready' in weave_status.content" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml index 006c35d1e9da35132ef596de13fd2d0e03b615a6..7588c1f7279d634d65f51fd1589b908ea3e7da8a 100644 --- a/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml +++ b/roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/tasks/main.yml @@ -5,7 +5,7 @@ dest: "{{ kube_config_dir }}/aws-ebs-csi-storage-class.yml" register: manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Persistent Volumes | Add AWS EBS CSI Storage Class kube: @@ -15,5 +15,5 @@ filename: "{{ kube_config_dir }}/aws-ebs-csi-storage-class.yml" state: "latest" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - manifests.changed diff --git a/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml index 04cca761851b96a79d54b6d842b3fd006b1bf8e1..04ac99ef835806f59c69110e0beab1eb2895a4eb 100644 --- a/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml +++ b/roles/kubernetes-apps/persistent_volumes/azuredisk-csi/tasks/main.yml @@ -5,7 +5,7 @@ dest: "{{ kube_config_dir }}/azure-csi-storage-class.yml" register: manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Persistent Volumes | Add Azure CSI Storage Class kube: @@ -15,5 +15,5 @@ filename: "{{ kube_config_dir }}/azure-csi-storage-class.yml" state: "latest" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - manifests.changed diff --git a/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml index f94f8ca3eb3e46363b4268f5fd6129e24914fd4f..c8ca8bc15bfbee8a4adb0b2f6dbf962b92c3fa2d 100644 --- a/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml +++ b/roles/kubernetes-apps/persistent_volumes/cinder-csi/tasks/main.yml @@ -5,7 +5,7 @@ dest: "{{ kube_config_dir }}/cinder-csi-storage-class.yml" register: manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Persistent Volumes | Add Cinder CSI Storage Class kube: @@ -15,5 +15,5 @@ filename: "{{ kube_config_dir }}/cinder-csi-storage-class.yml" state: "latest" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - manifests.changed diff --git a/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml index f1935e76b22bd6197ca9497c56b379917c7c0b09..d85e68fb45a70956db5db91a9260bd068eb88da4 100644 --- a/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml +++ b/roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/tasks/main.yml @@ -5,7 +5,7 @@ dest: "{{ kube_config_dir }}/gcp-pd-csi-storage-class.yml" register: manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == 
groups['kube_control_plane'][0] - name: Kubernetes Persistent Volumes | Add GCP PD CSI Storage Class kube: @@ -15,5 +15,5 @@ filename: "{{ kube_config_dir }}/gcp-pd-csi-storage-class.yml" state: "latest" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - manifests.changed diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml index 629c6add7700bed80c42b7c2488d26bea61114fd..cc42224e127daa45e08225a9e5ec71e6f19b6e84 100644 --- a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml +++ b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml @@ -5,7 +5,7 @@ dest: "{{ kube_config_dir }}/openstack-storage-class.yml" register: manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class kube: @@ -15,5 +15,5 @@ filename: "{{ kube_config_dir }}/openstack-storage-class.yml" state: "latest" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - manifests.changed diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index bbd39d63f59621d420dbea15999f3c0a2adf33ab..10f13893d9d04c34c8346441c0326be1b5110144 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -19,7 +19,7 @@ - {name: calico-kube-controllers, file: calico-kube-crb.yml, type: clusterrolebinding} register: calico_kube_manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - rbac_enabled or item.type not in rbac_resources - name: Start of Calico kube controllers @@ -33,7 +33,7 @@ with_items: - "{{ calico_kube_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped loop_control: label: "{{ item.item.file }}" diff --git a/roles/kubernetes-apps/registry/tasks/main.yml b/roles/kubernetes-apps/registry/tasks/main.yml index aa367649806e5925955aac1aa2516dc2fe8306f2..6b8b5e7bcbd3c006b278169561f8aa280a3cb5f0 100644 --- a/roles/kubernetes-apps/registry/tasks/main.yml +++ b/roles/kubernetes-apps/registry/tasks/main.yml @@ -38,7 +38,7 @@ dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}" with_items: "{{ registry_templates }}" register: registry_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Registry | Apply manifests kube: @@ -49,7 +49,7 @@ filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}" state: "latest" with_items: "{{ registry_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Registry | Create PVC manifests template: @@ -61,7 +61,7 @@ when: - registry_storage_class != none and registry_storage_class - registry_disk_size != none and registry_disk_size - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Registry | Apply PVC manifests kube: @@ -75,4 +75,4 @@ when: - registry_storage_class != none and registry_storage_class - registry_disk_size != none and 
registry_disk_size - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml b/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml index 32940af085a7ca8676d7f7477b4eef2cbd90ceb8..b979501cd2abf004bfa97a7959069aa16fc1cf24 100644 --- a/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml +++ b/roles/kubernetes-apps/snapshots/cinder-csi/tasks/main.yml @@ -5,7 +5,7 @@ dest: "{{ kube_config_dir }}/cinder-csi-snapshot-class.yml" register: manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kubernetes Snapshots | Add Cinder CSI Snapshot Class kube: @@ -13,5 +13,5 @@ filename: "{{ kube_config_dir }}/cinder-csi-snapshot-class.yml" state: "latest" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - manifests.changed diff --git a/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml b/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml index feeee4a412252b9b1b3be03b4aad4450e793a784..58f9c2ca26a15d7afb53e35e186c56b743bc0c88 100644 --- a/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml +++ b/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml @@ -7,7 +7,7 @@ - {name: rbac-snapshot-controller, file: rbac-snapshot-controller.yml} - {name: snapshot-controller, file: snapshot-controller.yml} register: snapshot_controller_manifests - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] tags: snapshot-controller - name: Snapshot Controller | Apply Manifests @@ -18,7 +18,7 @@ with_items: - "{{ snapshot_controller_manifests.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped loop_control: label: "{{ item.item.file }}" diff --git a/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml b/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml index 234fa9bffee047950bdef43cd4461394580a6463..b88f57c3cd1a0db6500e6113edfca4dd383da4f6 100644 --- a/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml +++ b/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml @@ -28,7 +28,7 @@ kube_encrypt_token: "{{ kube_encrypt_token_extracted }}" delegate_to: "{{ item }}" delegate_facts: true - with_inventory_hostnames: kube-master + with_inventory_hostnames: kube_control_plane when: kube_encrypt_token_extracted is defined - name: Write secrets for encrypting secret data at rest diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml index 6f961f2bcbe24f99ae77bafe4cfb7299b3d61475..1af7f0c6e7261fb551c8f49495810db8ef904842 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml @@ -18,15 +18,15 @@ --upload-certs register: kubeadm_upload_cert when: - - inventory_hostname == groups['kube-master']|first + - inventory_hostname == groups['kube_control_plane']|first - name: Parse certificate key if not set set_fact: - kubeadm_certificate_key: "{{ hostvars[groups['kube-master'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}" + kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}" run_once: yes when: - - 
hostvars[groups['kube-master'][0]]['kubeadm_upload_cert'] is defined - - hostvars[groups['kube-master'][0]]['kubeadm_upload_cert'] is not skipped + - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is defined + - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is not skipped - name: Create kubeadm ControlPlane config template: @@ -35,7 +35,7 @@ mode: 0640 backup: yes when: - - inventory_hostname != groups['kube-master']|first + - inventory_hostname != groups['kube_control_plane']|first - not kubeadm_already_run.stat.exists - name: Wait for k8s apiserver @@ -64,5 +64,5 @@ throttle: 1 until: kubeadm_join_control_plane is succeeded when: - - inventory_hostname != groups['kube-master']|first + - inventory_hostname != groups['kube_control_plane']|first - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml index 5a51a24be83737942c35ebbcb190a41019c1954d..ba214dcc3c2685efb726b675aa28ccc4097cde97 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml @@ -25,7 +25,7 @@ - name: kubeadm | aggregate all SANs set_fact: - apiserver_sans: "{{ (sans_base + groups['kube-master'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn) | unique }}" + apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn) | unique }}" vars: sans_base: - "kubernetes" @@ -38,12 +38,12 @@ sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}" sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}" sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}" - sans_access_ip: "{{ groups['kube-master'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}" - sans_ip: "{{ groups['kube-master'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}" - sans_address: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}" + sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}" + sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}" + sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}" sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}" - sans_hostname: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}" - sans_fqdn: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}" + sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}" + sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}" tags: facts - name: Create audit-policy directory @@ -86,7 +86,7 @@ register: apiserver_sans_check 
changed_when: "'does match certificate' not in apiserver_sans_check.stdout" when: - - inventory_hostname == groups['kube-master']|first + - inventory_hostname == groups['kube_control_plane']|first - kubeadm_already_run.stat.exists - name: kubeadm | regenerate apiserver cert 1/2 @@ -97,7 +97,7 @@ - apiserver.crt - apiserver.key when: - - inventory_hostname == groups['kube-master']|first + - inventory_hostname == groups['kube_control_plane']|first - kubeadm_already_run.stat.exists - apiserver_sans_check.changed @@ -107,7 +107,7 @@ init phase certs apiserver --config={{ kube_config_dir }}/kubeadm-config.yaml when: - - inventory_hostname == groups['kube-master']|first + - inventory_hostname == groups['kube_control_plane']|first - kubeadm_already_run.stat.exists - apiserver_sans_check.changed @@ -123,7 +123,7 @@ # Retry is because upload config sometimes fails retries: 3 until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - when: inventory_hostname == groups['kube-master']|first and not kubeadm_already_run.stat.exists + when: inventory_hostname == groups['kube_control_plane']|first and not kubeadm_already_run.stat.exists failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr environment: PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" @@ -132,7 +132,7 @@ - name: set kubeadm certificate key set_fact: kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}" - with_items: "{{ hostvars[groups['kube-master'][0]]['kubeadm_init'].stdout_lines | default([]) }}" + with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}" when: - kubeadm_certificate_key is not defined - (item | trim) is match('.*--certificate-key.*') @@ -143,7 +143,7 @@ {{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create {{ kubeadm_token }} changed_when: false when: - - inventory_hostname == groups['kube-master']|first + - inventory_hostname == groups['kube_control_plane']|first - kubeadm_token is defined - kubeadm_refresh_token tags: @@ -156,7 +156,7 @@ retries: 5 delay: 5 until: temp_token is succeeded - delegate_to: "{{ groups['kube-master'] | first }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" when: kubeadm_token is not defined tags: - kubeadm_token @@ -180,7 +180,7 @@ # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file. 
- name: kubeadm | Remove taint for master with node role command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} {{ item }}" - delegate_to: "{{ groups['kube-master'] | first }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" with_items: - "node-role.kubernetes.io/master:NoSchedule-" - "node-role.kubernetes.io/control-plane:NoSchedule-" diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml index 39fb4f3f98e8739e104a27a688d8dc750489b89d..0570ee9d0930b9084cbd468832e5d10e1b796ec1 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml @@ -3,7 +3,7 @@ uri: url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz" validate_certs: false - when: inventory_hostname in groups['kube-master'] + when: inventory_hostname in groups['kube_control_plane'] register: _result retries: 60 delay: 5 @@ -23,7 +23,7 @@ # Retry is because upload config sometimes fails retries: 3 until: kubeadm_upgrade.rc == 0 - when: inventory_hostname == groups['kube-master']|first + when: inventory_hostname == groups['kube_control_plane']|first failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr environment: PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" @@ -40,7 +40,7 @@ --etcd-upgrade={{ etcd_kubeadm_enabled | bool | lower }} --force register: kubeadm_upgrade - when: inventory_hostname != groups['kube-master']|first + when: inventory_hostname != groups['kube_control_plane']|first failed_when: - kubeadm_upgrade.rc != 0 - '"field is immutable" not in kubeadm_upgrade.stderr' diff --git a/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2 b/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2 index 3c5e0c18f3d091ad63fbfa4bcbaf9467035a16bf..825d983c63d4716ceec451f1878566a292a4ce46 100644 --- a/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2 +++ b/roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2 @@ -3,7 +3,7 @@ Description=Timer to renew K8S control plane certificates [Timer] # First Monday of each month -OnCalendar=Mon *-*-1..7 03:{{ groups['kube-master'].index(inventory_hostname) }}0:00 +OnCalendar=Mon *-*-1..7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00 [Install] WantedBy=multi-user.target diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 index 50025330a66cf1651b2196c8084fb3b17d51e0dc..c0c6e54397b599a75d2b57b0c54cb80c6fa48d19 100644 --- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 @@ -16,7 +16,7 @@ nodeRegistration: {% if kube_override_hostname|default('') %} name: {{ kube_override_hostname }} {% endif %} -{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %} +{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube-node'] %} taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml index b5c0f255253d0def1afac1edf7752998745d0a92..787613e60659600ebc6d3f2ab83b20b875a8e40e 100644 --- 
a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml +++ b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml @@ -1,7 +1,7 @@ --- - name: Parse certificate key if not set set_fact: - kubeadm_certificate_key: "{{ hostvars[groups['kube-master'][0]]['kubeadm_certificate_key'] }}" + kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_certificate_key'] }}" when: kubeadm_certificate_key is undefined - name: Pull control plane certs down diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml index 148226e6df5cd7d3241c40cf33f44fa558a0d632..5cb654320a28e2d029583bc177831d0ba9f47335 100644 --- a/roles/kubernetes/kubeadm/tasks/main.yml +++ b/roles/kubernetes/kubeadm/tasks/main.yml @@ -25,7 +25,7 @@ get_checksum: no get_mime: no register: kubeadm_ca_stat - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" run_once: true - name: Calculate kubeadm CA cert hash @@ -36,14 +36,14 @@ when: - kubeadm_ca_stat.stat is defined - kubeadm_ca_stat.stat.exists - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" run_once: true changed_when: false - name: Create kubeadm token for joining nodes with 24h expiration (default) command: "{{ bin_dir }}/kubeadm token create" register: temp_token - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" when: kubeadm_token is not defined changed_when: false @@ -118,7 +118,7 @@ args: executable: /bin/bash run_once: true - delegate_to: "{{ groups['kube-master']|first }}" + delegate_to: "{{ groups['kube_control_plane']|first }}" delegate_facts: false when: - kubeadm_config_api_fqdn is not defined @@ -138,7 +138,7 @@ - name: Restart all kube-proxy pods to ensure that they load the new configmap command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0" run_once: true - delegate_to: "{{ groups['kube-master']|first }}" + delegate_to: "{{ groups['kube_control_plane']|first }}" delegate_facts: false when: - kubeadm_config_api_fqdn is not defined @@ -151,6 +151,6 @@ include_tasks: kubeadm_etcd_node.yml when: - etcd_kubeadm_enabled - - inventory_hostname not in groups['kube-master'] + - inventory_hostname not in groups['kube_control_plane'] - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool - kube_network_plugin != "calico" or calico_datastore == "etcd" diff --git a/roles/kubernetes/node-label/tasks/main.yml b/roles/kubernetes/node-label/tasks/main.yml index 9522d29b54b6dbcede473a1332ee120b137e6e04..d01fda83588c4d8c1238d644c5cb7b70947bd46d 100644 --- a/roles/kubernetes/node-label/tasks/main.yml +++ b/roles/kubernetes/node-label/tasks/main.yml @@ -9,7 +9,7 @@ until: result.status == 200 retries: 10 delay: 6 - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Set role node label to empty list set_fact: @@ -42,6 +42,6 @@ command: >- {{ bin_dir }}/kubectl label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true loop: "{{ role_node_labels + inventory_node_labels }}" - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" changed_when: false ... 
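Note: every role touched by this changeset references the new group name directly and relies on the group_by compatibility plays (added to cluster.yml, scale.yml and upgrade-cluster.yml) to map legacy inventories. An alternative, shown here only as a hedged sketch and not used anywhere in this diff, would be to fall back to the legacy group inside each expression:

- name: Example | delegate to the first control plane node, tolerating legacy inventories
  # Illustrative only: prefers 'kube_control_plane', falls back to the legacy 'kube-master' group
  command: "{{ bin_dir }}/kubectl get nodes"
  delegate_to: "{{ (groups['kube_control_plane'] | default(groups['kube-master'])) | first }}"
  run_once: true

Centralizing the mapping in a single group_by play keeps every role free of such per-task fallbacks, which is the approach this diff takes throughout.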
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml index f7deae705648971e97e10947459a354e45af1cd6..c24a1fedc0d0450109ef66b5879534cc23cef023 100644 --- a/roles/kubernetes/node/tasks/install.yml +++ b/roles/kubernetes/node/tasks/install.yml @@ -8,7 +8,7 @@ tags: - kubeadm when: - - not inventory_hostname in groups['kube-master'] + - not inventory_hostname in groups['kube_control_plane'] - name: install | Copy kubelet binary from download dir copy: diff --git a/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 b/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 index ef3269fc85c3da71a37b0c841e7c80704ca0f8b2..1d5d7d945f004408ee36e72620760158122b37e4 100644 --- a/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 +++ b/roles/kubernetes/node/templates/loadbalancer/haproxy.cfg.j2 @@ -38,6 +38,6 @@ backend kube_api_backend default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100 option httpchk GET /healthz http-check expect status 200 - {% for host in groups['kube-master'] -%} + {% for host in groups['kube_control_plane'] -%} server {{ host }} {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }} check check-ssl verify none {% endfor -%} diff --git a/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 b/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 index 6361a6f391bdc99d4463c62a314b3318955267b1..38e34aa40cbc6326bc20c69b2873e882b75e4273 100644 --- a/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 +++ b/roles/kubernetes/node/templates/loadbalancer/nginx.conf.j2 @@ -13,7 +13,7 @@ events { stream { upstream kube_apiserver { least_conn; - {% for host in groups['kube-master'] -%} + {% for host in groups['kube_control_plane'] -%} server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }}; {% endfor -%} } diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml index ec78c50b6d0c074f69f0de6bb710a9f707a8045a..6325ac336c855f7d9d565726db57c296aa1ea984 100644 --- a/roles/kubernetes/preinstall/handlers/main.yml +++ b/roles/kubernetes/preinstall/handlers/main.yml @@ -55,7 +55,7 @@ get_checksum: no get_mime: no register: kube_apiserver_set - when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf' + when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf' # FIXME(mattymo): Also restart for kubeadm mode - name: Preinstall | kube-controller configured @@ -65,13 +65,13 @@ get_checksum: no get_mime: no register: kube_controller_set - when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf' + when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf' - name: Preinstall | restart kube-controller-manager docker shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" when: - container_manager == "docker" - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] - dns_mode != 'none' - resolvconf_mode == 'host_resolvconf' - kube_controller_set.stat.exists @@ -80,7 +80,7 @@ shell: "{{ bin_dir }}/crictl pods --name 
kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" when: - container_manager in ['crio', 'containerd'] - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] - dns_mode != 'none' - resolvconf_mode == 'host_resolvconf' - kube_controller_set.stat.exists @@ -89,7 +89,7 @@ shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" when: - container_manager == "docker" - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] - dns_mode != 'none' - resolvconf_mode == 'host_resolvconf' @@ -97,7 +97,7 @@ shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" when: - container_manager in ['crio', 'containerd'] - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] - dns_mode != 'none' - resolvconf_mode == 'host_resolvconf' diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml index fe18b23fe331253afc315ea0bf8cdd44a8eed67d..c2bc22555ef7e61210f1a0e7328292af84b345b5 100644 --- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml @@ -1,9 +1,9 @@ --- -- name: Stop if either kube-master or kube-node group is empty +- name: Stop if either kube_control_plane or kube-node group is empty assert: that: "groups.get('{{ item }}')" with_items: - - kube-master + - kube_control_plane - kube-node run_once: true when: not ignore_assert_errors @@ -79,7 +79,7 @@ that: ansible_memtotal_mb >= minimal_master_memory_mb when: - not ignore_assert_errors - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] - name: Stop if memory is too small for nodes assert: @@ -136,7 +136,7 @@ assert: that: rbac_enabled and kube_api_anonymous_auth when: - - kube_apiserver_insecure_port == 0 and inventory_hostname in groups['kube-master'] + - kube_apiserver_insecure_port == 0 and inventory_hostname in groups['kube_control_plane'] - not ignore_assert_errors - name: Stop if kernel version is too low @@ -193,7 +193,7 @@ - kube_network_plugin == 'calico' - 'calico_version_on_server.stdout is defined' - calico_version_on_server.stdout - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] run_once: yes - name: "Check that cluster_id is set if calico_rr enabled" @@ -204,7 +204,7 @@ when: - kube_network_plugin == 'calico' - peer_with_calico_rr - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] run_once: yes - name: "Check that calico_rr nodes are in k8s-cluster group" diff --git a/roles/kubernetes/tokens/tasks/check-tokens.yml b/roles/kubernetes/tokens/tasks/check-tokens.yml index c8fe3812fec6a92dc500ffc94a2be6d4af45e795..ae75f0d04a512ce8d0351aae7509c5e0a226bc09 100644 --- a/roles/kubernetes/tokens/tasks/check-tokens.yml +++ b/roles/kubernetes/tokens/tasks/check-tokens.yml @@ -5,7 +5,7 @@ get_attributes: no get_checksum: yes get_mime: no - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" register: known_tokens_master run_once: true @@ -32,7 +32,7 @@ set_fact: sync_tokens: >- {%- set tokens = 
{'sync': False} -%} - {%- for server in groups['kube-master'] | intersect(ansible_play_batch) + {%- for server in groups['kube_control_plane'] | intersect(ansible_play_batch) if (not hostvars[server].known_tokens.stat.exists) or (hostvars[server].known_tokens.stat.checksum|default('') != known_tokens_master.stat.checksum|default('')) -%} {%- set _ = tokens.update({'sync': True}) -%} diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml index 2b94ce4f3493f0ea9bbcc903e526bd7683323f57..40d4910d28a9ab30b6a250859eed9a1a13b16d64 100644 --- a/roles/kubernetes/tokens/tasks/gen_tokens.yml +++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml @@ -5,7 +5,7 @@ dest: "{{ kube_script_dir }}/kube-gen-token.sh" mode: 0700 run_once: yes - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" when: gen_tokens|default(false) - name: Gen_tokens | generate tokens for master components @@ -14,11 +14,11 @@ TOKEN_DIR: "{{ kube_token_dir }}" with_nested: - [ "system:kubectl" ] - - "{{ groups['kube-master'] }}" + - "{{ groups['kube_control_plane'] }}" register: gentoken_master changed_when: "'Added' in gentoken_master.stdout" run_once: yes - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" when: gen_tokens|default(false) - name: Gen_tokens | generate tokens for node components @@ -31,14 +31,14 @@ register: gentoken_node changed_when: "'Added' in gentoken_node.stdout" run_once: yes - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" when: gen_tokens|default(false) - name: Gen_tokens | Get list of tokens from first master command: "find {{ kube_token_dir }} -maxdepth 1 -type f" register: tokens_list check_mode: no - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" run_once: true when: sync_tokens|default(false) @@ -49,7 +49,7 @@ executable: /bin/bash register: tokens_data check_mode: no - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" run_once: true when: sync_tokens|default(false) @@ -58,7 +58,7 @@ args: executable: /bin/bash when: - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] - sync_tokens|default(false) - - inventory_hostname != groups['kube-master'][0] + - inventory_hostname != groups['kube_control_plane'][0] - tokens_data.stdout diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 3b66cab846ee60cf2e35e1dc67f827f0bdf7f553..782e15d400609987c1348515fde0a0ae49a1da73 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -447,11 +447,11 @@ ssl_ca_dirs: |- ] # Vars for pointing to kubernetes api endpoints -is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}" -kube_apiserver_count: "{{ groups['kube-master'] | length }}" +is_kube_master: "{{ inventory_hostname in groups['kube_control_plane'] }}" +kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}" kube_apiserver_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}" kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}" -first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(fallback_ips[groups['kube-master'][0]])) }}" +first_kube_master: "{{ 
hostvars[groups['kube_control_plane'][0]]['access_ip'] | default(hostvars[groups['kube_control_plane'][0]]['ip'] | default(fallback_ips[groups['kube_control_plane'][0]])) }}" loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}" loadbalancer_apiserver_type: "nginx" # applied if only external loadbalancer_apiserver is defined, otherwise ignored @@ -483,7 +483,7 @@ kube_apiserver_client_key: "{{ kube_cert_dir }}/ca.key" etcd_events_cluster_enabled: false # etcd group can be empty when kubeadm manages etcd -etcd_hosts: "{{ groups['etcd'] | default(groups['kube-master']) }}" +etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}" # Vars for pointing to etcd endpoints is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}" diff --git a/roles/kubespray-defaults/tasks/no_proxy.yml b/roles/kubespray-defaults/tasks/no_proxy.yml index 9544185377b1e002c512c72d2487a27d73ec0eb2..984bb50a282d46eb30b9da397b989cb65ca6c7c1 100644 --- a/roles/kubespray-defaults/tasks/no_proxy.yml +++ b/roles/kubespray-defaults/tasks/no_proxy.yml @@ -7,7 +7,7 @@ {{ loadbalancer_apiserver.address | default('') }}, {%- endif -%} {%- if no_proxy_exclude_workers | default(false) -%} - {% set cluster_or_master = 'kube-master' %} + {% set cluster_or_master = 'kube_control_plane' %} {%- else -%} {% set cluster_or_master = 'k8s-cluster' %} {%- endif -%} diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml index 78e4cb881806111bd3c8a0eff34500736051370a..a0a656707bfaf84b6a00742bb50d6e32307f2048 100644 --- a/roles/network_plugin/calico/tasks/check.yml +++ b/roles/network_plugin/calico/tasks/check.yml @@ -43,7 +43,7 @@ changed_when: False register: calico run_once: True - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" - name: "Set calico_pool_conf" set_fact: diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml index c4831cbbc8215d7fc8b37c43e101b59ffbd7f474..d214b29b5ec456a68f38ba34e1d83826a1e21022 100644 --- a/roles/network_plugin/calico/tasks/install.yml +++ b/roles/network_plugin/calico/tasks/install.yml @@ -39,7 +39,7 @@ include_tasks: typha_certs.yml when: - typha_secure - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Calico | Install calicoctl wrapper script template: @@ -74,14 +74,14 @@ delay: "{{ retry_stagger | random + 3 }}" changed_when: false when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined assert: that: "[calico_pool_cidr] | ipaddr(kube_pods_subnet) | length == 1" msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - 'calico_conf.stdout == "0"' - calico_pool_cidr is defined @@ -97,7 +97,7 @@ delay: "{{ retry_stagger | random + 3 }}" changed_when: false when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - enable_dual_stack_networks - name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined @@ -105,7 +105,7 @@ that: "[calico_pool_cidr_ipv6] | ipaddr(kube_pods_subnet_ipv6) | length == 1" msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ 
kube_pods_subnet_ipv6 }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0" - calico_pool_cidr_ipv6 is defined - enable_dual_stack_networks @@ -134,9 +134,9 @@ filename: "{{ kube_config_dir }}/kdd-crds.yml" state: "latest" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] when: - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] - calico_datastore == "kdd" - name: Calico | Configure calico network pool @@ -157,7 +157,7 @@ "vxlanMode": "{{ calico_vxlan_mode }}", "natOutgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }} }} when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - 'calico_conf.stdout == "0"' - name: Calico | Configure calico ipv6 network pool (version >= v3.3.0) @@ -176,7 +176,7 @@ "vxlanMode": "{{ calico_vxlan_mode_ipv6 }}", "natOutgoing": {{ nat_outgoing_ipv6|default(false) and not peer_with_router_ipv6|default(false) }} }} when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0" - calico_version is version("v3.3.0", ">=") - enable_dual_stack_networks | bool @@ -214,7 +214,7 @@ "serviceExternalIPs": {{ _service_external_ips|default([]) }} }} changed_when: false when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Calico | Configure peering with router(s) at global scope command: @@ -238,7 +238,7 @@ with_items: - "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - peer_with_router|default(false) - name: Calico | Configure peering with route reflectors at global scope @@ -264,7 +264,7 @@ with_items: - "{{ groups['calico-rr'] | default([]) }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - peer_with_calico_rr|default(false) - name: Calico | Configure route reflectors to peer with each other @@ -290,7 +290,7 @@ with_items: - "{{ groups['calico-rr'] | default([]) }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - peer_with_calico_rr|default(false) - name: Calico | Create calico manifests @@ -305,7 +305,7 @@ - {name: calico, file: calico-crb.yml, type: clusterrolebinding} register: calico_node_manifests when: - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] - rbac_enabled or item.type not in rbac_resources - name: Calico | Create calico manifests for typha @@ -316,7 +316,7 @@ - {name: calico, file: calico-typha.yml, type: typha} register: calico_node_typha_manifest when: - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] - typha_enabled and calico_datastore == "kdd" - name: Start Calico resources @@ -331,7 +331,7 @@ - "{{ calico_node_manifests.results }}" - "{{ calico_node_typha_manifest.results }}" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped 
loop_control: label: "{{ item.item.file }}" @@ -340,7 +340,7 @@ wait_for: path: /etc/cni/net.d/calico-kubeconfig when: - - inventory_hostname not in groups['kube-master'] + - inventory_hostname not in groups['kube_control_plane'] - calico_datastore == "kdd" - name: Calico | Configure node asNumber for per node peering diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml index 517218a88a60e8c86f50bbfcad35b92caac61681..e3ca15065d52311492554dd9daf405e80584b735 100644 --- a/roles/network_plugin/calico/tasks/pre.yml +++ b/roles/network_plugin/calico/tasks/pre.yml @@ -22,6 +22,6 @@ args: executable: /bin/bash register: calico_kubelet_name - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" when: - "cloud_provider is defined" diff --git a/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 b/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 index e6e4ec6e873f1e9dbcd44ba965606896e4b6bc0c..a6c080cf4856cffbec5c855ae505cb33ccfc81d3 100644 --- a/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 +++ b/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash DATASTORE_TYPE=kubernetes \ -{% if inventory_hostname in groups['kube-master'] %} +{% if inventory_hostname in groups['kube_control_plane'] %} KUBECONFIG=/etc/kubernetes/admin.conf \ {% else %} KUBECONFIG=/etc/cni/net.d/calico-kubeconfig \ diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml index 982182446da103937f4964858f5986a4ffb75c4c..320c20ad3a4a28d7a510a596f4dc6987eb84332d 100644 --- a/roles/network_plugin/canal/tasks/main.yml +++ b/roles/network_plugin/canal/tasks/main.yml @@ -59,7 +59,7 @@ - {name: canal-flannel, file: canal-crb-flannel.yml, type: clusterrolebinding} register: canal_manifests when: - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] - name: Canal | Install calicoctl wrapper script template: diff --git a/roles/network_plugin/cilium/tasks/install.yml b/roles/network_plugin/cilium/tasks/install.yml index 7a8750d5dac8da61fee318c7a1d176fe12385b01..1470d2d975ec8daf14fad5eddc23245cb08e9b02 100644 --- a/roles/network_plugin/cilium/tasks/install.yml +++ b/roles/network_plugin/cilium/tasks/install.yml @@ -39,7 +39,7 @@ - {name: cilium, file: cilium-sa.yml, type: sa} register: cilium_node_manifests when: - - inventory_hostname in groups['kube-master'] + - inventory_hostname in groups['kube_control_plane'] - name: Cilium | Enable portmap addon template: diff --git a/roles/network_plugin/cilium/tasks/main.yml b/roles/network_plugin/cilium/tasks/main.yml index 515536094faa85525b775de40f8e6680ea06904f..c3bee558e138eac42be6a2eb3438f2888baa8066 100644 --- a/roles/network_plugin/cilium/tasks/main.yml +++ b/roles/network_plugin/cilium/tasks/main.yml @@ -1,4 +1,4 @@ --- - import_tasks: check.yml -- include_tasks: install.yml \ No newline at end of file +- include_tasks: install.yml diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml index 115743ace085fd422dad39c6a25d9d2995c0f79c..8db000c30336472438518f3c6b068364bc71cb6c 100644 --- a/roles/network_plugin/flannel/tasks/main.yml +++ b/roles/network_plugin/flannel/tasks/main.yml @@ -8,4 +8,4 @@ - {name: kube-flannel, file: cni-flannel.yml, type: ds} register: flannel_node_manifests when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] diff --git 
a/roles/network_plugin/kube-ovn/tasks/main.yml b/roles/network_plugin/kube-ovn/tasks/main.yml index c416f120aad01f5a07158333649d789624bacfcd..2efafa4cd6341dffcfd0185667040464c3d88aaf 100644 --- a/roles/network_plugin/kube-ovn/tasks/main.yml +++ b/roles/network_plugin/kube-ovn/tasks/main.yml @@ -1,9 +1,9 @@ --- - name: Kube-OVN | Label ovn-db node command: >- - {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master + {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: Kube-OVN | Create Kube-OVN manifests template: diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml index 6b8719e8aad7ff3b37349a1bffcf110cf029d2f7..6be517bc45082c26325b3891d2b6a14042ee959a 100644 --- a/roles/network_plugin/kube-router/tasks/annotate.yml +++ b/roles/network_plugin/kube-router/tasks/annotate.yml @@ -1,21 +1,21 @@ --- -- name: kube-router | Add annotations on kube-master +- name: kube-router | Add annotations on kube_control_plane command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}" with_items: - "{{ kube_router_annotations_master }}" - delegate_to: "{{ groups['kube-master'][0] }}" - when: kube_router_annotations_master is defined and inventory_hostname in groups['kube-master'] + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane'] - name: kube-router | Add annotations on kube-node command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}" with_items: - "{{ kube_router_annotations_node }}" - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" when: kube_router_annotations_node is defined and inventory_hostname in groups['kube-node'] - name: kube-router | Add common annotations on all servers command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}" with_items: - "{{ kube_router_annotations_all }}" - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s-cluster'] diff --git a/roles/network_plugin/kube-router/tasks/main.yml b/roles/network_plugin/kube-router/tasks/main.yml index 48d8abe325adab3a213b6113fa82d70a171274d4..f107eed64c44086901385f74e65b04b089ae2fa5 100644 --- a/roles/network_plugin/kube-router/tasks/main.yml +++ b/roles/network_plugin/kube-router/tasks/main.yml @@ -55,5 +55,5 @@ template: src: kube-router.yml.j2 dest: "{{ kube_config_dir }}/kube-router.yml" - delegate_to: "{{ groups['kube-master'] | first }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" run_once: true diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml index 45f877f6fb0a66fc4ef86e12653e0be713d9fb7f..191df8ceff0e7e3068a57631ccc5fa0599ce343b 100644 --- a/roles/network_plugin/macvlan/tasks/main.yml +++ b/roles/network_plugin/macvlan/tasks/main.yml @@ -3,7 +3,7 @@ command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'" changed_when: false register: node_pod_cidr_cmd - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: 
"{{ groups['kube_control_plane'][0] }}" - name: Macvlan | set node_pod_cidr set_fact: diff --git a/roles/network_plugin/ovn4nfv/tasks/main.yml b/roles/network_plugin/ovn4nfv/tasks/main.yml index 32a4c2dc5eace1aceeac22053d04b6dc8a08c335..26dbd32bdb576daa1638223600cc328a007b71a3 100644 --- a/roles/network_plugin/ovn4nfv/tasks/main.yml +++ b/roles/network_plugin/ovn4nfv/tasks/main.yml @@ -1,9 +1,9 @@ --- - name: ovn4nfv | Label control-plane node command: >- - {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane + {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - name: ovn4nfv | Create ovn4nfv-k8s manifests template: diff --git a/roles/recover_control_plane/control-plane/tasks/main.yml b/roles/recover_control_plane/control-plane/tasks/main.yml index 5f4b6a922a1a18ff5d0feac5d0c6da0ca7d632dd..450e6f36d946888b81c462eeedeb7043ae26cba7 100644 --- a/roles/recover_control_plane/control-plane/tasks/main.yml +++ b/roles/recover_control_plane/control-plane/tasks/main.yml @@ -8,22 +8,22 @@ retries: 6 delay: 10 changed_when: false - when: groups['broken_kube-master'] + when: groups['broken_kube_control_plane'] -- name: Delete broken kube-master nodes from cluster +- name: Delete broken kube_control_plane nodes from cluster command: "{{ bin_dir }}/kubectl delete node {{ item }}" environment: - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config" - with_items: "{{ groups['broken_kube-master'] }}" + with_items: "{{ groups['broken_kube_control_plane'] }}" register: delete_broken_kube_masters failed_when: false - when: groups['broken_kube-master'] + when: groups['broken_kube_control_plane'] -- name: Fail if unable to delete broken kube-master nodes from cluster +- name: Fail if unable to delete broken kube_control_plane nodes from cluster fail: - msg: "Unable to delete broken kube-master node: {{ item.item }}" + msg: "Unable to delete broken kube_control_plane node: {{ item.item }}" loop: "{{ delete_broken_kube_masters.results }}" changed_when: false when: - - groups['broken_kube-master'] + - groups['broken_kube_control_plane'] - "item.rc != 0 and not 'NotFound' in item.stderr" diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml index c4660ef87c703a540217dbaa62afba59abceafbb..fd4c6fc582bab127e4a8ace199d873900dbac5d5 100644 --- a/roles/remove-node/post-remove/tasks/main.yml +++ b/roles/remove-node/post-remove/tasks/main.yml @@ -1,5 +1,5 @@ --- - name: Delete node # noqa 301 command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}" - delegate_to: "{{ groups['kube-master']|first }}" + delegate_to: "{{ groups['kube_control_plane']|first }}" ignore_errors: yes \ No newline at end of file diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml index 42316e209d4fd1d7323a9713b88a7035fadfc323..ba9c945313d18a3ee85d17dc1c5d9fc01d7b03e9 100644 --- a/roles/remove-node/pre-remove/tasks/main.yml +++ b/roles/remove-node/pre-remove/tasks/main.yml @@ -11,7 +11,7 @@ | jq "select(. 
| test(\"^{{ hostvars[item]['kube_override_hostname']|default(item) }}$\"))" loop: "{{ node.split(',') | default(groups['kube-node']) }}" register: nodes - delegate_to: "{{ groups['kube-master']|first }}" + delegate_to: "{{ groups['kube_control_plane']|first }}" changed_when: false run_once: true @@ -33,7 +33,7 @@ loop: "{{ nodes_to_drain }}" register: result failed_when: result.rc != 0 and not allow_ungraceful_removal - delegate_to: "{{ groups['kube-master']|first }}" + delegate_to: "{{ groups['kube_control_plane']|first }}" run_once: true until: result.rc == 0 or allow_ungraceful_removal retries: "{{ drain_retries }}" diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml index 5e6309e174a9790721aacc8d380f1b2b64e2c480..805677f8684fa893004933e34f67b09d4cfa22c5 100644 --- a/roles/upgrade/post-upgrade/tasks/main.yml +++ b/roles/upgrade/post-upgrade/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Uncordon node command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}" - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" when: - needs_cordoning|default(false) diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml index bf436d360cc812ae7526c2c2777eccc3b6f19826..d969175e3d60c5c4eb80253bb8d45c67cfafdeb7 100644 --- a/roles/upgrade/pre-upgrade/tasks/main.yml +++ b/roles/upgrade/pre-upgrade/tasks/main.yml @@ -21,7 +21,7 @@ {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }} -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }' register: kubectl_node_ready - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" failed_when: false changed_when: false @@ -32,7 +32,7 @@ {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }} -o jsonpath='{ .spec.unschedulable }' register: kubectl_node_schedulable - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" failed_when: false changed_when: false @@ -49,12 +49,12 @@ block: - name: Cordon node command: "{{ bin_dir }}/kubectl cordon {{ kube_override_hostname|default(inventory_hostname) }}" - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" - name: Check kubectl version command: "{{ bin_dir }}/kubectl version --client --short" register: kubectl_version - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" run_once: yes changed_when: false when: @@ -90,6 +90,6 @@ fail: msg: "Failed to drain node {{ inventory_hostname }}" when: upgrade_node_fail_if_drain_fails - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" when: - needs_cordoning diff --git a/scale.yml b/scale.yml index 3e727483770bed831d16358d31f46433c9c0e430..f6a8578e77bb68c23f5320b7be284a8944a99d6b 100644 --- a/scale.yml +++ b/scale.yml @@ -2,6 +2,15 @@ - name: Check ansible version import_playbook: ansible_version.yml +- name: Add kube-master nodes to kube_control_plane + # This is for old inventory which contains kube-master instead of kube_control_plane + hosts: kube-master + gather_facts: false + tasks: + - name: add nodes to kube_control_plane group + group_by: + key: 'kube_control_plane' + - hosts: bastion[0] gather_facts: False environment: "{{ 
proxy_disable_env }}" @@ -32,8 +41,8 @@ - { role: kubespray-defaults } - { role: etcd, tags: etcd, etcd_cluster_setup: false } -- name: Download images to ansible host cache via first kube-master node - hosts: kube-master[0] +- name: Download images to ansible host cache via first kube_control_plane node + hosts: kube_control_plane[0] gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -64,7 +73,7 @@ - { role: kubernetes/node, tags: node } - name: Upload control plane certs and retrieve encryption key - hosts: kube-master | first + hosts: kube_control_plane | first environment: "{{ proxy_disable_env }}" gather_facts: False tags: kubeadm diff --git a/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 b/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 index b842c97a7b83e4f61d5f7565e97cdf0327154e12..8e59e2f3cdc2205dcb7e5ca819ea4edaad5fc9b0 100644 --- a/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 +++ b/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 @@ -4,6 +4,10 @@ instance-{{ loop.index }} ansible_ssh_host={{instance.stdout}} {% endfor %} {% if mode is defined and mode in ["separate", "separate-scale"] %} +[kube_control_plane] +instance-1 + +# TODO(oomichi): Remove all kube-master groups from this file after releasing v2.16. [kube-master] instance-1 @@ -13,6 +17,10 @@ instance-2 [etcd] instance-3 {% elif mode is defined and mode in ["ha", "ha-scale"] %} +[kube_control_plane] +instance-1 +instance-2 + [kube-master] instance-1 instance-2 @@ -25,6 +33,9 @@ instance-1 instance-2 instance-3 {% elif mode == "default" %} +[kube_control_plane] +instance-1 + [kube-master] instance-1 @@ -34,6 +45,9 @@ instance-2 [etcd] instance-1 {% elif mode == "aio" %} +[kube_control_plane] +instance-1 + [kube-master] instance-1 @@ -46,6 +60,10 @@ instance-1 [vault] instance-1 {% elif mode == "ha-recover" %} +[kube_control_plane] +instance-1 +instance-2 + [kube-master] instance-1 instance-2 @@ -64,6 +82,11 @@ instance-2 [broken_etcd] instance-2 etcd_member_name=etcd3 {% elif mode == "ha-recover-noquorum" %} +[kube_control_plane] +instance-3 +instance-1 +instance-2 + [kube-master] instance-3 instance-1 diff --git a/tests/scripts/testcases_run.sh b/tests/scripts/testcases_run.sh index 9f9870b57674f125157970d6d5dd198b6895015f..5c27747cef5e22b60942f455f323d43880c4f51f 100755 --- a/tests/scripts/testcases_run.sh +++ b/tests/scripts/testcases_run.sh @@ -65,7 +65,7 @@ fi # Test control plane recovery if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes reset.yml - ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube-master:!fake_hosts recover-control-plane.yml + ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube_control_plane:!fake_hosts recover-control-plane.yml fi # Tests Cases diff --git a/tests/templates/inventory-aws.j2 b/tests/templates/inventory-aws.j2 index 3ed86eb963ef25aa0e24df35ade63db2c8a155b6..f5bba6fd79de490ece7fc8484dfc7de606948feb 100644 --- a/tests/templates/inventory-aws.j2 +++ b/tests/templates/inventory-aws.j2 @@ -2,7 +2,7 @@ node1 
ansible_ssh_host={{ec2.instances[0].public_ip}} ansible_ssh_user={{ssh_use node2 ansible_ssh_host={{ec2.instances[1].public_ip}} ansible_ssh_user={{ssh_user}} node3 ansible_ssh_host={{ec2.instances[2].public_ip}} ansible_ssh_user={{ssh_user}} -[kube-master] +[kube_control_plane] node1 node2 @@ -21,12 +21,12 @@ node2 [k8s-cluster:children] kube-node -kube-master +kube_control_plane calico-rr [calico-rr] -[broken_kube-master] +[broken_kube_control_plane] node2 [broken_etcd] diff --git a/tests/templates/inventory-do.j2 b/tests/templates/inventory-do.j2 index ab7d95220b9ff80216bcdf223e6e2ed6712152f3..f11306ce356489196ffee9013e02302b21963717 100644 --- a/tests/templates/inventory-do.j2 +++ b/tests/templates/inventory-do.j2 @@ -3,7 +3,7 @@ {% endfor %} {% if mode is defined and mode == "separate" %} -[kube-master] +[kube_control_plane] {{droplets.results[0].droplet.name}} [kube-node] @@ -15,7 +15,7 @@ [vault] {{droplets.results[2].droplet.name}} {% elif mode is defined and mode == "ha" %} -[kube-master] +[kube_control_plane] {{droplets.results[0].droplet.name}} {{droplets.results[1].droplet.name}} @@ -30,13 +30,13 @@ {{droplets.results[1].droplet.name}} {{droplets.results[2].droplet.name}} -[broken_kube-master] +[broken_kube_control_plane] {{droplets.results[1].droplet.name}} [broken_etcd] {{droplets.results[2].droplet.name}} {% else %} -[kube-master] +[kube_control_plane] {{droplets.results[0].droplet.name}} [kube-node] @@ -53,5 +53,5 @@ [k8s-cluster:children] kube-node -kube-master +kube_control_plane calico-rr diff --git a/tests/templates/inventory-gce.j2 b/tests/templates/inventory-gce.j2 index 55f67deecccd581fe77a3b0a7a3e18fed8b486e4..f78f5a96f9d75ac0acf95de8090bac1cfb4eac89 100644 --- a/tests/templates/inventory-gce.j2 +++ b/tests/templates/inventory-gce.j2 @@ -9,7 +9,7 @@ {{node3}} ansible_ssh_host={{gce.instance_data[2].public_ip}} {% endif %} {% if mode is defined and mode in ["separate", "separate-scale"] %} -[kube-master] +[kube_control_plane] {{node1}} [kube-node] @@ -21,7 +21,7 @@ [vault] {{node3}} {% elif mode is defined and mode in ["ha", "ha-scale"] %} -[kube-master] +[kube_control_plane] {{node1}} {{node2}} @@ -38,14 +38,14 @@ {{node2}} {{node3}} -[broken_kube-master] +[broken_kube_control_plane] {{node2}} [etcd] {{node2}} {{node3}} {% elif mode == "default" %} -[kube-master] +[kube_control_plane] {{node1}} [kube-node] @@ -57,7 +57,7 @@ [vault] {{node1}} {% elif mode == "aio" %} -[kube-master] +[kube_control_plane] {{node1}} [kube-node] @@ -72,7 +72,7 @@ [k8s-cluster:children] kube-node -kube-master +kube_control_plane calico-rr [calico-rr] diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml index 330e5e6bf24e1316c73f8dec4d0db4ab9bba1790..adf0a35c92d5db2e19775fda6d9fcdaed251b237 100644 --- a/tests/testcases/010_check-apiserver.yml +++ b/tests/testcases/010_check-apiserver.yml @@ -1,5 +1,5 @@ --- -- hosts: kube-master +- hosts: kube_control_plane tasks: - name: Check the API servers are responding diff --git a/tests/testcases/015_check-nodes-ready.yml b/tests/testcases/015_check-nodes-ready.yml index b5bf60938aa06ab37453a637333a3050a2a7acd7..0faa1d46b99bfed4cffd3afdfa51d63489a2f901 100644 --- a/tests/testcases/015_check-nodes-ready.yml +++ b/tests/testcases/015_check-nodes-ready.yml @@ -1,5 +1,5 @@ --- -- hosts: kube-master[0] +- hosts: kube_control_plane[0] tasks: - name: Force binaries directory for Flatcar Container Linux by Kinvolk diff --git a/tests/testcases/020_check-pods-running.yml b/tests/testcases/020_check-pods-running.yml 
index 6af07b13784c2adeecc2343e8d152b46794d5f12..edea22a5c024effacf6a8e15faa9522c1bc25ef8 100644 --- a/tests/testcases/020_check-pods-running.yml +++ b/tests/testcases/020_check-pods-running.yml @@ -1,5 +1,5 @@ --- -- hosts: kube-master[0] +- hosts: kube_control_plane[0] tasks: - name: Force binaries directory for Flatcar Container Linux by Kinvolk diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml index d2ab583db435e4d4419b0ed35db9da1d7c58f317..5b18d6a8b0d3ce445e9a71ed9558150475e167e8 100644 --- a/tests/testcases/030_check-network.yml +++ b/tests/testcases/030_check-network.yml @@ -1,5 +1,5 @@ --- -- hosts: kube-master[0] +- hosts: kube_control_plane[0] vars: test_image_repo: busybox test_image_tag: latest diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml index a2a53b76ac94dbd3afdcee4ba0e4f64431add65a..174c9750c797ba8d17e983d993a0bad7c870792b 100644 --- a/tests/testcases/040_check-network-adv.yml +++ b/tests/testcases/040_check-network-adv.yml @@ -37,7 +37,7 @@ until: ncs_pod.stdout.find('Running') != -1 retries: 3 delay: 10 - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Wait for netchecker agents shell: "set -o pipefail && {{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'" @@ -48,12 +48,12 @@ retries: 3 delay: 10 failed_when: false - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Get netchecker pods command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}" run_once: true - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" no_log: false with_items: - netchecker-agent @@ -63,14 +63,14 @@ - debug: var: nca_pod.stdout_lines failed_when: not nca_pod is success - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube_control_plane'][0] - name: Get netchecker agents uri: url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/" return_content: yes run_once: true - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" register: agents retries: 18 delay: "{{ agent_report_interval }}" @@ -94,7 +94,7 @@ url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check" status_code: 200 return_content: yes - delegate_to: "{{ groups['kube-master'][0] }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" run_once: true register: result retries: 3 @@ -115,13 +115,13 @@ command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy" no_log: false when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not result is success - name: Get logs from other apps command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers" when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - not result is success no_log: false with_items: @@ -184,7 +184,7 @@ }' EOF when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - kube_network_plugin_multus|default(false)|bool - name: Annotate pod with macvlan network @@ -208,7 +208,7 @@ image: dougbtv/centos-network EOF when: - - 
inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - kube_network_plugin_multus|default(false)|bool - name: Check secondary macvlan interface @@ -218,5 +218,5 @@ retries: 90 changed_when: false when: - - inventory_hostname == groups['kube-master'][0] + - inventory_hostname == groups['kube_control_plane'][0] - kube_network_plugin_multus|default(false)|bool diff --git a/tests/testcases/100_check-k8s-conformance.yml b/tests/testcases/100_check-k8s-conformance.yml index 9716b3dac32e47716351bdbb1819a79b15cab550..3830f2ca28ea350a1e60fd568000d0e75e988c2d 100644 --- a/tests/testcases/100_check-k8s-conformance.yml +++ b/tests/testcases/100_check-k8s-conformance.yml @@ -1,5 +1,5 @@ --- -- hosts: kube-master[0] +- hosts: kube_control_plane[0] vars: sonobuoy_version: 0.20.0 sonobuoy_arch: amd64 diff --git a/tests/testcases/roles/cluster-dump/tasks/main.yml b/tests/testcases/roles/cluster-dump/tasks/main.yml index 589a712e0ec0db15861c69d10a91de34cdfccf5f..966a13c3ddc7586e9bb4e5b3c08b261f2090f8c3 100644 --- a/tests/testcases/roles/cluster-dump/tasks/main.yml +++ b/tests/testcases/roles/cluster-dump/tasks/main.yml @@ -2,17 +2,17 @@ - name: Generate dump folder command: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump" no_log: true - when: inventory_hostname in groups['kube-master'] + when: inventory_hostname in groups['kube_control_plane'] - name: Compress directory cluster-dump archive: path: /tmp/cluster-dump dest: /tmp/cluster-dump.tgz - when: inventory_hostname in groups['kube-master'] + when: inventory_hostname in groups['kube_control_plane'] - name: Fetch dump file fetch: src: /tmp/cluster-dump.tgz dest: "{{ lookup('env', 'CI_PROJECT_DIR') }}/cluster-dump/{{ inventory_hostname }}.tgz" flat: true - when: inventory_hostname in groups['kube-master'] + when: inventory_hostname in groups['kube_control_plane'] diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml index b53668408e567f9992c4b11609ccfbc15cfc0d74..6fd30537b260377d1edcf832d38bee7b31a2f6d8 100644 --- a/upgrade-cluster.yml +++ b/upgrade-cluster.yml @@ -2,6 +2,15 @@ - name: Check ansible version import_playbook: ansible_version.yml +- name: Add kube-master nodes to kube_control_plane + # This is for old inventory which contains kube-master instead of kube_control_plane + hosts: kube-master + gather_facts: false + tasks: + - name: add nodes to kube_control_plane group + group_by: + key: 'kube_control_plane' + - hosts: bastion[0] gather_facts: False environment: "{{ proxy_disable_env }}" @@ -26,8 +35,8 @@ tags: always import_playbook: facts.yml -- name: Download images to ansible host cache via first kube-master node - hosts: kube-master[0] +- name: Download images to ansible host cache via first kube_control_plane node + hosts: kube_control_plane[0] gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -84,7 +93,7 @@ - name: Handle upgrades to master components first to maintain backwards compat. 
gather_facts: False - hosts: kube-master + hosts: kube_control_plane any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" serial: 1 @@ -101,7 +110,7 @@ - { role: upgrade/post-upgrade, tags: post-upgrade } - name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes - hosts: kube-master:calico-rr:kube-node + hosts: kube_control_plane:calico-rr:kube-node gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" serial: "{{ serial | default('20%') }}" @@ -114,7 +123,7 @@ - { role: kubernetes-apps/policy_controller, tags: policy-controller } - name: Finally handle worker upgrades, based on given batch size - hosts: kube-node:calico-rr:!kube-master + hosts: kube-node:calico-rr:!kube_control_plane gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -128,7 +137,7 @@ - { role: kubernetes/node-label, tags: node-label } - { role: upgrade/post-upgrade, tags: post-upgrade } -- hosts: kube-master[0] +- hosts: kube_control_plane[0] gather_facts: False any_errors_fatal: true environment: "{{ proxy_disable_env }}" @@ -144,7 +153,7 @@ - { role: kubespray-defaults } - { role: network_plugin/calico/rr, tags: network } -- hosts: kube-master +- hosts: kube_control_plane gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}"
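
Note: the `group_by` plays added to cluster.yml and upgrade-cluster.yml above are what keeps legacy inventories working. At runtime, every host in the old `kube-master` group is also placed into `kube_control_plane`, so the plays that now target the new group name still reach those hosts. As a rough illustration only (a hypothetical two-node inventory, not part of this change), an unmodified legacy inventory such as the following would still be picked up by these playbooks, although renaming the group in the inventory itself remains the long-term fix, as the TODO in the CI template notes:

# Hypothetical legacy inventory kept in the old style; the compatibility play
# maps kube-master members into kube_control_plane before any other play runs.
[kube-master]
node1

[kube-node]
node2

[etcd]
node1

[k8s-cluster:children]
kube-master
kube-node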