From acbf44a1b4cd6dc1700acf3eb777b94336a9a154 Mon Sep 17 00:00:00 2001
From: Luke Simmons <luke.simmons@vgregion.se>
Date: Mon, 27 Mar 2023 11:25:55 +0200
Subject: [PATCH] Adds support for Ansible collections (#9582)

---
 .gitignore                                    |   4 +
 .gitlab-ci/lint.yml                           |  24 ++
 .gitlab-ci/vagrant.yml                        |   5 +
 README.md                                     |   4 +
 cluster.yml                                   | 132 +------
 docs/ansible_collection.md                    |  38 ++
 galaxy.yml                                    |  47 +++
 library/kube.py                               | 358 +-----------------
 .../ansible_version.yml                       |   0
 playbooks/cluster.yml                         | 131 +++++++
 facts.yml => playbooks/facts.yml              |   0
 .../legacy_groups.yml                         |   0
 playbooks/recover-control-plane.yml           |  34 ++
 playbooks/remove-node.yml                     |  50 +++
 playbooks/reset.yml                           |  39 ++
 playbooks/scale.yml                           | 124 ++++++
 playbooks/upgrade-cluster.yml                 | 170 +++++++++
 plugins/modules/kube.py                       | 357 +++++++++++++++++
 recover-control-plane.yml                     |  35 +-
 remove-node.yml                               |  51 +--
 reset.yml                                     |  40 +-
 scale.yml                                     | 125 +-----
 tests/ansible.cfg                             |   1 +
 .../vagrant_ubuntu20-flannel-collection.rb    |   9 +
 .../vagrant_ubuntu20-flannel-collection.yml   |   3 +
 tests/scripts/check_galaxy_version.sh         |  18 +
 tests/scripts/testcases_run.sh                |  33 ++
 upgrade-cluster.yml                           | 171 +--------
 28 files changed, 1104 insertions(+), 899 deletions(-)
 create mode 100644 docs/ansible_collection.md
 create mode 100644 galaxy.yml
 mode change 100644 => 120000 library/kube.py
 rename ansible_version.yml => playbooks/ansible_version.yml (100%)
 create mode 100644 playbooks/cluster.yml
 rename facts.yml => playbooks/facts.yml (100%)
 rename legacy_groups.yml => playbooks/legacy_groups.yml (100%)
 create mode 100644 playbooks/recover-control-plane.yml
 create mode 100644 playbooks/remove-node.yml
 create mode 100644 playbooks/reset.yml
 create mode 100644 playbooks/scale.yml
 create mode 100644 playbooks/upgrade-cluster.yml
 create mode 100644 plugins/modules/kube.py
 create mode 100644 tests/files/vagrant_ubuntu20-flannel-collection.rb
 create mode 100644 tests/files/vagrant_ubuntu20-flannel-collection.yml
 create mode 100755 tests/scripts/check_galaxy_version.sh

diff --git a/.gitignore b/.gitignore
index 4f576a99f..5b841a68f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -114,3 +114,7 @@ roles/**/molecule/**/__pycache__/
 # Temp location used by our scripts
 scripts/tmp/
 tmp.md
+
+# Ansible collection files
+kubernetes_sigs-kubespray*tar.gz
+ansible_collections
diff --git a/.gitlab-ci/lint.yml b/.gitlab-ci/lint.yml
index 6d935ba38..c5c9bcf6b 100644
--- a/.gitlab-ci/lint.yml
+++ b/.gitlab-ci/lint.yml
@@ -39,11 +39,28 @@ syntax-check:
     ANSIBLE_VERBOSITY: "3"
   script:
     - ansible-playbook --syntax-check cluster.yml
+    - ansible-playbook --syntax-check playbooks/cluster.yml
     - ansible-playbook --syntax-check upgrade-cluster.yml
+    - ansible-playbook --syntax-check playbooks/upgrade-cluster.yml
     - ansible-playbook --syntax-check reset.yml
+    - ansible-playbook --syntax-check playbooks/reset.yml
     - ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
   except: ['triggers', 'master']
 
+collection-build-install-sanity-check:
+  extends: .job
+  stage: unit-tests
+  tags: [light]
+  variables:
+    ANSIBLE_COLLECTIONS_PATH: "./ansible_collections"
+  script:
+    - ansible-galaxy collection build
+    - ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
+    - ansible-galaxy collection list $(egrep -i '(name:\s+|namespace:\s+)' galaxy.yml | awk '{print $2}' | tr '\n' '.' | sed 's|\.$||g') | grep "^kubernetes_sigs.kubespray"
+    - test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/cluster.yml
+    - test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/reset.yml
+  except: ['triggers', 'master']
+
 tox-inventory-builder:
   stage: unit-tests
   tags: [light]
@@ -75,6 +92,13 @@ check-readme-versions:
   script:
     - tests/scripts/check_readme_versions.sh
 
+check-galaxy-version:
+  stage: unit-tests
+  tags: [light]
+  image: python:3
+  script:
+    - tests/scripts/check_galaxy_version.sh
+
 check-typo:
   stage: unit-tests
   tags: [light]
diff --git a/.gitlab-ci/vagrant.yml b/.gitlab-ci/vagrant.yml
index d2a407499..52aec6c78 100644
--- a/.gitlab-ci/vagrant.yml
+++ b/.gitlab-ci/vagrant.yml
@@ -45,6 +45,11 @@ vagrant_ubuntu20-flannel:
   when: on_success
   allow_failure: false
 
+vagrant_ubuntu20-flannel-collection:
+  stage: deploy-part2
+  extends: .vagrant
+  when: on_success
+
 vagrant_ubuntu16-kube-router-sep:
   stage: deploy-part2
   extends: .vagrant
diff --git a/README.md b/README.md
index 2153502c1..e26c092af 100644
--- a/README.md
+++ b/README.md
@@ -77,6 +77,10 @@ docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inve
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
 
+#### Collection
+
+See [here](docs/ansible_collection.md) if you wish to use this repository as an Ansible collection
+
 ### Vagrant
 
 For Vagrant we need to install Python dependencies for provisioning tasks.
diff --git a/cluster.yml b/cluster.yml
index 5f163de6a..7190af45f 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -1,131 +1,3 @@
 ---
-- name: Check ansible version
-  import_playbook: ansible_version.yml
-
-- name: Ensure compatibility with old groups
-  import_playbook: legacy_groups.yml
-
-- hosts: bastion[0]
-  gather_facts: False
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
-
-- hosts: k8s_cluster:etcd
-  strategy: linear
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  gather_facts: false
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: bootstrap-os, tags: bootstrap-os}
-
-- name: Gather facts
-  tags: always
-  import_playbook: facts.yml
-
-- hosts: k8s_cluster:etcd
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
-    - { role: download, tags: download, when: "not skip_downloads" }
-
-- hosts: etcd:kube_control_plane
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - role: etcd
-      tags: etcd
-      vars:
-        etcd_cluster_setup: true
-        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
-      when: etcd_deployment_type != "kubeadm"
-
-- hosts: k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - role: etcd
-      tags: etcd
-      vars:
-        etcd_cluster_setup: false
-        etcd_events_cluster_setup: false
-      when:
-        - etcd_deployment_type != "kubeadm"
-        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
-        - kube_network_plugin != "calico" or calico_datastore == "etcd"
-
-- hosts: k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/node, tags: node }
-
-- hosts: kube_control_plane
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/control-plane, tags: master }
-    - { role: kubernetes/client, tags: client }
-    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
-
-- hosts: k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/kubeadm, tags: kubeadm}
-    - { role: kubernetes/node-label, tags: node-label }
-    - { role: network_plugin, tags: network }
-
-- hosts: calico_rr
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
-
-- hosts: kube_control_plane[0]
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
-
-- hosts: kube_control_plane
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
-    - { role: kubernetes-apps/network_plugin, tags: network }
-    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
-    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
-    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
-    - { role: kubernetes-apps, tags: apps }
-
-- name: Apply resolv.conf changes now that cluster DNS is up
-  hosts: k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
+- name: Install Kubernetes
+  ansible.builtin.import_playbook: playbooks/cluster.yml
diff --git a/docs/ansible_collection.md b/docs/ansible_collection.md
new file mode 100644
index 000000000..449b75a98
--- /dev/null
+++ b/docs/ansible_collection.md
@@ -0,0 +1,38 @@
+# Ansible collection
+
+Kubespray can be installed as an [Ansible collection](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html).
+
+## Requirements
+
+- An inventory file with the appropriate host groups. See the [README](../README.md#usage).
+- A `group_vars` directory. These group variables **need** to match the appropriate variable names under `inventory/local/group_vars`. See the [README](../README.md#usage).
+
+## Usage
+
+1. Add Kubespray to your requirements.yml file
+
+   ```yaml
+   collections:
+   - name: https://github.com/kubernetes-sigs/kubespray
+     type: git
+     version: v2.21.0
+   ```
+
+2. Install your collection
+
+   ```ShellSession
+   ansible-galaxy install -r requirements.yml
+   ```
+
+3. Create a playbook to install your Kubernetes cluster
+
+   ```yaml
+   - name: Install Kubernetes
+     ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
+   ```
+
+4. Update INVENTORY and PLAYBOOK so that they point to your inventory file and the playbook you created above, and then install Kubespray
+
+   ```ShellSession
+   ansible-playbook -i INVENTORY --become --become-user=root PLAYBOOK
+   ```
diff --git a/galaxy.yml b/galaxy.yml
new file mode 100644
index 000000000..2d06d27db
--- /dev/null
+++ b/galaxy.yml
@@ -0,0 +1,47 @@
+---
+namespace: kubernetes_sigs
+description: Deploy a production ready Kubernetes cluster
+name: kubespray
+version: 2.21.0
+readme: README.md
+authors:
+  - luksi1
+tags:
+  - kubernetes
+  - kubespray
+repository: https://github.com/kubernetes-sigs/kubespray
+build_ignore:
+  - .github
+  - '*.tar.gz'
+  - extra_playbooks
+  - inventory
+  - scripts
+  - test-infra
+  - .ansible-lint
+  - .editorconfig
+  - .gitignore
+  - .gitlab-ci
+  - .gitlab-ci.yml
+  - .gitmodules
+  - .markdownlint.yaml
+  - .nojekyll
+  - .pre-commit-config.yaml
+  - .yamllint
+  - Dockerfile
+  - FILES.json
+  - MANIFEST.json
+  - Makefile
+  - Vagrantfile
+  - _config.yml
+  - ansible.cfg
+  - requirements*txt
+  - setup.cfg
+  - setup.py
+  - index.html
+  - reset.yml
+  - cluster.yml
+  - scale.yml
+  - recover-control-plane.yml
+  - remove-node.yml
+  - upgrade-cluster.yml
+  - library
diff --git a/library/kube.py b/library/kube.py
deleted file mode 100644
index cb9f4f0cf..000000000
--- a/library/kube.py
+++ /dev/null
@@ -1,357 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-DOCUMENTATION = """
----
-module: kube
-short_description: Manage Kubernetes Cluster
-description:
-  - Create, replace, remove, and stop resources within a Kubernetes Cluster
-version_added: "2.0"
-options:
-  name:
-    required: false
-    default: null
-    description:
-      - The name associated with resource
-  filename:
-    required: false
-    default: null
-    description:
-      - The path and filename of the resource(s) definition file(s).
-      - To operate on several files this can accept a comma separated list of files or a list of files.
-    aliases: [ 'files', 'file', 'filenames' ]
-  kubectl:
-    required: false
-    default: null
-    description:
-      - The path to the kubectl bin
-  namespace:
-    required: false
-    default: null
-    description:
-      - The namespace associated with the resource(s)
-  resource:
-    required: false
-    default: null
-    description:
-      - The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
-  label:
-    required: false
-    default: null
-    description:
-      - The labels used to filter specific resources.
-  server:
-    required: false
-    default: null
-    description:
-      - The url for the API server that commands are executed against.
-  force:
-    required: false
-    default: false
-    description:
-      - A flag to indicate to force delete, replace, or stop.
-  wait:
-    required: false
-    default: false
-    description:
-      - A flag to indicate to wait for resources to be created before continuing to the next step
-  all:
-    required: false
-    default: false
-    description:
-      - A flag to indicate delete all, stop all, or all namespaces when checking exists.
-  log_level:
-    required: false
-    default: 0
-    description:
-      - Indicates the level of verbosity of logging by kubectl.
-  state:
-    required: false
-    choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
-    default: present
-    description:
-      - present handles checking existence or creating if definition file provided,
-        absent handles deleting resource(s) based on other options,
-        latest handles creating or updating based on existence,
-        reloaded handles updating resource(s) definition using definition file,
-        stopped handles stopping resource(s) based on other options.
-  recursive:
-    required: false
-    default: false
-    description:
-      - Process the directory used in -f, --filename recursively.
-        Useful when you want to manage related manifests organized
-        within the same directory.
-requirements:
-  - kubectl
-author: "Kenny Jones (@kenjones-cisco)"
-"""
-
-EXAMPLES = """
-- name: test nginx is present
-  kube: name=nginx resource=rc state=present
-
-- name: test nginx is stopped
-  kube: name=nginx resource=rc state=stopped
-
-- name: test nginx is absent
-  kube: name=nginx resource=rc state=absent
-
-- name: test nginx is present
-  kube: filename=/tmp/nginx.yml
-
-- name: test nginx and postgresql are present
-  kube: files=/tmp/nginx.yml,/tmp/postgresql.yml
-
-- name: test nginx and postgresql are present
-  kube:
-    files:
-      - /tmp/nginx.yml
-      - /tmp/postgresql.yml
-"""
-
-
-class KubeManager(object):
-
-    def __init__(self, module):
-
-        self.module = module
-
-        self.kubectl = module.params.get('kubectl')
-        if self.kubectl is None:
-            self.kubectl =  module.get_bin_path('kubectl', True)
-        self.base_cmd = [self.kubectl]
-
-        if module.params.get('server'):
-            self.base_cmd.append('--server=' + module.params.get('server'))
-
-        if module.params.get('log_level'):
-            self.base_cmd.append('--v=' + str(module.params.get('log_level')))
-
-        if module.params.get('namespace'):
-            self.base_cmd.append('--namespace=' + module.params.get('namespace'))
-
-
-        self.all = module.params.get('all')
-        self.force = module.params.get('force')
-        self.wait = module.params.get('wait')
-        self.name = module.params.get('name')
-        self.filename = [f.strip() for f in module.params.get('filename') or []]
-        self.resource = module.params.get('resource')
-        self.label = module.params.get('label')
-        self.recursive = module.params.get('recursive')
-
-    def _execute(self, cmd):
-        args = self.base_cmd + cmd
-        try:
-            rc, out, err = self.module.run_command(args)
-            if rc != 0:
-                self.module.fail_json(
-                    msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err))
-        except Exception as exc:
-            self.module.fail_json(
-                msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
-        return out.splitlines()
-
-    def _execute_nofail(self, cmd):
-        args = self.base_cmd + cmd
-        rc, out, err = self.module.run_command(args)
-        if rc != 0:
-            return None
-        return out.splitlines()
-
-    def create(self, check=True, force=True):
-        if check and self.exists():
-            return []
-
-        cmd = ['apply']
-
-        if force:
-            cmd.append('--force')
-
-        if self.wait:
-            cmd.append('--wait')
-
-        if self.recursive:
-            cmd.append('--recursive={}'.format(self.recursive))
-
-        if not self.filename:
-            self.module.fail_json(msg='filename required to create')
-
-        cmd.append('--filename=' + ','.join(self.filename))
-
-        return self._execute(cmd)
-
-    def replace(self, force=True):
-
-        cmd = ['apply']
-
-        if force:
-            cmd.append('--force')
-
-        if self.wait:
-            cmd.append('--wait')
-
-        if self.recursive:
-            cmd.append('--recursive={}'.format(self.recursive))
-
-        if not self.filename:
-            self.module.fail_json(msg='filename required to reload')
-
-        cmd.append('--filename=' + ','.join(self.filename))
-
-        return self._execute(cmd)
-
-    def delete(self):
-
-        if not self.force and not self.exists():
-            return []
-
-        cmd = ['delete']
-
-        if self.filename:
-            cmd.append('--filename=' + ','.join(self.filename))
-            if self.recursive:
-                cmd.append('--recursive={}'.format(self.recursive))
-        else:
-            if not self.resource:
-                self.module.fail_json(msg='resource required to delete without filename')
-
-            cmd.append(self.resource)
-
-            if self.name:
-                cmd.append(self.name)
-
-            if self.label:
-                cmd.append('--selector=' + self.label)
-
-            if self.all:
-                cmd.append('--all')
-
-            if self.force:
-                cmd.append('--ignore-not-found')
-
-            if self.recursive:
-                cmd.append('--recursive={}'.format(self.recursive))
-
-        return self._execute(cmd)
-
-    def exists(self):
-        cmd = ['get']
-
-        if self.filename:
-            cmd.append('--filename=' + ','.join(self.filename))
-            if self.recursive:
-                cmd.append('--recursive={}'.format(self.recursive))
-        else:
-            if not self.resource:
-                self.module.fail_json(msg='resource required without filename')
-
-            cmd.append(self.resource)
-
-            if self.name:
-                cmd.append(self.name)
-
-            if self.label:
-                cmd.append('--selector=' + self.label)
-
-            if self.all:
-                cmd.append('--all-namespaces')
-
-        cmd.append('--no-headers')
-
-        result = self._execute_nofail(cmd)
-        if not result:
-            return False
-        return True
-
-    # TODO: This is currently unused, perhaps convert to 'scale' with a replicas param?
-    def stop(self):
-
-        if not self.force and not self.exists():
-            return []
-
-        cmd = ['stop']
-
-        if self.filename:
-            cmd.append('--filename=' + ','.join(self.filename))
-            if self.recursive:
-                cmd.append('--recursive={}'.format(self.recursive))
-        else:
-            if not self.resource:
-                self.module.fail_json(msg='resource required to stop without filename')
-
-            cmd.append(self.resource)
-
-            if self.name:
-                cmd.append(self.name)
-
-            if self.label:
-                cmd.append('--selector=' + self.label)
-
-            if self.all:
-                cmd.append('--all')
-
-            if self.force:
-                cmd.append('--ignore-not-found')
-
-        return self._execute(cmd)
-
-
-def main():
-
-    module = AnsibleModule(
-        argument_spec=dict(
-            name=dict(),
-            filename=dict(type='list', aliases=['files', 'file', 'filenames']),
-            namespace=dict(),
-            resource=dict(),
-            label=dict(),
-            server=dict(),
-            kubectl=dict(),
-            force=dict(default=False, type='bool'),
-            wait=dict(default=False, type='bool'),
-            all=dict(default=False, type='bool'),
-            log_level=dict(default=0, type='int'),
-            state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped', 'exists']),
-            recursive=dict(default=False, type='bool'),
-            ),
-            mutually_exclusive=[['filename', 'list']]
-        )
-
-    changed = False
-
-    manager = KubeManager(module)
-    state = module.params.get('state')
-    if state == 'present':
-        result = manager.create(check=False)
-
-    elif state == 'absent':
-        result = manager.delete()
-
-    elif state == 'reloaded':
-        result = manager.replace()
-
-    elif state == 'stopped':
-        result = manager.stop()
-
-    elif state == 'latest':
-        result = manager.replace()
-
-    elif state == 'exists':
-        result = manager.exists()
-        module.exit_json(changed=changed,
-                     msg='%s' % result)
-
-    else:
-        module.fail_json(msg='Unrecognized state %s.' % state)
-
-    module.exit_json(changed=changed,
-                     msg='success: %s' % (' '.join(result))
-                     )
-
-
-from ansible.module_utils.basic import *  # noqa
-if __name__ == '__main__':
-    main()
diff --git a/library/kube.py b/library/kube.py
new file mode 120000
index 000000000..e5d33a6a8
--- /dev/null
+++ b/library/kube.py
@@ -0,0 +1 @@
+../plugins/modules/kube.py
\ No newline at end of file
diff --git a/ansible_version.yml b/playbooks/ansible_version.yml
similarity index 100%
rename from ansible_version.yml
rename to playbooks/ansible_version.yml
diff --git a/playbooks/cluster.yml b/playbooks/cluster.yml
new file mode 100644
index 000000000..5f163de6a
--- /dev/null
+++ b/playbooks/cluster.yml
@@ -0,0 +1,131 @@
+---
+- name: Check ansible version
+  import_playbook: ansible_version.yml
+
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
+
+- hosts: bastion[0]
+  gather_facts: False
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
+
+- hosts: k8s_cluster:etcd
+  strategy: linear
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  gather_facts: false
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: bootstrap-os, tags: bootstrap-os}
+
+- name: Gather facts
+  tags: always
+  import_playbook: facts.yml
+
+- hosts: k8s_cluster:etcd
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes/preinstall, tags: preinstall }
+    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
+    - { role: download, tags: download, when: "not skip_downloads" }
+
+- hosts: etcd:kube_control_plane
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - role: etcd
+      tags: etcd
+      vars:
+        etcd_cluster_setup: true
+        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
+      when: etcd_deployment_type != "kubeadm"
+
+- hosts: k8s_cluster
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - role: etcd
+      tags: etcd
+      vars:
+        etcd_cluster_setup: false
+        etcd_events_cluster_setup: false
+      when:
+        - etcd_deployment_type != "kubeadm"
+        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+        - kube_network_plugin != "calico" or calico_datastore == "etcd"
+
+- hosts: k8s_cluster
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes/node, tags: node }
+
+- hosts: kube_control_plane
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes/control-plane, tags: master }
+    - { role: kubernetes/client, tags: client }
+    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
+
+- hosts: k8s_cluster
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes/kubeadm, tags: kubeadm}
+    - { role: kubernetes/node-label, tags: node-label }
+    - { role: network_plugin, tags: network }
+
+- hosts: calico_rr
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
+
+- hosts: kube_control_plane[0]
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
+
+- hosts: kube_control_plane
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
+    - { role: kubernetes-apps/network_plugin, tags: network }
+    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
+    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
+    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
+    - { role: kubernetes-apps, tags: apps }
+
+- name: Apply resolv.conf changes now that cluster DNS is up
+  hosts: k8s_cluster
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
diff --git a/facts.yml b/playbooks/facts.yml
similarity index 100%
rename from facts.yml
rename to playbooks/facts.yml
diff --git a/legacy_groups.yml b/playbooks/legacy_groups.yml
similarity index 100%
rename from legacy_groups.yml
rename to playbooks/legacy_groups.yml
diff --git a/playbooks/recover-control-plane.yml b/playbooks/recover-control-plane.yml
new file mode 100644
index 000000000..77ec5bec4
--- /dev/null
+++ b/playbooks/recover-control-plane.yml
@@ -0,0 +1,34 @@
+---
+- name: Check ansible version
+  import_playbook: ansible_version.yml
+
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
+
+- hosts: bastion[0]
+  gather_facts: False
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults}
+    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
+
+- hosts: etcd[0]
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults}
+    - role: recover_control_plane/etcd
+      when: etcd_deployment_type != "kubeadm"
+
+- hosts: kube_control_plane[0]
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults}
+    - { role: recover_control_plane/control-plane }
+
+- import_playbook: cluster.yml
+
+- hosts: kube_control_plane
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults}
+    - { role: recover_control_plane/post-recover }
diff --git a/playbooks/remove-node.yml b/playbooks/remove-node.yml
new file mode 100644
index 000000000..b9fdb93d6
--- /dev/null
+++ b/playbooks/remove-node.yml
@@ -0,0 +1,50 @@
+---
+- name: Check ansible version
+  import_playbook: ansible_version.yml
+
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
+
+- hosts: bastion[0]
+  gather_facts: False
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
+
+- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
+  gather_facts: no
+  tasks:
+    - name: Confirm Execution
+      pause:
+        prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
+      register: pause_result
+      run_once: True
+      when:
+        - not (skip_confirmation | default(false) | bool)
+
+    - name: Fail if user does not confirm deletion
+      fail:
+        msg: "Delete nodes confirmation failed"
+      when: pause_result.user_input | default('yes') != 'yes'
+
+- name: Gather facts
+  import_playbook: facts.yml
+  when: reset_nodes|default(True)|bool
+
+- hosts: "{{ node | default('kube_node') }}"
+  gather_facts: no
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
+    - { role: remove-node/pre-remove, tags: pre-remove }
+    - { role: remove-node/remove-etcd-node }
+    - { role: reset, tags: reset, when: reset_nodes|default(True)|bool }
+
+# Currently cannot remove first master or etcd
+- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
+  gather_facts: no
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
+    - { role: remove-node/post-remove, tags: post-remove }
diff --git a/playbooks/reset.yml b/playbooks/reset.yml
new file mode 100644
index 000000000..6fa9fa3ac
--- /dev/null
+++ b/playbooks/reset.yml
@@ -0,0 +1,39 @@
+---
+- name: Check ansible version
+  import_playbook: ansible_version.yml
+
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
+
+- hosts: bastion[0]
+  gather_facts: False
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults}
+    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
+
+- name: Gather facts
+  import_playbook: facts.yml
+
+- hosts: etcd:k8s_cluster:calico_rr
+  gather_facts: False
+  vars_prompt:
+    name: "reset_confirmation"
+    prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
+    default: "no"
+    private: no
+
+  pre_tasks:
+    - name: check confirmation
+      fail:
+        msg: "Reset confirmation failed"
+      when: reset_confirmation != "yes"
+
+    - name: Gather information about installed services
+      service_facts:
+
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults}
+    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_early: true }
+    - { role: reset, tags: reset }
diff --git a/playbooks/scale.yml b/playbooks/scale.yml
new file mode 100644
index 000000000..8e79bfa03
--- /dev/null
+++ b/playbooks/scale.yml
@@ -0,0 +1,124 @@
+---
+- name: Check ansible version
+  import_playbook: ansible_version.yml
+
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
+
+- hosts: bastion[0]
+  gather_facts: False
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
+
+- name: Bootstrap any new workers
+  hosts: kube_node
+  strategy: linear
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  gather_facts: false
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: bootstrap-os, tags: bootstrap-os }
+
+- name: Gather facts
+  tags: always
+  import_playbook: facts.yml
+
+- name: Generate the etcd certificates beforehand
+  hosts: etcd:kube_control_plane
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - role: etcd
+      tags: etcd
+      vars:
+        etcd_cluster_setup: false
+        etcd_events_cluster_setup: false
+      when:
+        - etcd_deployment_type != "kubeadm"
+        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+        - kube_network_plugin != "calico" or calico_datastore == "etcd"
+
+- name: Download images to ansible host cache via first kube_control_plane node
+  hosts: kube_control_plane[0]
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost" }
+    - { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
+    - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }
+
+- name: Target only workers to get kubelet installed and checking in on any new nodes(engine)
+  hosts: kube_node
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes/preinstall, tags: preinstall }
+    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
+    - { role: download, tags: download, when: "not skip_downloads" }
+    - role: etcd
+      tags: etcd
+      vars:
+        etcd_cluster_setup: false
+      when:
+        - etcd_deployment_type != "kubeadm"
+        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+        - kube_network_plugin != "calico" or calico_datastore == "etcd"
+
+- name: Target only workers to get kubelet installed and checking in on any new nodes(node)
+  hosts: kube_node
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes/node, tags: node }
+
+- name: Upload control plane certs and retrieve encryption key
+  hosts: kube_control_plane | first
+  environment: "{{ proxy_disable_env }}"
+  gather_facts: False
+  tags: kubeadm
+  roles:
+    - { role: kubespray-defaults }
+  tasks:
+    - name: Upload control plane certificates
+      command: >-
+        {{ bin_dir }}/kubeadm init phase
+        --config {{ kube_config_dir }}/kubeadm-config.yaml
+        upload-certs
+        --upload-certs
+      environment: "{{ proxy_disable_env }}"
+      register: kubeadm_upload_cert
+      changed_when: false
+    - name: set fact 'kubeadm_certificate_key' for later use
+      set_fact:
+        kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}"
+      when: kubeadm_certificate_key is not defined
+
+- name: Target only workers to get kubelet installed and checking in on any new nodes(network)
+  hosts: kube_node
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes/kubeadm, tags: kubeadm }
+    - { role: kubernetes/node-label, tags: node-label }
+    - { role: network_plugin, tags: network }
+
+- name: Apply resolv.conf changes now that cluster DNS is up
+  hosts: k8s_cluster
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
diff --git a/playbooks/upgrade-cluster.yml b/playbooks/upgrade-cluster.yml
new file mode 100644
index 000000000..39dd95a01
--- /dev/null
+++ b/playbooks/upgrade-cluster.yml
@@ -0,0 +1,170 @@
+---
+- name: Check ansible version
+  import_playbook: ansible_version.yml
+
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
+
+- hosts: bastion[0]
+  gather_facts: False
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
+
+- hosts: k8s_cluster:etcd:calico_rr
+  strategy: linear
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  gather_facts: false
+  environment: "{{ proxy_disable_env }}"
+  vars:
+    # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
+    # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
+    ansible_ssh_pipelining: false
+  roles:
+    - { role: kubespray-defaults }
+    - { role: bootstrap-os, tags: bootstrap-os}
+
+- name: Gather facts
+  tags: always
+  import_playbook: facts.yml
+
+- name: Download images to ansible host cache via first kube_control_plane node
+  hosts: kube_control_plane[0]
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost"}
+    - { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
+    - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }
+
+- name: Prepare nodes for upgrade
+  hosts: k8s_cluster:etcd:calico_rr
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes/preinstall, tags: preinstall }
+    - { role: download, tags: download, when: "not skip_downloads" }
+
+- name: Upgrade container engine on non-cluster nodes
+  hosts: etcd:calico_rr:!k8s_cluster
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  serial: "{{ serial | default('20%') }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
+
+- hosts: etcd:kube_control_plane
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - role: etcd
+      tags: etcd
+      vars:
+        etcd_cluster_setup: true
+        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
+      when: etcd_deployment_type != "kubeadm"
+
+- hosts: k8s_cluster
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - role: etcd
+      tags: etcd
+      vars:
+        etcd_cluster_setup: false
+        etcd_events_cluster_setup: false
+      when:
+        - etcd_deployment_type != "kubeadm"
+        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+        - kube_network_plugin != "calico" or calico_datastore == "etcd"
+
+- name: Handle upgrades to master components first to maintain backwards compat.
+  gather_facts: False
+  hosts: kube_control_plane
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  serial: 1
+  roles:
+    - { role: kubespray-defaults }
+    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
+    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
+    - { role: kubernetes/node, tags: node }
+    - { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true }
+    - { role: kubernetes/client, tags: client }
+    - { role: kubernetes/node-label, tags: node-label }
+    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
+    - { role: kubernetes-apps, tags: csi-driver }
+    - { role: upgrade/post-upgrade, tags: post-upgrade }
+
+- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
+  hosts: kube_control_plane:calico_rr:kube_node
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  serial: "{{ serial | default('20%') }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
+    - { role: network_plugin, tags: network }
+    - { role: kubernetes-apps/network_plugin, tags: network }
+    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
+
+- name: Finally handle worker upgrades, based on given batch size
+  hosts: kube_node:calico_rr:!kube_control_plane
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  serial: "{{ serial | default('20%') }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
+    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
+    - { role: kubernetes/node, tags: node }
+    - { role: kubernetes/kubeadm, tags: kubeadm }
+    - { role: kubernetes/node-label, tags: node-label }
+    - { role: upgrade/post-upgrade, tags: post-upgrade }
+
+- hosts: kube_control_plane[0]
+  gather_facts: False
+  any_errors_fatal: true
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
+
+- hosts: calico_rr
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: network_plugin/calico/rr, tags: network }
+
+- hosts: kube_control_plane
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
+    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
+    - { role: kubernetes-apps, tags: apps }
+
+- name: Apply resolv.conf changes now that cluster DNS is up
+  hosts: k8s_cluster
+  gather_facts: False
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
+  roles:
+    - { role: kubespray-defaults }
+    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
diff --git a/plugins/modules/kube.py b/plugins/modules/kube.py
new file mode 100644
index 000000000..cb9f4f0cf
--- /dev/null
+++ b/plugins/modules/kube.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+DOCUMENTATION = """
+---
+module: kube
+short_description: Manage Kubernetes Cluster
+description:
+  - Create, replace, remove, and stop resources within a Kubernetes Cluster
+version_added: "2.0"
+options:
+  name:
+    required: false
+    default: null
+    description:
+      - The name associated with resource
+  filename:
+    required: false
+    default: null
+    description:
+      - The path and filename of the resource(s) definition file(s).
+      - To operate on several files this can accept a comma separated list of files or a list of files.
+    aliases: [ 'files', 'file', 'filenames' ]
+  kubectl:
+    required: false
+    default: null
+    description:
+      - The path to the kubectl bin
+  namespace:
+    required: false
+    default: null
+    description:
+      - The namespace associated with the resource(s)
+  resource:
+    required: false
+    default: null
+    description:
+      - The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
+  label:
+    required: false
+    default: null
+    description:
+      - The labels used to filter specific resources.
+  server:
+    required: false
+    default: null
+    description:
+      - The url for the API server that commands are executed against.
+  force:
+    required: false
+    default: false
+    description:
+      - A flag to indicate to force delete, replace, or stop.
+  wait:
+    required: false
+    default: false
+    description:
+      - A flag to indicate to wait for resources to be created before continuing to the next step
+  all:
+    required: false
+    default: false
+    description:
+      - A flag to indicate delete all, stop all, or all namespaces when checking exists.
+  log_level:
+    required: false
+    default: 0
+    description:
+      - Indicates the level of verbosity of logging by kubectl.
+  state:
+    required: false
+    choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
+    default: present
+    description:
+      - present handles checking existence or creating if definition file provided,
+        absent handles deleting resource(s) based on other options,
+        latest handles creating or updating based on existence,
+        reloaded handles updating resource(s) definition using definition file,
+        stopped handles stopping resource(s) based on other options.
+  recursive:
+    required: false
+    default: false
+    description:
+      - Process the directory used in -f, --filename recursively.
+        Useful when you want to manage related manifests organized
+        within the same directory.
+requirements:
+  - kubectl
+author: "Kenny Jones (@kenjones-cisco)"
+"""
+
+EXAMPLES = """
+- name: test nginx is present
+  kube: name=nginx resource=rc state=present
+
+- name: test nginx is stopped
+  kube: name=nginx resource=rc state=stopped
+
+- name: test nginx is absent
+  kube: name=nginx resource=rc state=absent
+
+- name: test nginx is present
+  kube: filename=/tmp/nginx.yml
+
+- name: test nginx and postgresql are present
+  kube: files=/tmp/nginx.yml,/tmp/postgresql.yml
+
+- name: test nginx and postgresql are present
+  kube:
+    files:
+      - /tmp/nginx.yml
+      - /tmp/postgresql.yml
+"""
+
+
+class KubeManager(object):
+
+    def __init__(self, module):
+
+        self.module = module
+
+        self.kubectl = module.params.get('kubectl')
+        if self.kubectl is None:
+            self.kubectl =  module.get_bin_path('kubectl', True)
+        self.base_cmd = [self.kubectl]
+
+        if module.params.get('server'):
+            self.base_cmd.append('--server=' + module.params.get('server'))
+
+        if module.params.get('log_level'):
+            self.base_cmd.append('--v=' + str(module.params.get('log_level')))
+
+        if module.params.get('namespace'):
+            self.base_cmd.append('--namespace=' + module.params.get('namespace'))
+
+
+        self.all = module.params.get('all')
+        self.force = module.params.get('force')
+        self.wait = module.params.get('wait')
+        self.name = module.params.get('name')
+        self.filename = [f.strip() for f in module.params.get('filename') or []]
+        self.resource = module.params.get('resource')
+        self.label = module.params.get('label')
+        self.recursive = module.params.get('recursive')
+
+    def _execute(self, cmd):
+        args = self.base_cmd + cmd
+        try:
+            rc, out, err = self.module.run_command(args)
+            if rc != 0:
+                self.module.fail_json(
+                    msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err))
+        except Exception as exc:
+            self.module.fail_json(
+                msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
+        return out.splitlines()
+
+    def _execute_nofail(self, cmd):
+        args = self.base_cmd + cmd
+        rc, out, err = self.module.run_command(args)
+        if rc != 0:
+            return None
+        return out.splitlines()
+
+    def create(self, check=True, force=True):
+        if check and self.exists():
+            return []
+
+        cmd = ['apply']
+
+        if force:
+            cmd.append('--force')
+
+        if self.wait:
+            cmd.append('--wait')
+
+        if self.recursive:
+            cmd.append('--recursive={}'.format(self.recursive))
+
+        if not self.filename:
+            self.module.fail_json(msg='filename required to create')
+
+        cmd.append('--filename=' + ','.join(self.filename))
+
+        return self._execute(cmd)
+
+    def replace(self, force=True):
+
+        cmd = ['apply']
+
+        if force:
+            cmd.append('--force')
+
+        if self.wait:
+            cmd.append('--wait')
+
+        if self.recursive:
+            cmd.append('--recursive={}'.format(self.recursive))
+
+        if not self.filename:
+            self.module.fail_json(msg='filename required to reload')
+
+        cmd.append('--filename=' + ','.join(self.filename))
+
+        return self._execute(cmd)
+
+    def delete(self):
+
+        if not self.force and not self.exists():
+            return []
+
+        cmd = ['delete']
+
+        if self.filename:
+            cmd.append('--filename=' + ','.join(self.filename))
+            if self.recursive:
+                cmd.append('--recursive={}'.format(self.recursive))
+        else:
+            if not self.resource:
+                self.module.fail_json(msg='resource required to delete without filename')
+
+            cmd.append(self.resource)
+
+            if self.name:
+                cmd.append(self.name)
+
+            if self.label:
+                cmd.append('--selector=' + self.label)
+
+            if self.all:
+                cmd.append('--all')
+
+            if self.force:
+                cmd.append('--ignore-not-found')
+
+            if self.recursive:
+                cmd.append('--recursive={}'.format(self.recursive))
+
+        return self._execute(cmd)
+
+    def exists(self):
+        cmd = ['get']
+
+        if self.filename:
+            cmd.append('--filename=' + ','.join(self.filename))
+            if self.recursive:
+                cmd.append('--recursive={}'.format(self.recursive))
+        else:
+            if not self.resource:
+                self.module.fail_json(msg='resource required without filename')
+
+            cmd.append(self.resource)
+
+            if self.name:
+                cmd.append(self.name)
+
+            if self.label:
+                cmd.append('--selector=' + self.label)
+
+            if self.all:
+                cmd.append('--all-namespaces')
+
+        cmd.append('--no-headers')
+
+        result = self._execute_nofail(cmd)
+        if not result:
+            return False
+        return True
+
+    # TODO: This is currently unused, perhaps convert to 'scale' with a replicas param?
+    def stop(self):
+
+        if not self.force and not self.exists():
+            return []
+
+        cmd = ['stop']
+
+        if self.filename:
+            cmd.append('--filename=' + ','.join(self.filename))
+            if self.recursive:
+                cmd.append('--recursive={}'.format(self.recursive))
+        else:
+            if not self.resource:
+                self.module.fail_json(msg='resource required to stop without filename')
+
+            cmd.append(self.resource)
+
+            if self.name:
+                cmd.append(self.name)
+
+            if self.label:
+                cmd.append('--selector=' + self.label)
+
+            if self.all:
+                cmd.append('--all')
+
+            if self.force:
+                cmd.append('--ignore-not-found')
+
+        return self._execute(cmd)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(),
+            filename=dict(type='list', aliases=['files', 'file', 'filenames']),
+            namespace=dict(),
+            resource=dict(),
+            label=dict(),
+            server=dict(),
+            kubectl=dict(),
+            force=dict(default=False, type='bool'),
+            wait=dict(default=False, type='bool'),
+            all=dict(default=False, type='bool'),
+            log_level=dict(default=0, type='int'),
+            state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped', 'exists']),
+            recursive=dict(default=False, type='bool'),
+            ),
+            mutually_exclusive=[['filename', 'list']]
+        )
+
+    changed = False
+
+    manager = KubeManager(module)
+    state = module.params.get('state')
+    if state == 'present':
+        result = manager.create(check=False)
+
+    elif state == 'absent':
+        result = manager.delete()
+
+    elif state == 'reloaded':
+        result = manager.replace()
+
+    elif state == 'stopped':
+        result = manager.stop()
+
+    elif state == 'latest':
+        result = manager.replace()
+
+    elif state == 'exists':
+        result = manager.exists()
+        module.exit_json(changed=changed,
+                     msg='%s' % result)
+
+    else:
+        module.fail_json(msg='Unrecognized state %s.' % state)
+
+    module.exit_json(changed=changed,
+                     msg='success: %s' % (' '.join(result))
+                     )
+
+
+from ansible.module_utils.basic import *  # noqa
+if __name__ == '__main__':
+    main()
diff --git a/recover-control-plane.yml b/recover-control-plane.yml
index 77ec5bec4..dc284e71d 100644
--- a/recover-control-plane.yml
+++ b/recover-control-plane.yml
@@ -1,34 +1,3 @@
 ---
-- name: Check ansible version
-  import_playbook: ansible_version.yml
-
-- name: Ensure compatibility with old groups
-  import_playbook: legacy_groups.yml
-
-- hosts: bastion[0]
-  gather_facts: False
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults}
-    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
-
-- hosts: etcd[0]
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults}
-    - role: recover_control_plane/etcd
-      when: etcd_deployment_type != "kubeadm"
-
-- hosts: kube_control_plane[0]
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults}
-    - { role: recover_control_plane/control-plane }
-
-- import_playbook: cluster.yml
-
-- hosts: kube_control_plane
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults}
-    - { role: recover_control_plane/post-recover }
+- name: Recover control plane
+  ansible.builtin.import_playbook: playbooks/recover-control-plane.yml
diff --git a/remove-node.yml b/remove-node.yml
index b9fdb93d6..bb00e07bc 100644
--- a/remove-node.yml
+++ b/remove-node.yml
@@ -1,50 +1,3 @@
 ---
-- name: Check ansible version
-  import_playbook: ansible_version.yml
-
-- name: Ensure compatibility with old groups
-  import_playbook: legacy_groups.yml
-
-- hosts: bastion[0]
-  gather_facts: False
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
-
-- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
-  gather_facts: no
-  tasks:
-    - name: Confirm Execution
-      pause:
-        prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
-      register: pause_result
-      run_once: True
-      when:
-        - not (skip_confirmation | default(false) | bool)
-
-    - name: Fail if user does not confirm deletion
-      fail:
-        msg: "Delete nodes confirmation failed"
-      when: pause_result.user_input | default('yes') != 'yes'
-
-- name: Gather facts
-  import_playbook: facts.yml
-  when: reset_nodes|default(True)|bool
-
-- hosts: "{{ node | default('kube_node') }}"
-  gather_facts: no
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
-    - { role: remove-node/pre-remove, tags: pre-remove }
-    - { role: remove-node/remove-etcd-node }
-    - { role: reset, tags: reset, when: reset_nodes|default(True)|bool }
-
-# Currently cannot remove first master or etcd
-- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
-  gather_facts: no
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
-    - { role: remove-node/post-remove, tags: post-remove }
+- name: Remove node
+  ansible.builtin.import_playbook: playbooks/remove-node.yml
\ No newline at end of file
diff --git a/reset.yml b/reset.yml
index 6fa9fa3ac..286593d71 100644
--- a/reset.yml
+++ b/reset.yml
@@ -1,39 +1,3 @@
 ---
-- name: Check ansible version
-  import_playbook: ansible_version.yml
-
-- name: Ensure compatibility with old groups
-  import_playbook: legacy_groups.yml
-
-- hosts: bastion[0]
-  gather_facts: False
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults}
-    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
-
-- name: Gather facts
-  import_playbook: facts.yml
-
-- hosts: etcd:k8s_cluster:calico_rr
-  gather_facts: False
-  vars_prompt:
-    name: "reset_confirmation"
-    prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
-    default: "no"
-    private: no
-
-  pre_tasks:
-    - name: check confirmation
-      fail:
-        msg: "Reset confirmation failed"
-      when: reset_confirmation != "yes"
-
-    - name: Gather information about installed services
-      service_facts:
-
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults}
-    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_early: true }
-    - { role: reset, tags: reset }
+- name: Reset the cluster
+  ansible.builtin.import_playbook: playbooks/reset.yml
diff --git a/scale.yml b/scale.yml
index 8e79bfa03..b78fc69fd 100644
--- a/scale.yml
+++ b/scale.yml
@@ -1,124 +1,3 @@
 ---
-- name: Check ansible version
-  import_playbook: ansible_version.yml
-
-- name: Ensure compatibility with old groups
-  import_playbook: legacy_groups.yml
-
-- hosts: bastion[0]
-  gather_facts: False
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
-
-- name: Bootstrap any new workers
-  hosts: kube_node
-  strategy: linear
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  gather_facts: false
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: bootstrap-os, tags: bootstrap-os }
-
-- name: Gather facts
-  tags: always
-  import_playbook: facts.yml
-
-- name: Generate the etcd certificates beforehand
-  hosts: etcd:kube_control_plane
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - role: etcd
-      tags: etcd
-      vars:
-        etcd_cluster_setup: false
-        etcd_events_cluster_setup: false
-      when:
-        - etcd_deployment_type != "kubeadm"
-        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
-        - kube_network_plugin != "calico" or calico_datastore == "etcd"
-
-- name: Download images to ansible host cache via first kube_control_plane node
-  hosts: kube_control_plane[0]
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost" }
-    - { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
-    - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }
-
-- name: Target only workers to get kubelet installed and checking in on any new nodes(engine)
-  hosts: kube_node
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
-    - { role: download, tags: download, when: "not skip_downloads" }
-    - role: etcd
-      tags: etcd
-      vars:
-        etcd_cluster_setup: false
-      when:
-        - etcd_deployment_type != "kubeadm"
-        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
-        - kube_network_plugin != "calico" or calico_datastore == "etcd"
-
-- name: Target only workers to get kubelet installed and checking in on any new nodes(node)
-  hosts: kube_node
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/node, tags: node }
-
-- name: Upload control plane certs and retrieve encryption key
-  hosts: kube_control_plane | first
-  environment: "{{ proxy_disable_env }}"
-  gather_facts: False
-  tags: kubeadm
-  roles:
-    - { role: kubespray-defaults }
-  tasks:
-    - name: Upload control plane certificates
-      command: >-
-        {{ bin_dir }}/kubeadm init phase
-        --config {{ kube_config_dir }}/kubeadm-config.yaml
-        upload-certs
-        --upload-certs
-      environment: "{{ proxy_disable_env }}"
-      register: kubeadm_upload_cert
-      changed_when: false
-    - name: set fact 'kubeadm_certificate_key' for later use
-      set_fact:
-        kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}"
-      when: kubeadm_certificate_key is not defined
-
-- name: Target only workers to get kubelet installed and checking in on any new nodes(network)
-  hosts: kube_node
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/kubeadm, tags: kubeadm }
-    - { role: kubernetes/node-label, tags: node-label }
-    - { role: network_plugin, tags: network }
-
-- name: Apply resolv.conf changes now that cluster DNS is up
-  hosts: k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
+- name: Scale the cluster
+  ansible.builtin.import_playbook: playbooks/scale.yml
\ No newline at end of file
diff --git a/tests/ansible.cfg b/tests/ansible.cfg
index ad2827219..88531bed8 100644
--- a/tests/ansible.cfg
+++ b/tests/ansible.cfg
@@ -12,3 +12,4 @@ stdout_callback = skippy
 library = ./library:../library
 callbacks_enabled = profile_tasks
 jinja2_extensions = jinja2.ext.do
+roles_path = ../roles
diff --git a/tests/files/vagrant_ubuntu20-flannel-collection.rb b/tests/files/vagrant_ubuntu20-flannel-collection.rb
new file mode 100644
index 000000000..c739f58a2
--- /dev/null
+++ b/tests/files/vagrant_ubuntu20-flannel-collection.rb
@@ -0,0 +1,9 @@
+$os = "ubuntu2004"
+
+# For CI we are not worried about data persistence across reboot
+$libvirt_volume_cache = "unsafe"
+
+# Checking for box update can trigger API rate limiting
+# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
+$box_check_update = false
+$vm_cpus = 2
\ No newline at end of file
diff --git a/tests/files/vagrant_ubuntu20-flannel-collection.yml b/tests/files/vagrant_ubuntu20-flannel-collection.yml
new file mode 100644
index 000000000..6f8916feb
--- /dev/null
+++ b/tests/files/vagrant_ubuntu20-flannel-collection.yml
@@ -0,0 +1,3 @@
+---
+# Kubespray settings
+kube_network_plugin: flannel
diff --git a/tests/scripts/check_galaxy_version.sh b/tests/scripts/check_galaxy_version.sh
new file mode 100755
index 000000000..b6679dba4
--- /dev/null
+++ b/tests/scripts/check_galaxy_version.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+set -e
+
+version_from_galaxy=$(grep "^version:" galaxy.yml | awk '{print $2}')
+version_from_docs=$(grep -P "^\s+version:\sv\d+\.\d+\.\d+" docs/ansible_collection.md | awk '{print $2}')
+
+if [[ $KUBESPRAY_VERSION != "v${version_from_galaxy}" ]]
+then
+	echo "Please update galaxy.yml version to match the KUBESPRAY_VERSION. Be sure to remove the \"v\" to adhere"
+	echo "to semantic versioning"
+	exit 1
+fi
+
+if [[ $KUBESPRAY_VERSION != "${version_from_docs}" ]]
+then
+	echo "Please update the documentation for Ansible collections under docs/ansible_collection.md to reflect the KUBESPRAY_VERSION"
+	exit 1
+fi
diff --git a/tests/scripts/testcases_run.sh b/tests/scripts/testcases_run.sh
index eac0afe72..65eb08567 100755
--- a/tests/scripts/testcases_run.sh
+++ b/tests/scripts/testcases_run.sh
@@ -78,6 +78,39 @@ if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
   ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube_control_plane:!fake_hosts recover-control-plane.yml
 fi
 
+# Test collection build and install by installing our collection, emptying our repository, adding
+# cluster.yml, reset.yml, and remove-node.yml files that simply point to our collection's playbooks, and then
+# running the same tests as before
+if [[ "${CI_JOB_NAME}" =~ "collection" ]]; then
+  # Build and install collection
+  ansible-galaxy collection build
+  ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
+
+  # Simply remove all of our files and directories except for our tests directory
+  # to be absolutely certain that none of our playbooks or roles
+  # are interfering with our collection
+  find -maxdepth 1 ! -name tests -exec rm -rfv {} \;
+
+  # Write cluster.yml
+cat > cluster.yml <<EOF
+- name: Install Kubernetes
+  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
+EOF
+
+  # Write reset.yml
+cat > reset.yml <<EOF
+- name: Remove Kubernetes
+  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.reset
+EOF
+
+  # Write remove-node.yml
+cat > remove-node.yml <<EOF
+- name: Remove node from Kubernetes
+  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remove-node
+EOF
+
+fi
+
 # Tests Cases
 ## Test Master API
 ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/010_check-apiserver.yml $ANSIBLE_LOG_LEVEL
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 39dd95a01..b92b1d4ff 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -1,170 +1,3 @@
 ---
-- name: Check ansible version
-  import_playbook: ansible_version.yml
-
-- name: Ensure compatibility with old groups
-  import_playbook: legacy_groups.yml
-
-- hosts: bastion[0]
-  gather_facts: False
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
-
-- hosts: k8s_cluster:etcd:calico_rr
-  strategy: linear
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  gather_facts: false
-  environment: "{{ proxy_disable_env }}"
-  vars:
-    # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
-    # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
-    ansible_ssh_pipelining: false
-  roles:
-    - { role: kubespray-defaults }
-    - { role: bootstrap-os, tags: bootstrap-os}
-
-- name: Gather facts
-  tags: always
-  import_playbook: facts.yml
-
-- name: Download images to ansible host cache via first kube_control_plane node
-  hosts: kube_control_plane[0]
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost"}
-    - { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
-    - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }
-
-- name: Prepare nodes for upgrade
-  hosts: k8s_cluster:etcd:calico_rr
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: download, tags: download, when: "not skip_downloads" }
-
-- name: Upgrade container engine on non-cluster nodes
-  hosts: etcd:calico_rr:!k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  serial: "{{ serial | default('20%') }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
-
-- hosts: etcd:kube_control_plane
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - role: etcd
-      tags: etcd
-      vars:
-        etcd_cluster_setup: true
-        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
-      when: etcd_deployment_type != "kubeadm"
-
-- hosts: k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - role: etcd
-      tags: etcd
-      vars:
-        etcd_cluster_setup: false
-        etcd_events_cluster_setup: false
-      when:
-        - etcd_deployment_type != "kubeadm"
-        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
-        - kube_network_plugin != "calico" or calico_datastore == "etcd"
-
-- name: Handle upgrades to master components first to maintain backwards compat.
-  gather_facts: False
-  hosts: kube_control_plane
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  serial: 1
-  roles:
-    - { role: kubespray-defaults }
-    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
-    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
-    - { role: kubernetes/node, tags: node }
-    - { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true }
-    - { role: kubernetes/client, tags: client }
-    - { role: kubernetes/node-label, tags: node-label }
-    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
-    - { role: kubernetes-apps, tags: csi-driver }
-    - { role: upgrade/post-upgrade, tags: post-upgrade }
-
-- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
-  hosts: kube_control_plane:calico_rr:kube_node
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  serial: "{{ serial | default('20%') }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
-    - { role: network_plugin, tags: network }
-    - { role: kubernetes-apps/network_plugin, tags: network }
-    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
-
-- name: Finally handle worker upgrades, based on given batch size
-  hosts: kube_node:calico_rr:!kube_control_plane
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  serial: "{{ serial | default('20%') }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
-    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
-    - { role: kubernetes/node, tags: node }
-    - { role: kubernetes/kubeadm, tags: kubeadm }
-    - { role: kubernetes/node-label, tags: node-label }
-    - { role: upgrade/post-upgrade, tags: post-upgrade }
-
-- hosts: kube_control_plane[0]
-  gather_facts: False
-  any_errors_fatal: true
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
-
-- hosts: calico_rr
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: network_plugin/calico/rr, tags: network }
-
-- hosts: kube_control_plane
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
-    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
-    - { role: kubernetes-apps, tags: apps }
-
-- name: Apply resolv.conf changes now that cluster DNS is up
-  hosts: k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
+- name: Upgrade cluster
+  ansible.builtin.import_playbook: playbooks/upgrade-cluster.yml
\ No newline at end of file
-- 
GitLab