From 9a7b021eb805cfa9177c0fbd75305ec90216363c Mon Sep 17 00:00:00 2001
From: Vlad Korolev <vlad@v-lad.org>
Date: Wed, 28 Aug 2024 01:30:56 -0400
Subject: [PATCH] =?UTF-8?q?Do=20not=20use=20=E2=80=98yes/no=E2=80=99=20for?=
 =?UTF-8?q?=20boolean=20values=20(#11472)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use the canonical lowercase booleans `true`/`false` consistently across the Ansible playbooks (replacing `yes`/`no`/`True`/`False`), and drop the `truthy: disable` override from .yamllint so the rule is enforced going forward.
---
 .yamllint                                     |  1 -
 contrib/azurerm/generate-inventory.yml        |  2 +-
 contrib/azurerm/generate-inventory_2.yml      |  2 +-
 contrib/azurerm/generate-templates.yml        |  2 +-
 contrib/dind/dind-cluster.yaml                |  2 +-
 contrib/dind/kubespray-dind.yaml              |  2 +-
 .../dind/roles/dind-cluster/tasks/main.yaml   |  4 +-
 contrib/dind/roles/dind-host/tasks/main.yaml  |  2 +-
 contrib/kvm-setup/kvm-setup.yml               |  4 +-
 .../kvm-setup/roles/kvm-setup/tasks/main.yml  |  6 +--
 .../roles/kvm-setup/tasks/sysctl.yml          |  4 +-
 .../glusterfs/roles/glusterfs/README.md       |  2 +-
 .../roles/glusterfs/client/defaults/main.yml  |  2 +-
 .../glusterfs/client/tasks/setup-Debian.yml   |  2 +-
 .../roles/glusterfs/server/defaults/main.yml  |  2 +-
 .../roles/glusterfs/server/tasks/main.yml     |  6 +--
 .../glusterfs/server/tasks/setup-Debian.yml   |  2 +-
 .../heketi/heketi-tear-down.yml               |  2 +-
 contrib/offline/generate_list.yml             |  2 +-
 .../os-services/roles/prepare/tasks/main.yml  |  4 +-
 extra_playbooks/upgrade-only-k8s.yml          |  2 +-
 playbooks/ansible_version.yml                 |  2 +-
 playbooks/boilerplate.yml                     |  2 +-
 playbooks/cluster.yml                         | 16 +++---
 playbooks/facts.yml                           |  2 +-
 playbooks/install_etcd.yml                    |  2 +-
 playbooks/remove_node.yml                     |  8 +--
 playbooks/reset.yml                           |  4 +-
 playbooks/scale.yml                           | 14 +++---
 playbooks/upgrade_cluster.yml                 | 20 ++++----
 roles/adduser/defaults/main.yml               |  8 +--
 roles/adduser/vars/coreos.yml                 |  4 +-
 roles/adduser/vars/debian.yml                 |  8 +--
 roles/adduser/vars/redhat.yml                 |  8 +--
 .../molecule/default/converge.yml             |  2 +-
 roles/bootstrap-os/tasks/amzn.yml             |  8 +--
 roles/bootstrap-os/tasks/centos.yml           |  6 +--
 roles/bootstrap-os/tasks/fedora-coreos.yml    |  2 +-
 roles/bootstrap-os/tasks/main.yml             |  2 +-
 roles/bootstrap-os/tasks/opensuse.yml         |  6 +--
 roles/bootstrap-os/tasks/redhat.yml           |  6 +--
 roles/bootstrap-os/vars/fedora-coreos.yml     |  2 +-
 .../containerd-common/tasks/main.yml          |  6 +--
 .../containerd/handlers/main.yml              |  6 +--
 .../containerd/molecule/default/prepare.yml   |  4 +-
 .../containerd/tasks/main.yml                 |  6 +--
 .../cri-dockerd/handlers/main.yml             |  4 +-
 .../container-engine/cri-o/handlers/main.yml  |  2 +-
 .../cri-o/molecule/default/prepare.yml        |  4 +-
 roles/container-engine/cri-o/tasks/main.yaml  |  6 +--
 .../cri-o/tasks/setup-amazon.yaml             |  4 +-
 .../container-engine/crictl/handlers/main.yml |  2 +-
 .../docker-storage/tasks/main.yml             |  2 +-
 .../container-engine/docker/handlers/main.yml |  2 +-
 roles/container-engine/docker/tasks/main.yml  | 10 ++--
 .../docker/tasks/set_facts_dns.yml            |  8 +--
 roles/container-engine/gvisor/tasks/main.yml  |  2 +-
 .../kata-containers/tasks/main.yml            |  2 +-
 .../nerdctl/handlers/main.yml                 |  2 +-
 roles/container-engine/runc/tasks/main.yml    |  6 +--
 roles/container-engine/skopeo/tasks/main.yml  |  6 +--
 .../validate-container-engine/tasks/main.yml  | 18 +++----
 roles/download/tasks/check_pull_required.yml  |  2 +-
 roles/download/tasks/download_container.yml   | 14 +++---
 roles/download/tasks/download_file.yml        |  6 +--
 roles/download/tasks/extract_file.yml         |  2 +-
 roles/download/tasks/prep_download.yml        |  4 +-
 roles/etcd/handlers/backup.yml                |  6 +--
 roles/etcd/handlers/main.yml                  |  4 +-
 roles/etcd/tasks/check_certs.yml              |  6 +--
 roles/etcd/tasks/configure.yml                | 28 +++++------
 roles/etcd/tasks/gen_certs_script.yml         | 14 +++---
 roles/etcd/tasks/gen_nodes_certs_script.yml   |  2 +-
 roles/etcd/tasks/install_docker.yml           |  4 +-
 roles/etcd/tasks/install_host.yml             |  2 +-
 roles/etcd/tasks/join_etcd-events_member.yml  |  4 +-
 roles/etcd/tasks/join_etcd_member.yml         |  4 +-
 roles/etcd/tasks/main.yml                     |  2 +-
 roles/etcdctl_etcdutl/tasks/main.yml          |  4 +-
 roles/kubernetes-apps/ansible/tasks/main.yml  |  2 +-
 roles/kubernetes-apps/argocd/tasks/main.yml   | 24 ++++-----
 .../cluster_roles/tasks/main.yml              |  2 +-
 .../csi_driver/vsphere/defaults/main.yml      |  2 +-
 roles/kubernetes-apps/helm/tasks/main.yml     |  6 +--
 .../helm/tasks/pyyaml-flatcar.yml             |  4 +-
 roles/kubernetes-apps/krew/tasks/krew.yml     |  8 +--
 .../network_plugin/weave/tasks/main.yml       |  2 +-
 roles/kubernetes/client/tasks/main.yml        | 26 +++++-----
 .../control-plane/handlers/main.yml           |  6 +--
 .../tasks/define-first-kube-control.yml       |  4 +-
 .../control-plane/tasks/encrypt-at-rest.yml   |  6 +--
 .../control-plane/tasks/kubeadm-backup.yml    |  4 +-
 .../tasks/kubeadm-fix-apiserver.yml           |  2 +-
 .../control-plane/tasks/kubeadm-secondary.yml |  4 +-
 .../control-plane/tasks/kubeadm-setup.yml     |  6 +--
 .../kubelet-fix-client-cert-rotation.yml      |  4 +-
 roles/kubernetes/control-plane/tasks/main.yml |  2 +-
 roles/kubernetes/kubeadm/tasks/main.yml       | 18 +++----
 roles/kubernetes/node-label/tasks/main.yml    |  2 +-
 roles/kubernetes/node/tasks/facts.yml         |  2 +-
 roles/kubernetes/node/tasks/kubelet.yml       |  6 +--
 .../node/tasks/loadbalancer/haproxy.yml       |  8 +--
 .../node/tasks/loadbalancer/kube-vip.yml      |  6 +--
 .../node/tasks/loadbalancer/nginx-proxy.yml   |  8 +--
 roles/kubernetes/node/tasks/main.yml          | 10 ++--
 roles/kubernetes/node/tasks/pre_upgrade.yml   |  2 +-
 roles/kubernetes/preinstall/handlers/main.yml | 14 +++---
 .../preinstall/tasks/0010-swapoff.yml         |  6 +--
 .../preinstall/tasks/0020-set_facts.yml       | 42 ++++++++--------
 .../preinstall/tasks/0040-verify-settings.yml | 14 +++---
 .../tasks/0050-create_directories.yml         |  6 +--
 .../preinstall/tasks/0060-resolvconf.yml      |  2 +-
 .../0062-networkmanager-unmanaged-devices.yml |  2 +-
 .../tasks/0063-networkmanager-dns.yml         |  6 +--
 .../preinstall/tasks/0070-system-packages.yml |  2 +-
 .../tasks/0080-system-configurations.yml      | 32 ++++++------
 .../preinstall/tasks/0090-etchosts.yml        | 18 +++----
 .../preinstall/tasks/0100-dhclient-hooks.yml  |  4 +-
 .../tasks/0110-dhclient-hooks-undo.yml        |  2 +-
 .../tasks/0120-growpart-azure-centos-7.yml    |  4 +-
 roles/kubernetes/preinstall/tasks/main.yml    |  6 +--
 .../kubernetes/tokens/tasks/check-tokens.yml  | 12 ++---
 roles/kubernetes/tokens/tasks/gen_tokens.yml  | 10 ++--
 .../kubespray-defaults/tasks/fallback_ips.yml | 10 ++--
 roles/kubespray-defaults/tasks/no_proxy.yml   |  6 +--
 roles/network_plugin/calico/rr/tasks/pre.yml  |  2 +-
 roles/network_plugin/calico/tasks/check.yml   | 50 +++++++++----------
 roles/network_plugin/calico/tasks/install.yml | 36 ++++++-------
 roles/network_plugin/calico/tasks/repos.yml   |  8 +--
 roles/network_plugin/calico/tasks/reset.yml   | 12 ++---
 roles/network_plugin/cilium/tasks/install.yml |  4 +-
 .../cilium/tasks/reset_iface.yml              |  6 +--
 roles/network_plugin/cni/tasks/main.yml       |  2 +-
 roles/network_plugin/flannel/tasks/reset.yml  | 12 ++---
 .../kube-router/tasks/reset.yml               | 12 ++---
 roles/network_plugin/macvlan/tasks/main.yml   |  4 +-
 .../recover_control_plane/etcd/tasks/main.yml |  4 +-
 .../remove-etcd-node/tasks/main.yml           |  2 +-
 roles/reset/tasks/main.yml                    | 30 +++++------
 roles/upgrade/post-upgrade/tasks/main.yml     |  2 +-
 roles/upgrade/pre-upgrade/tasks/main.yml      |  2 +-
 .../win_nodes/kubernetes_patch/tasks/main.yml |  2 +-
 scripts/collect-info.yaml                     |  4 +-
 tests/cloud_playbooks/cleanup-packet.yml      |  2 +-
 tests/cloud_playbooks/create-packet.yml       |  2 +-
 tests/cloud_playbooks/delete-packet.yml       |  2 +-
 .../roles/packet-ci/tasks/delete-vms.yml      |  6 +--
 tests/cloud_playbooks/wait-for-ssh.yml        |  4 +-
 .../packet_ubuntu20-all-in-one-docker.yml     |  2 +-
 ...t_ubuntu20-calico-all-in-one-hardening.yml |  2 +-
 .../packet_ubuntu20-calico-all-in-one.yml     |  2 +-
 ...buntu20-calico-etcd-kubeadm-upgrade-ha.yml |  2 +-
 .../packet_ubuntu20-calico-etcd-kubeadm.yml   |  2 +-
 .../packet_ubuntu22-all-in-one-docker.yml     |  2 +-
 .../packet_ubuntu22-calico-all-in-one.yml     |  2 +-
 .../packet_ubuntu24-all-in-one-docker.yml     |  2 +-
 .../packet_ubuntu24-calico-all-in-one.yml     |  2 +-
 .../packet_ubuntu24-calico-etcd-datastore.yml |  2 +-
 tests/testcases/010_check-apiserver.yml       |  2 +-
 tests/testcases/030_check-network.yml         |  4 +-
 tests/testcases/040_check-network-adv.yml     |  4 +-
 tests/testcases/100_check-k8s-conformance.yml |  2 +-
 162 files changed, 507 insertions(+), 508 deletions(-)

diff --git a/.yamllint b/.yamllint
index eb061917e..56786e0a9 100644
--- a/.yamllint
+++ b/.yamllint
@@ -26,4 +26,3 @@ rules:
   octal-values:
     forbid-implicit-octal: true # yamllint defaults to false
     forbid-explicit-octal: true # yamllint defaults to false
-  truthy: disable
diff --git a/contrib/azurerm/generate-inventory.yml b/contrib/azurerm/generate-inventory.yml
index 01ee38662..59e1e90b6 100644
--- a/contrib/azurerm/generate-inventory.yml
+++ b/contrib/azurerm/generate-inventory.yml
@@ -1,6 +1,6 @@
 ---
 - name: Generate Azure inventory
   hosts: localhost
-  gather_facts: False
+  gather_facts: false
   roles:
     - generate-inventory
diff --git a/contrib/azurerm/generate-inventory_2.yml b/contrib/azurerm/generate-inventory_2.yml
index 9173e1d82..8c2cbff86 100644
--- a/contrib/azurerm/generate-inventory_2.yml
+++ b/contrib/azurerm/generate-inventory_2.yml
@@ -1,6 +1,6 @@
 ---
 - name: Generate Azure inventory
   hosts: localhost
-  gather_facts: False
+  gather_facts: false
   roles:
     - generate-inventory_2
diff --git a/contrib/azurerm/generate-templates.yml b/contrib/azurerm/generate-templates.yml
index f1fcb626f..f2cf231bc 100644
--- a/contrib/azurerm/generate-templates.yml
+++ b/contrib/azurerm/generate-templates.yml
@@ -1,6 +1,6 @@
 ---
 - name: Generate Azure templates
   hosts: localhost
-  gather_facts: False
+  gather_facts: false
   roles:
     - generate-templates
diff --git a/contrib/dind/dind-cluster.yaml b/contrib/dind/dind-cluster.yaml
index 258837d08..0c61c3f2d 100644
--- a/contrib/dind/dind-cluster.yaml
+++ b/contrib/dind/dind-cluster.yaml
@@ -1,7 +1,7 @@
 ---
 - name: Create nodes as docker containers
   hosts: localhost
-  gather_facts: False
+  gather_facts: false
   roles:
     - { role: dind-host }
 
diff --git a/contrib/dind/kubespray-dind.yaml b/contrib/dind/kubespray-dind.yaml
index ecfb5573a..6d57cf00d 100644
--- a/contrib/dind/kubespray-dind.yaml
+++ b/contrib/dind/kubespray-dind.yaml
@@ -15,7 +15,7 @@ docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check
 
 dns_mode: coredns
 
-deploy_netchecker: True
+deploy_netchecker: true
 netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
 netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
 netcheck_agent_image_tag: v1.0
diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml
index dcb086c64..1a3630f9f 100644
--- a/contrib/dind/roles/dind-cluster/tasks/main.yaml
+++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml
@@ -14,7 +14,7 @@
     src: "/bin/true"
     dest: "{{ item }}"
     state: link
-    force: yes
+    force: true
   with_items:
     # DIND box may have swap enable, don't bother
     - /sbin/swapoff
@@ -58,7 +58,7 @@
     name: "{{ distro_user }}"
     uid: 1000
     # groups: sudo
-    append: yes
+    append: true
 
 - name: Allow password-less sudo to "{{ distro_user }}"
   copy:
diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml
index 56c8ff4c5..e0dc71008 100644
--- a/contrib/dind/roles/dind-host/tasks/main.yaml
+++ b/contrib/dind/roles/dind-host/tasks/main.yaml
@@ -19,7 +19,7 @@
     state: started
     hostname: "{{ item }}"
     command: "{{ distro_init }}"
-    # recreate: yes
+    # recreate: true
     privileged: true
     tmpfs:
       - /sys/module/nf_conntrack/parameters
diff --git a/contrib/kvm-setup/kvm-setup.yml b/contrib/kvm-setup/kvm-setup.yml
index b8d440587..73b819789 100644
--- a/contrib/kvm-setup/kvm-setup.yml
+++ b/contrib/kvm-setup/kvm-setup.yml
@@ -1,8 +1,8 @@
 ---
 - name: Prepare Hypervisor to later install kubespray VMs
   hosts: localhost
-  gather_facts: False
-  become: yes
+  gather_facts: false
+  become: true
   vars:
     bootstrap_os: none
   roles:
diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml b/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
index 3e8ade645..dfcd3be73 100644
--- a/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
+++ b/contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
@@ -11,12 +11,12 @@
 
 - name: Install required packages
   apt:
-    upgrade: yes
-    update_cache: yes
+    upgrade: true
+    update_cache: true
     cache_valid_time: 3600
     name: "{{ item }}"
     state: present
-    install_recommends: no
+    install_recommends: false
   with_items:
     - dnsutils
     - ntp
diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml b/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
index 6934eccf3..75b7ff8fd 100644
--- a/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
+++ b/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
@@ -30,7 +30,7 @@
     value: 1
     sysctl_file: "{{ sysctl_file_path }}"
     state: present
-    reload: yes
+    reload: true
 
 - name: Set bridge-nf-call-{arptables,iptables} to 0
   ansible.posix.sysctl:
@@ -38,7 +38,7 @@
     state: present
     value: 0
     sysctl_file: "{{ sysctl_file_path }}"
-    reload: yes
+    reload: true
   with_items:
     - net.bridge.bridge-nf-call-arptables
     - net.bridge.bridge-nf-call-ip6tables
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/README.md b/contrib/network-storage/glusterfs/roles/glusterfs/README.md
index dda243df0..9e5bf5dcf 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/README.md
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/README.md
@@ -21,7 +21,7 @@ glusterfs_default_release: ""
 You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
 
 ```yaml
-glusterfs_ppa_use: yes
+glusterfs_ppa_use: true
 glusterfs_ppa_version: "3.5"
 ```
 
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml
index b9f0d2d1d..c3fff2e63 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml
@@ -1,7 +1,7 @@
 ---
 # For Ubuntu.
 glusterfs_default_release: ""
-glusterfs_ppa_use: yes
+glusterfs_ppa_use: true
 glusterfs_ppa_version: "4.1"
 
 # Gluster configuration.
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml
index da7a4d8de..0d7cc1874 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-Debian.yml
@@ -3,7 +3,7 @@
   apt_repository:
     repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
     state: present
-    update_cache: yes
+    update_cache: true
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
 
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml
index ef9a71eba..7d6e1025b 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/defaults/main.yml
@@ -1,7 +1,7 @@
 ---
 # For Ubuntu.
 glusterfs_default_release: ""
-glusterfs_ppa_use: yes
+glusterfs_ppa_use: true
 glusterfs_ppa_version: "3.12"
 
 # Gluster configuration.
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
index 6bdc41420..a9f7698a3 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
@@ -43,7 +43,7 @@
   service:
     name: "{{ glusterfs_daemon }}"
     state: started
-    enabled: yes
+    enabled: true
 
 - name: Ensure Gluster brick and mount directories exist.
   file:
@@ -62,7 +62,7 @@
     replicas: "{{ groups['gfs-cluster'] | length }}"
     cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
     host: "{{ inventory_hostname }}"
-    force: yes
+    force: true
   run_once: true
   when: groups['gfs-cluster'] | length > 1
 
@@ -73,7 +73,7 @@
     brick: "{{ gluster_brick_dir }}"
     cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
     host: "{{ inventory_hostname }}"
-    force: yes
+    force: true
   run_once: true
   when: groups['gfs-cluster'] | length <= 1
 
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml
index 104735903..4d4b1b4b8 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-Debian.yml
@@ -3,7 +3,7 @@
   apt_repository:
     repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
     state: present
-    update_cache: yes
+    update_cache: true
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
 
diff --git a/contrib/network-storage/heketi/heketi-tear-down.yml b/contrib/network-storage/heketi/heketi-tear-down.yml
index e64f085cb..8c9d1c3a0 100644
--- a/contrib/network-storage/heketi/heketi-tear-down.yml
+++ b/contrib/network-storage/heketi/heketi-tear-down.yml
@@ -6,6 +6,6 @@
 
 - name: Teardown disks in heketi
   hosts: heketi-node
-  become: yes
+  become: true
   roles:
     - { role: tear-down-disks }
diff --git a/contrib/offline/generate_list.yml b/contrib/offline/generate_list.yml
index 6b2bcf806..f103d031f 100644
--- a/contrib/offline/generate_list.yml
+++ b/contrib/offline/generate_list.yml
@@ -1,7 +1,7 @@
 ---
 - name: Collect container images for offline deployment
   hosts: localhost
-  become: no
+  become: false
 
   roles:
     # Just load default variables from roles.
diff --git a/contrib/os-services/roles/prepare/tasks/main.yml b/contrib/os-services/roles/prepare/tasks/main.yml
index 177712e42..487b3b6f1 100644
--- a/contrib/os-services/roles/prepare/tasks/main.yml
+++ b/contrib/os-services/roles/prepare/tasks/main.yml
@@ -10,7 +10,7 @@
     systemd_service:
       name: firewalld
       state: stopped
-      enabled: no
+      enabled: false
     when:
       "'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"
 
@@ -18,6 +18,6 @@
     systemd_service:
       name: ufw
       state: stopped
-      enabled: no
+      enabled: false
     when:
       "'ufw.service' in services and services['ufw.service'].status != 'not-found'"
diff --git a/extra_playbooks/upgrade-only-k8s.yml b/extra_playbooks/upgrade-only-k8s.yml
index 4207f8d28..5f396fa28 100644
--- a/extra_playbooks/upgrade-only-k8s.yml
+++ b/extra_playbooks/upgrade-only-k8s.yml
@@ -12,7 +12,7 @@
 
 - name: Setup ssh config to use the bastion
   hosts: localhost
-  gather_facts: False
+  gather_facts: false
   roles:
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
diff --git a/playbooks/ansible_version.yml b/playbooks/ansible_version.yml
index aa2d6b476..2c8bac63c 100644
--- a/playbooks/ansible_version.yml
+++ b/playbooks/ansible_version.yml
@@ -2,7 +2,7 @@
 - name: Check Ansible version
   hosts: all
   gather_facts: false
-  become: no
+  become: false
   run_once: true
   vars:
     minimal_ansible_version: 2.16.4
diff --git a/playbooks/boilerplate.yml b/playbooks/boilerplate.yml
index 137a4c2c5..eafa9b42f 100644
--- a/playbooks/boilerplate.yml
+++ b/playbooks/boilerplate.yml
@@ -51,7 +51,7 @@
 
 - name: Install bastion ssh config
   hosts: bastion[0]
-  gather_facts: False
+  gather_facts: false
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
diff --git a/playbooks/cluster.yml b/playbooks/cluster.yml
index c433a8c69..ca67a28d3 100644
--- a/playbooks/cluster.yml
+++ b/playbooks/cluster.yml
@@ -7,7 +7,7 @@
 
 - name: Prepare for etcd install
   hosts: k8s_cluster:etcd
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -21,7 +21,7 @@
 
 - name: Install Kubernetes nodes
   hosts: k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -30,7 +30,7 @@
 
 - name: Install the control plane
   hosts: kube_control_plane
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -41,7 +41,7 @@
 
 - name: Invoke kubeadm and install a CNI
   hosts: k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -54,7 +54,7 @@
 
 - name: Install Calico Route Reflector
   hosts: calico_rr
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -63,7 +63,7 @@
 
 - name: Patch Kubernetes for Windows
   hosts: kube_control_plane[0]
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -72,7 +72,7 @@
 
 - name: Install Kubernetes apps
   hosts: kube_control_plane
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -86,7 +86,7 @@
 
 - name: Apply resolv.conf changes now that cluster DNS is up
   hosts: k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
diff --git a/playbooks/facts.yml b/playbooks/facts.yml
index 77823aca4..d35eea80c 100644
--- a/playbooks/facts.yml
+++ b/playbooks/facts.yml
@@ -15,7 +15,7 @@
 
 - name: Gather facts
   hosts: k8s_cluster:etcd:calico_rr
-  gather_facts: False
+  gather_facts: false
   tags: always
   tasks:
     - name: Gather minimal facts
diff --git a/playbooks/install_etcd.yml b/playbooks/install_etcd.yml
index b8e4d1d89..1f585119c 100644
--- a/playbooks/install_etcd.yml
+++ b/playbooks/install_etcd.yml
@@ -16,7 +16,7 @@
 
 - name: Install etcd
   hosts: etcd:kube_control_plane:_kubespray_needs_etcd
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
diff --git a/playbooks/remove_node.yml b/playbooks/remove_node.yml
index e01338965..f994dae43 100644
--- a/playbooks/remove_node.yml
+++ b/playbooks/remove_node.yml
@@ -4,13 +4,13 @@
 
 - name: Confirm node removal
   hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
-  gather_facts: no
+  gather_facts: false
   tasks:
     - name: Confirm Execution
       pause:
         prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
       register: pause_result
-      run_once: True
+      run_once: true
       when:
         - not (skip_confirmation | default(false) | bool)
 
@@ -25,7 +25,7 @@
 
 - name: Reset node
   hosts: "{{ node | default('kube_node') }}"
-  gather_facts: no
+  gather_facts: false
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults, when: reset_nodes | default(True) | bool }
@@ -36,7 +36,7 @@
 # Currently cannot remove first master or etcd
 - name: Post node removal
   hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
-  gather_facts: no
+  gather_facts: false
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults, when: reset_nodes | default(True) | bool }
diff --git a/playbooks/reset.yml b/playbooks/reset.yml
index 5742bd844..6fa18c0ac 100644
--- a/playbooks/reset.yml
+++ b/playbooks/reset.yml
@@ -7,13 +7,13 @@
 
 - name: Reset cluster
   hosts: etcd:k8s_cluster:calico_rr
-  gather_facts: False
+  gather_facts: false
   pre_tasks:
     - name: Reset Confirmation
       pause:
         prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
       register: reset_confirmation_prompt
-      run_once: True
+      run_once: true
       when:
         - not (skip_confirmation | default(false) | bool)
         - reset_confirmation is not defined
diff --git a/playbooks/scale.yml b/playbooks/scale.yml
index 171e37832..ef0936607 100644
--- a/playbooks/scale.yml
+++ b/playbooks/scale.yml
@@ -7,7 +7,7 @@
 
 - name: Generate the etcd certificates beforehand
   hosts: etcd:kube_control_plane
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -24,7 +24,7 @@
 
 - name: Download images to ansible host cache via first kube_control_plane node
   hosts: kube_control_plane[0]
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -34,7 +34,7 @@
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(engine)
   hosts: kube_node
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -53,7 +53,7 @@
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(node)
   hosts: kube_node
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -63,7 +63,7 @@
 - name: Upload control plane certs and retrieve encryption key
   hosts: kube_control_plane | first
   environment: "{{ proxy_disable_env }}"
-  gather_facts: False
+  gather_facts: false
   tags: kubeadm
   roles:
     - { role: kubespray-defaults }
@@ -84,7 +84,7 @@
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(network)
   hosts: kube_node
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -96,7 +96,7 @@
 
 - name: Apply resolv.conf changes now that cluster DNS is up
   hosts: k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
diff --git a/playbooks/upgrade_cluster.yml b/playbooks/upgrade_cluster.yml
index 3180fec93..99511a820 100644
--- a/playbooks/upgrade_cluster.yml
+++ b/playbooks/upgrade_cluster.yml
@@ -7,7 +7,7 @@
 
 - name: Download images to ansible host cache via first kube_control_plane node
   hosts: kube_control_plane[0]
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -17,7 +17,7 @@
 
 - name: Prepare nodes for upgrade
   hosts: k8s_cluster:etcd:calico_rr
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -27,7 +27,7 @@
 
 - name: Upgrade container engine on non-cluster nodes
   hosts: etcd:calico_rr:!k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   serial: "{{ serial | default('20%') }}"
@@ -39,7 +39,7 @@
   import_playbook: install_etcd.yml
 
 - name: Handle upgrades to master components first to maintain backwards compat.
-  gather_facts: False
+  gather_facts: false
   hosts: kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -62,7 +62,7 @@
 
 - name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
   hosts: kube_control_plane:calico_rr:kube_node
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   environment: "{{ proxy_disable_env }}"
@@ -75,7 +75,7 @@
 
 - name: Finally handle worker upgrades, based on given batch size
   hosts: kube_node:calico_rr:!kube_control_plane
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   serial: "{{ serial | default('20%') }}"
@@ -93,7 +93,7 @@
 
 - name: Patch Kubernetes for Windows
   hosts: kube_control_plane[0]
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: true
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -102,7 +102,7 @@
 
 - name: Install Calico Route Reflector
   hosts: calico_rr
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -111,7 +111,7 @@
 
 - name: Install Kubernetes apps
   hosts: kube_control_plane
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -122,7 +122,7 @@
 
 - name: Apply resolv.conf changes now that cluster DNS is up
   hosts: k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
diff --git a/roles/adduser/defaults/main.yml b/roles/adduser/defaults/main.yml
index df3fc2d02..3307032e0 100644
--- a/roles/adduser/defaults/main.yml
+++ b/roles/adduser/defaults/main.yml
@@ -7,14 +7,14 @@ addusers:
   etcd:
     name: etcd
     comment: "Etcd user"
-    create_home: no
-    system: yes
+    create_home: false
+    system: true
     shell: /sbin/nologin
   kube:
     name: kube
     comment: "Kubernetes user"
-    create_home: no
-    system: yes
+    create_home: false
+    system: true
     shell: /sbin/nologin
     group: "{{ kube_cert_group }}"
 
diff --git a/roles/adduser/vars/coreos.yml b/roles/adduser/vars/coreos.yml
index 5c258df6e..60fb05cf0 100644
--- a/roles/adduser/vars/coreos.yml
+++ b/roles/adduser/vars/coreos.yml
@@ -3,6 +3,6 @@ addusers:
   - name: kube
     comment: "Kubernetes user"
     shell: /sbin/nologin
-    system: yes
+    system: true
     group: "{{ kube_cert_group }}"
-    create_home: no
+    create_home: false
diff --git a/roles/adduser/vars/debian.yml b/roles/adduser/vars/debian.yml
index 99e5b3821..b14b8612e 100644
--- a/roles/adduser/vars/debian.yml
+++ b/roles/adduser/vars/debian.yml
@@ -2,14 +2,14 @@
 addusers:
   - name: etcd
     comment: "Etcd user"
-    create_home: yes
+    create_home: true
     home: "{{ etcd_data_dir }}"
-    system: yes
+    system: true
     shell: /sbin/nologin
 
   - name: kube
     comment: "Kubernetes user"
-    create_home: no
-    system: yes
+    create_home: false
+    system: true
     shell: /sbin/nologin
     group: "{{ kube_cert_group }}"
diff --git a/roles/adduser/vars/redhat.yml b/roles/adduser/vars/redhat.yml
index 99e5b3821..b14b8612e 100644
--- a/roles/adduser/vars/redhat.yml
+++ b/roles/adduser/vars/redhat.yml
@@ -2,14 +2,14 @@
 addusers:
   - name: etcd
     comment: "Etcd user"
-    create_home: yes
+    create_home: true
     home: "{{ etcd_data_dir }}"
-    system: yes
+    system: true
     shell: /sbin/nologin
 
   - name: kube
     comment: "Kubernetes user"
-    create_home: no
-    system: yes
+    create_home: false
+    system: true
     shell: /sbin/nologin
     group: "{{ kube_cert_group }}"
diff --git a/roles/bootstrap-os/molecule/default/converge.yml b/roles/bootstrap-os/molecule/default/converge.yml
index 1f44ec9ca..89a832559 100644
--- a/roles/bootstrap-os/molecule/default/converge.yml
+++ b/roles/bootstrap-os/molecule/default/converge.yml
@@ -1,6 +1,6 @@
 ---
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
   roles:
     - role: bootstrap-os
diff --git a/roles/bootstrap-os/tasks/amzn.yml b/roles/bootstrap-os/tasks/amzn.yml
index 0da5591ca..8a473a07f 100644
--- a/roles/bootstrap-os/tasks/amzn.yml
+++ b/roles/bootstrap-os/tasks/amzn.yml
@@ -8,9 +8,9 @@
     file: epel
     description: Extra Packages for Enterprise Linux 7 - $basearch
     baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
-    gpgcheck: yes
+    gpgcheck: true
     gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
-    skip_if_unavailable: yes
-    enabled: yes
-    repo_gpgcheck: no
+    skip_if_unavailable: true
+    enabled: true
+    repo_gpgcheck: false
   when: epel_enabled
diff --git a/roles/bootstrap-os/tasks/centos.yml b/roles/bootstrap-os/tasks/centos.yml
index fc9a3cf0c..304a37b07 100644
--- a/roles/bootstrap-os/tasks/centos.yml
+++ b/roles/bootstrap-os/tasks/centos.yml
@@ -119,9 +119,9 @@
 - name: Check presence of fastestmirror.conf
   stat:
     path: /etc/yum/pluginconf.d/fastestmirror.conf
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: fastestmirror
 
 # the fastestmirror plugin can actually slow down Ansible deployments
diff --git a/roles/bootstrap-os/tasks/fedora-coreos.yml b/roles/bootstrap-os/tasks/fedora-coreos.yml
index b8c0f3fe7..3062a5a88 100644
--- a/roles/bootstrap-os/tasks/fedora-coreos.yml
+++ b/roles/bootstrap-os/tasks/fedora-coreos.yml
@@ -28,7 +28,7 @@
   raw: "nohup bash -c 'sleep 5s && shutdown -r now'"
   become: true
   ignore_errors: true  # noqa ignore-errors
-  ignore_unreachable: yes
+  ignore_unreachable: true
   when: need_bootstrap.rc != 0
 
 - name: Wait for the reboot to complete
diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml
index e62fbf496..c16fe1bec 100644
--- a/roles/bootstrap-os/tasks/main.yml
+++ b/roles/bootstrap-os/tasks/main.yml
@@ -22,7 +22,7 @@
       - "{{ os_release_dict['ID'] }}.yml"
       paths:
       - vars/
-      skip: True
+      skip: true
   - name: Include tasks
     include_tasks: "{{ included_tasks_file }}"
     with_first_found:
diff --git a/roles/bootstrap-os/tasks/opensuse.yml b/roles/bootstrap-os/tasks/opensuse.yml
index 9b69dcd89..5a4f9dead 100644
--- a/roles/bootstrap-os/tasks/opensuse.yml
+++ b/roles/bootstrap-os/tasks/opensuse.yml
@@ -8,9 +8,9 @@
 - name: Check that /etc/sysconfig/proxy file exists
   stat:
     path: /etc/sysconfig/proxy
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: stat_result
 
 - name: Create the /etc/sysconfig/proxy empty file
diff --git a/roles/bootstrap-os/tasks/redhat.yml b/roles/bootstrap-os/tasks/redhat.yml
index 0aae5a0d6..76a39b2f5 100644
--- a/roles/bootstrap-os/tasks/redhat.yml
+++ b/roles/bootstrap-os/tasks/redhat.yml
@@ -87,9 +87,9 @@
 - name: Check presence of fastestmirror.conf
   stat:
     path: /etc/yum/pluginconf.d/fastestmirror.conf
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: fastestmirror
 
 # the fastestmirror plugin can actually slow down Ansible deployments
diff --git a/roles/bootstrap-os/vars/fedora-coreos.yml b/roles/bootstrap-os/vars/fedora-coreos.yml
index e0bb069f9..37e4c46e8 100644
--- a/roles/bootstrap-os/vars/fedora-coreos.yml
+++ b/roles/bootstrap-os/vars/fedora-coreos.yml
@@ -1,2 +1,2 @@
 ---
-is_fedora_coreos: True
+is_fedora_coreos: true
diff --git a/roles/container-engine/containerd-common/tasks/main.yml b/roles/container-engine/containerd-common/tasks/main.yml
index d0cf1f139..c5b896808 100644
--- a/roles/container-engine/containerd-common/tasks/main.yml
+++ b/roles/container-engine/containerd-common/tasks/main.yml
@@ -2,9 +2,9 @@
 - name: Containerd-common | check if fedora coreos
   stat:
     path: /run/ostree-booted
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: ostree
 
 - name: Containerd-common | set is_ostree
diff --git a/roles/container-engine/containerd/handlers/main.yml b/roles/container-engine/containerd/handlers/main.yml
index 4e7722f4f..6a024fd59 100644
--- a/roles/container-engine/containerd/handlers/main.yml
+++ b/roles/container-engine/containerd/handlers/main.yml
@@ -3,9 +3,9 @@
   systemd_service:
     name: containerd
     state: restarted
-    enabled: yes
-    daemon-reload: yes
-    masked: no
+    enabled: true
+    daemon-reload: true
+    masked: false
   listen: Restart containerd
 
 - name: Containerd | wait for containerd
diff --git a/roles/container-engine/containerd/molecule/default/prepare.yml b/roles/container-engine/containerd/molecule/default/prepare.yml
index ddc9c0453..a3d09ad80 100644
--- a/roles/container-engine/containerd/molecule/default/prepare.yml
+++ b/roles/container-engine/containerd/molecule/default/prepare.yml
@@ -1,7 +1,7 @@
 ---
 - name: Prepare
   hosts: all
-  gather_facts: False
+  gather_facts: false
   become: true
   vars:
     ignore_assert_errors: true
@@ -19,7 +19,7 @@
 
 - name: Prepare CNI
   hosts: all
-  gather_facts: False
+  gather_facts: false
   become: true
   vars:
     ignore_assert_errors: true
diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml
index 073412cd0..8b8c12cbb 100644
--- a/roles/container-engine/containerd/tasks/main.yml
+++ b/roles/container-engine/containerd/tasks/main.yml
@@ -36,7 +36,7 @@
     src: "{{ downloads.containerd.dest }}"
     dest: "{{ containerd_bin_dir }}"
     mode: "0755"
-    remote_src: yes
+    remote_src: true
     extra_opts:
       - --strip-components=1
   notify: Restart containerd
@@ -138,6 +138,6 @@
 - name: Containerd | Ensure containerd is started and enabled
   systemd_service:
     name: containerd
-    daemon_reload: yes
-    enabled: yes
+    daemon_reload: true
+    enabled: true
     state: started
diff --git a/roles/container-engine/cri-dockerd/handlers/main.yml b/roles/container-engine/cri-dockerd/handlers/main.yml
index 00d00e7b2..f60f28fce 100644
--- a/roles/container-engine/cri-dockerd/handlers/main.yml
+++ b/roles/container-engine/cri-dockerd/handlers/main.yml
@@ -3,7 +3,7 @@
   systemd_service:
     name: cri-dockerd
     daemon_reload: true
-    masked: no
+    masked: false
   listen: Restart and enable cri-dockerd
 
 - name: Cri-dockerd | restart docker.service
@@ -27,5 +27,5 @@
 - name: Cri-dockerd | enable cri-dockerd service
   service:
     name: cri-dockerd.service
-    enabled: yes
+    enabled: true
   listen: Restart and enable cri-dockerd
diff --git a/roles/container-engine/cri-o/handlers/main.yml b/roles/container-engine/cri-o/handlers/main.yml
index d173ce41b..0595e4f94 100644
--- a/roles/container-engine/cri-o/handlers/main.yml
+++ b/roles/container-engine/cri-o/handlers/main.yml
@@ -8,5 +8,5 @@
   service:
     name: crio
     state: restarted
-    enabled: yes
+    enabled: true
   listen: Restart crio
diff --git a/roles/container-engine/cri-o/molecule/default/prepare.yml b/roles/container-engine/cri-o/molecule/default/prepare.yml
index c769d7cd2..55ad5174d 100644
--- a/roles/container-engine/cri-o/molecule/default/prepare.yml
+++ b/roles/container-engine/cri-o/molecule/default/prepare.yml
@@ -1,7 +1,7 @@
 ---
 - name: Prepare
   hosts: all
-  gather_facts: False
+  gather_facts: false
   become: true
   vars:
     ignore_assert_errors: true
@@ -19,7 +19,7 @@
 
 - name: Prepare CNI
   hosts: all
-  gather_facts: False
+  gather_facts: false
   become: true
   vars:
     ignore_assert_errors: true
diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml
index a7b234563..bde2e0756 100644
--- a/roles/container-engine/cri-o/tasks/main.yaml
+++ b/roles/container-engine/cri-o/tasks/main.yaml
@@ -5,9 +5,9 @@
 - name: Cri-o | check if fedora coreos
   stat:
     path: /run/ostree-booted
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: ostree
 
 - name: Cri-o | set is_ostree
diff --git a/roles/container-engine/cri-o/tasks/setup-amazon.yaml b/roles/container-engine/cri-o/tasks/setup-amazon.yaml
index 2462c30fd..cef0112b2 100644
--- a/roles/container-engine/cri-o/tasks/setup-amazon.yaml
+++ b/roles/container-engine/cri-o/tasks/setup-amazon.yaml
@@ -8,7 +8,7 @@
   lineinfile:
     dest: /etc/yum.repos.d/amzn2-extras.repo
     line: "[amzn2extra-docker]"
-  check_mode: yes
+  check_mode: true
   register: amzn2_extras_docker_repo
   when:
     - amzn2_extras_file_stat.stat.exists
@@ -19,7 +19,7 @@
     section: amzn2extra-docker
     option: enabled
     value: "0"
-    backup: yes
+    backup: true
     mode: "0644"
   when:
     - amzn2_extras_file_stat.stat.exists
diff --git a/roles/container-engine/crictl/handlers/main.yml b/roles/container-engine/crictl/handlers/main.yml
index 785823fc4..d6ffe169a 100644
--- a/roles/container-engine/crictl/handlers/main.yml
+++ b/roles/container-engine/crictl/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: Get crictl completion
   command: "{{ bin_dir }}/crictl completion"
-  changed_when: False
+  changed_when: false
   register: cri_completion
   check_mode: false
 
diff --git a/roles/container-engine/docker-storage/tasks/main.yml b/roles/container-engine/docker-storage/tasks/main.yml
index e3c713db2..d90dcb775 100644
--- a/roles/container-engine/docker-storage/tasks/main.yml
+++ b/roles/container-engine/docker-storage/tasks/main.yml
@@ -39,7 +39,7 @@
     state: present
 
 - name: Docker-storage-setup | install and run container-storage-setup
-  become: yes
+  become: true
   script: |
     install_container_storage_setup.sh \
       {{ docker_container_storage_setup_repository }} \
diff --git a/roles/container-engine/docker/handlers/main.yml b/roles/container-engine/docker/handlers/main.yml
index 72e95e6bf..76d16f589 100644
--- a/roles/container-engine/docker/handlers/main.yml
+++ b/roles/container-engine/docker/handlers/main.yml
@@ -3,7 +3,7 @@
   systemd_service:
     name: docker
     daemon_reload: true
-    masked: no
+    masked: false
   listen: Restart docker
 
 - name: Docker | reload docker.socket
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index 55b3a0be6..e56fe4ca9 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -2,9 +2,9 @@
 - name: Check if fedora coreos
   stat:
     path: /run/ostree-booted
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: ostree
 
 - name: Set is_ostree
@@ -66,7 +66,7 @@
     path: /etc/apt/sources.list
     regexp: 'buster-backports'
     state: absent
-    backup: yes
+    backup: true
   when:
     - ansible_os_family == 'Debian'
     - ansible_distribution_release == "buster"
@@ -183,7 +183,7 @@
 - name: Ensure docker service is started and enabled
   service:
     name: "{{ item }}"
-    enabled: yes
+    enabled: true
     state: started
   with_items:
     - docker
diff --git a/roles/container-engine/docker/tasks/set_facts_dns.yml b/roles/container-engine/docker/tasks/set_facts_dns.yml
index d7c10392e..d0ccd745a 100644
--- a/roles/container-engine/docker/tasks/set_facts_dns.yml
+++ b/roles/container-engine/docker/tasks/set_facts_dns.yml
@@ -21,9 +21,9 @@
   shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
   args:
     executable: /bin/bash
-  changed_when: False
+  changed_when: false
   register: system_nameservers
-  check_mode: no
+  check_mode: false
 
 - name: Check system search domains
   # noqa risky-shell-pipe - if resolf.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
@@ -31,9 +31,9 @@
   shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
   args:
     executable: /bin/bash
-  changed_when: False
+  changed_when: false
   register: system_search_domains
-  check_mode: no
+  check_mode: false
 
 - name: Add system nameservers to docker options
   set_fact:
diff --git a/roles/container-engine/gvisor/tasks/main.yml b/roles/container-engine/gvisor/tasks/main.yml
index 13b19a2f6..4bab9a996 100644
--- a/roles/container-engine/gvisor/tasks/main.yml
+++ b/roles/container-engine/gvisor/tasks/main.yml
@@ -14,7 +14,7 @@
     src: "{{ item.src }}"
     dest: "{{ bin_dir }}/{{ item.dest }}"
     mode: "0755"
-    remote_src: yes
+    remote_src: true
   with_items:
     - { src: "{{ downloads.gvisor_runsc.dest }}", dest: "runsc" }
     - { src: "{{ downloads.gvisor_containerd_shim.dest }}", dest: "containerd-shim-runsc-v1" }
diff --git a/roles/container-engine/kata-containers/tasks/main.yml b/roles/container-engine/kata-containers/tasks/main.yml
index 38778987d..5014c214a 100644
--- a/roles/container-engine/kata-containers/tasks/main.yml
+++ b/roles/container-engine/kata-containers/tasks/main.yml
@@ -11,7 +11,7 @@
     mode: "0755"
     owner: root
     group: root
-    remote_src: yes
+    remote_src: true
 
 - name: Kata-containers | Create config directory
   file:
diff --git a/roles/container-engine/nerdctl/handlers/main.yml b/roles/container-engine/nerdctl/handlers/main.yml
index 98de60c1c..174470607 100644
--- a/roles/container-engine/nerdctl/handlers/main.yml
+++ b/roles/container-engine/nerdctl/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: Get nerdctl completion
   command: "{{ bin_dir }}/nerdctl completion bash"
-  changed_when: False
+  changed_when: false
   register: nerdctl_completion
   check_mode: false
 
diff --git a/roles/container-engine/runc/tasks/main.yml b/roles/container-engine/runc/tasks/main.yml
index 3ee3fdae0..1d388768d 100644
--- a/roles/container-engine/runc/tasks/main.yml
+++ b/roles/container-engine/runc/tasks/main.yml
@@ -2,9 +2,9 @@
 - name: Runc | check if fedora coreos
   stat:
     path: /run/ostree-booted
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: ostree
 
 - name: Runc | set is_ostree
diff --git a/roles/container-engine/skopeo/tasks/main.yml b/roles/container-engine/skopeo/tasks/main.yml
index 95bb9697f..8f21e3f1c 100644
--- a/roles/container-engine/skopeo/tasks/main.yml
+++ b/roles/container-engine/skopeo/tasks/main.yml
@@ -2,9 +2,9 @@
 - name: Skopeo | check if fedora coreos
   stat:
     path: /run/ostree-booted
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: ostree
 
 - name: Skopeo | set is_ostree
diff --git a/roles/container-engine/validate-container-engine/tasks/main.yml b/roles/container-engine/validate-container-engine/tasks/main.yml
index 08ea1e5ca..ffb541c24 100644
--- a/roles/container-engine/validate-container-engine/tasks/main.yml
+++ b/roles/container-engine/validate-container-engine/tasks/main.yml
@@ -2,9 +2,9 @@
 - name: Validate-container-engine | check if fedora coreos
   stat:
     path: /run/ostree-booted
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: ostree
   tags:
     - facts
@@ -30,8 +30,8 @@
 - name: Check if containerd is installed
   find:
     file_type: file
-    recurse: yes
-    use_regex: yes
+    recurse: true
+    use_regex: true
     patterns:
       - containerd.service$
     paths:
@@ -45,8 +45,8 @@
 - name: Check if docker is installed
   find:
     file_type: file
-    recurse: yes
-    use_regex: yes
+    recurse: true
+    use_regex: true
     patterns:
       - docker.service$
     paths:
@@ -60,8 +60,8 @@
 - name: Check if crio is installed
   find:
     file_type: file
-    recurse: yes
-    use_regex: yes
+    recurse: true
+    use_regex: true
     patterns:
       - crio.service$
     paths:
diff --git a/roles/download/tasks/check_pull_required.yml b/roles/download/tasks/check_pull_required.yml
index e5ae1dcf3..585013125 100644
--- a/roles/download/tasks/check_pull_required.yml
+++ b/roles/download/tasks/check_pull_required.yml
@@ -5,7 +5,7 @@
   shell: "{{ image_info_command }}"
   register: docker_images
   changed_when: false
-  check_mode: no
+  check_mode: false
   when: not download_always_pull
 
 - name: Check_pull_required | Set pull_required if the desired image is not yet loaded
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index f98adfa3f..5e67fe8c5 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -26,12 +26,12 @@
     - name: Download_container | Determine if image is in cache
       stat:
         path: "{{ image_path_cached }}"
-        get_attributes: no
-        get_checksum: no
-        get_mime: no
+        get_attributes: false
+        get_checksum: false
+        get_mime: false
       delegate_to: localhost
       connection: local
-      delegate_facts: no
+      delegate_facts: false
       register: cache_image
       changed_when: false
       become: false
@@ -57,7 +57,7 @@
     - name: Download_container | Download image if required
       command: "{{ image_pull_command_on_localhost if download_localhost else image_pull_command }} {{ image_reponame }}"
       delegate_to: "{{ download_delegate if download_run_once else inventory_hostname }}"
-      delegate_facts: yes
+      delegate_facts: true
       run_once: "{{ download_run_once }}"
       register: pull_task_result
       until: pull_task_result is succeeded
@@ -72,7 +72,7 @@
     - name: Download_container | Save and compress image
       shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"  # noqa command-instead-of-shell - image_save_command_on_localhost contains a pipe, therefore requires shell
       delegate_to: "{{ download_delegate }}"
-      delegate_facts: no
+      delegate_facts: false
       register: container_save_status
       failed_when: container_save_status.stderr
       run_once: true
@@ -99,7 +99,7 @@
         dest: "{{ image_path_final }}"
         use_ssh_args: true
         mode: push
-      delegate_facts: no
+      delegate_facts: false
       register: upload_image
       failed_when: not upload_image
       until: upload_image is succeeded
diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml
index 00dd33a28..53a7a8197 100644
--- a/roles/download/tasks/download_file.yml
+++ b/roles/download/tasks/download_file.yml
@@ -24,13 +24,13 @@
       owner: "{{ download.owner | default(omit) }}"
       mode: "0755"
       state: directory
-      recurse: yes
+      recurse: true
 
   - name: Download_file | Create local cache directory
     file:
       path: "{{ file_path_cached | dirname }}"
       state: directory
-      recurse: yes
+      recurse: true
     delegate_to: localhost
     connection: local
     delegate_facts: false
@@ -45,7 +45,7 @@
     file:
       path: "{{ file_path_cached | dirname }}"
       state: directory
-      recurse: yes
+      recurse: true
     delegate_to: "{{ download_delegate }}"
     delegate_facts: false
     run_once: true
diff --git a/roles/download/tasks/extract_file.yml b/roles/download/tasks/extract_file.yml
index 59d0531f6..ce7536f4f 100644
--- a/roles/download/tasks/extract_file.yml
+++ b/roles/download/tasks/extract_file.yml
@@ -5,7 +5,7 @@
     dest: "{{ download.dest | dirname }}"
     owner: "{{ download.owner | default(omit) }}"
     mode: "{{ download.mode | default(omit) }}"
-    copy: no
+    copy: false
     extra_opts: "{{ download.unarchive_extra_opts | default(omit) }}"
   when:
     - download.unarchive | default(false)
diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml
index a8a79d711..15f3a91da 100644
--- a/roles/download/tasks/prep_download.yml
+++ b/roles/download/tasks/prep_download.yml
@@ -62,7 +62,7 @@
   register: docker_images
   failed_when: false
   changed_when: false
-  check_mode: no
+  check_mode: false
   when: download_container
 
 - name: Prep_download | Create staging directory on remote node
@@ -81,7 +81,7 @@
     mode: "0755"
   delegate_to: localhost
   connection: local
-  delegate_facts: no
+  delegate_facts: false
   run_once: true
   become: false
   when:
diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml
index 9c05a3ad0..84e03accd 100644
--- a/roles/etcd/handlers/backup.yml
+++ b/roles/etcd/handlers/backup.yml
@@ -23,9 +23,9 @@
 - name: Stat etcd v2 data directory
   stat:
     path: "{{ etcd_data_dir }}/member"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: etcd_data_dir_member
   listen: Restart etcd
   when: etcd_cluster_is_healthy.rc == 0
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index 62a899945..9c8b8a82f 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -26,7 +26,7 @@
 - name: Wait for etcd up
   uri:
     url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
-    validate_certs: no
+    validate_certs: false
     client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
     client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
   register: result
@@ -41,7 +41,7 @@
 - name: Wait for etcd-events up
   uri:
     url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
-    validate_certs: no
+    validate_certs: false
     client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
     client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
   register: result
diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml
index 1611f9ec1..440685aa7 100644
--- a/roles/etcd/tasks/check_certs.yml
+++ b/roles/etcd/tasks/check_certs.yml
@@ -17,9 +17,9 @@
 - name: "Check certs | Register ca and etcd admin/member certs on etcd hosts"
   stat:
     path: "{{ etcd_cert_dir }}/{{ item }}"
-    get_attributes: no
-    get_checksum: yes
-    get_mime: no
+    get_attributes: false
+    get_checksum: true
+    get_mime: false
   register: etcd_member_certs
   when: inventory_hostname in groups['etcd']
   with_items:
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index 4cf5387a0..b7b943f0d 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -6,8 +6,8 @@
   register: etcd_cluster_is_healthy
   failed_when: false
   changed_when: false
-  check_mode: no
-  run_once: yes
+  check_mode: false
+  run_once: true
   when:
     - is_etcd_master
     - etcd_cluster_setup
@@ -27,8 +27,8 @@
   register: etcd_events_cluster_is_healthy
   failed_when: false
   changed_when: false
-  check_mode: no
-  run_once: yes
+  check_mode: false
+  run_once: true
   when:
     - is_etcd_master
     - etcd_events_cluster_setup
@@ -49,7 +49,7 @@
   template:
     src: "etcd-{{ etcd_deployment_type }}.service.j2"
     dest: /etc/systemd/system/etcd.service
-    backup: yes
+    backup: true
     mode: "0644"
     # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
     # Remove once we drop support for systemd < 250
@@ -60,7 +60,7 @@
   template:
     src: "etcd-events-{{ etcd_deployment_type }}.service.j2"
     dest: /etc/systemd/system/etcd-events.service
-    backup: yes
+    backup: true
     mode: "0644"
     validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:etcd-events-{{ etcd_deployment_type }}.service'"
     # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
@@ -77,7 +77,7 @@
   service:
     name: etcd
     state: started
-    enabled: yes
+    enabled: true
   ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}"  # noqa ignore-errors
   when: is_etcd_master and etcd_cluster_setup
 
@@ -86,7 +86,7 @@
   service:
     name: etcd-events
     state: started
-    enabled: yes
+    enabled: true
   ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}"  # noqa ignore-errors
   when: is_etcd_master and etcd_events_cluster_setup
 
@@ -99,8 +99,8 @@
   retries: "{{ etcd_retries }}"
   delay: "{{ retry_stagger | random + 3 }}"
   changed_when: false
-  check_mode: no
-  run_once: yes
+  check_mode: false
+  run_once: true
   when:
     - is_etcd_master
     - etcd_cluster_setup
@@ -122,8 +122,8 @@
   retries: "{{ etcd_retries }}"
   delay: "{{ retry_stagger | random + 3 }}"
   changed_when: false
-  check_mode: no
-  run_once: yes
+  check_mode: false
+  run_once: true
   when:
     - is_etcd_master
     - etcd_events_cluster_setup
@@ -141,7 +141,7 @@
   register: etcd_member_in_cluster
   ignore_errors: true  # noqa ignore-errors
   changed_when: false
-  check_mode: no
+  check_mode: false
   when: is_etcd_master and etcd_cluster_setup
   tags:
     - facts
@@ -157,7 +157,7 @@
   register: etcd_events_member_in_cluster
   ignore_errors: true  # noqa ignore-errors
   changed_when: false
-  check_mode: no
+  check_mode: false
   when: is_etcd_master and etcd_events_cluster_setup
   tags:
     - facts
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 711c14d64..934b5eb37 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -6,7 +6,7 @@
     state: directory
     owner: "{{ etcd_owner }}"
     mode: "{{ etcd_cert_dir_mode }}"
-    recurse: yes
+    recurse: true
 
 - name: "Gen_certs | create etcd script dir (on {{ groups['etcd'][0] }})"
   file:
@@ -14,7 +14,7 @@
     state: directory
     owner: root
     mode: "0700"
-  run_once: yes
+  run_once: true
   when: inventory_hostname == groups['etcd'][0]
 
 - name: Gen_certs | write openssl config
@@ -22,7 +22,7 @@
     src: "openssl.conf.j2"
     dest: "{{ etcd_config_dir }}/openssl.conf"
     mode: "0640"
-  run_once: yes
+  run_once: true
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - gen_certs | default(false)
@@ -33,7 +33,7 @@
     src: "make-ssl-etcd.sh.j2"
     dest: "{{ etcd_script_dir }}/make-ssl-etcd.sh"
     mode: "0700"
-  run_once: yes
+  run_once: true
   when:
     - gen_certs | default(false)
     - inventory_hostname == groups['etcd'][0]
@@ -43,7 +43,7 @@
   environment:
     MASTERS: "{{ groups['gen_master_certs_True'] | ansible.builtin.intersect(groups['etcd']) | join(' ') }}"
     HOSTS: "{{ groups['gen_node_certs_True'] | ansible.builtin.intersect(groups['kube_control_plane']) | join(' ') }}"
-  run_once: yes
+  run_once: true
   delegate_to: "{{ groups['etcd'][0] }}"
   when: gen_certs | default(false)
   notify: Set etcd_secret_changed
@@ -52,7 +52,7 @@
   command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
   environment:
     HOSTS: "{{ groups['gen_node_certs_True'] | ansible.builtin.intersect(groups['k8s_cluster']) | join(' ') }}"
-  run_once: yes
+  run_once: true
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
@@ -153,4 +153,4 @@
     state: directory
     owner: "{{ etcd_owner }}"
     mode: "{{ etcd_cert_dir_mode }}"
-    recurse: yes
+    recurse: true
diff --git a/roles/etcd/tasks/gen_nodes_certs_script.yml b/roles/etcd/tasks/gen_nodes_certs_script.yml
index 2093bf880..e074d0c01 100644
--- a/roles/etcd/tasks/gen_nodes_certs_script.yml
+++ b/roles/etcd/tasks/gen_nodes_certs_script.yml
@@ -21,7 +21,7 @@
     executable: /bin/bash
   no_log: "{{ not (unsafe_show_logs | bool) }}"
   register: etcd_node_certs
-  check_mode: no
+  check_mode: false
   delegate_to: "{{ groups['etcd'][0] }}"
   changed_when: false
 
diff --git a/roles/etcd/tasks/install_docker.yml b/roles/etcd/tasks/install_docker.yml
index a7aba5094..f393bd4eb 100644
--- a/roles/etcd/tasks/install_docker.yml
+++ b/roles/etcd/tasks/install_docker.yml
@@ -29,7 +29,7 @@
     dest: "{{ bin_dir }}/etcd"
     owner: 'root'
     mode: "0750"
-    backup: yes
+    backup: true
   when: etcd_cluster_setup
 
 - name: Install etcd-events launch script
@@ -38,5 +38,5 @@
     dest: "{{ bin_dir }}/etcd-events"
     owner: 'root'
     mode: "0750"
-    backup: yes
+    backup: true
   when: etcd_events_cluster_setup
diff --git a/roles/etcd/tasks/install_host.yml b/roles/etcd/tasks/install_host.yml
index 7bfc7e2ab..eb67952ea 100644
--- a/roles/etcd/tasks/install_host.yml
+++ b/roles/etcd/tasks/install_host.yml
@@ -25,7 +25,7 @@
     src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
     dest: "{{ bin_dir }}/{{ item }}"
     mode: "0755"
-    remote_src: yes
+    remote_src: true
   with_items:
     - etcd
   when: etcd_cluster_setup
diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml
index 0fad331e3..106f06e03 100644
--- a/roles/etcd/tasks/join_etcd-events_member.yml
+++ b/roles/etcd/tasks/join_etcd-events_member.yml
@@ -32,7 +32,7 @@
     executable: /bin/bash
   register: etcd_events_member_in_cluster
   changed_when: false
-  check_mode: no
+  check_mode: false
   tags:
     - facts
   environment:
@@ -46,4 +46,4 @@
   service:
     name: etcd-events
     state: started
-    enabled: yes
+    enabled: true
diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml
index ee77d4b26..a2e37714d 100644
--- a/roles/etcd/tasks/join_etcd_member.yml
+++ b/roles/etcd/tasks/join_etcd_member.yml
@@ -33,7 +33,7 @@
     executable: /bin/bash
   register: etcd_member_in_cluster
   changed_when: false
-  check_mode: no
+  check_mode: false
   retries: "{{ etcd_retries }}"
   delay: "{{ retry_stagger | random + 3 }}"
   until: etcd_member_in_cluster.rc == 0
@@ -50,4 +50,4 @@
   service:
     name: etcd
     state: started
-    enabled: yes
+    enabled: true
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 40ca3de5f..74d5f16d3 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -33,7 +33,7 @@
   command: "openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem -noout -serial"
   register: "etcd_client_cert_serial_result"
   changed_when: false
-  check_mode: no
+  check_mode: false
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
diff --git a/roles/etcdctl_etcdutl/tasks/main.yml b/roles/etcdctl_etcdutl/tasks/main.yml
index b9e6832f5..053e14295 100644
--- a/roles/etcdctl_etcdutl/tasks/main.yml
+++ b/roles/etcdctl_etcdutl/tasks/main.yml
@@ -24,7 +24,7 @@
   unarchive:
     src: "{{ downloads.etcd.dest }}"
     dest: "{{ local_release_dir }}/"
-    remote_src: yes
+    remote_src: true
   when: container_manager in ['crio', 'containerd']
 
 - name: Copy etcdctl and etcdutl binary from download dir
@@ -32,7 +32,7 @@
     src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
     dest: "{{ bin_dir }}/{{ item }}"
     mode: "0755"
-    remote_src: yes
+    remote_src: true
   with_items:
     - etcdctl
     - etcdutl
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index e3d82f106..18deee805 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Kubernetes Apps | Wait for kube-apiserver
   uri:
     url: "{{ kube_apiserver_endpoint }}/healthz"
-    validate_certs: no
+    validate_certs: false
     client_cert: "{{ kube_apiserver_client_cert }}"
     client_key: "{{ kube_apiserver_client_key }}"
   register: result
diff --git a/roles/kubernetes-apps/argocd/tasks/main.yml b/roles/kubernetes-apps/argocd/tasks/main.yml
index 3cfe06fc7..05c63337b 100644
--- a/roles/kubernetes-apps/argocd/tasks/main.yml
+++ b/roles/kubernetes-apps/argocd/tasks/main.yml
@@ -8,10 +8,10 @@
   ansible.posix.synchronize:
     src: "{{ downloads.yq.dest }}"
     dest: "{{ bin_dir }}/yq"
-    compress: no
-    perms: yes
-    owner: no
-    group: no
+    compress: false
+    perms: true
+    owner: false
+    group: false
   delegate_to: "{{ inventory_hostname }}"
 
 - name: Kubernetes Apps | Set ArgoCD template list
@@ -49,17 +49,17 @@
   ansible.posix.synchronize:
     src: "{{ local_release_dir }}/{{ item.file }}"
     dest: "{{ kube_config_dir }}/{{ item.file }}"
-    compress: no
-    perms: yes
-    owner: no
-    group: no
+    compress: false
+    perms: true
+    owner: false
+    group: false
   delegate_to: "{{ inventory_hostname }}"
   with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}"
   when:
     - "inventory_hostname == groups['kube_control_plane'][0]"
 
 - name: Kubernetes Apps | Set ArgoCD namespace for remote manifests
-  become: yes
+  become: true
   command: |
     {{ bin_dir }}/yq eval-all -i '.metadata.namespace="{{ argocd_namespace }}"' {{ kube_config_dir }}/{{ item.file }}
   with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}"
@@ -69,7 +69,7 @@
     - "inventory_hostname == groups['kube_control_plane'][0]"
 
 - name: Kubernetes Apps | Create ArgoCD manifests from templates
-  become: yes
+  become: true
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ kube_config_dir }}/{{ item.file }}"
@@ -81,7 +81,7 @@
     - "inventory_hostname == groups['kube_control_plane'][0]"
 
 - name: Kubernetes Apps | Install ArgoCD
-  become: yes
+  become: true
   kube:
     name: ArgoCD
     kubectl: "{{ bin_dir }}/kubectl"
@@ -93,7 +93,7 @@
 
 # https://github.com/argoproj/argo-cd/blob/master/docs/faq.md#i-forgot-the-admin-password-how-do-i-reset-it
 - name: Kubernetes Apps | Set ArgoCD custom admin password
-  become: yes
+  become: true
   shell: |
     {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n {{ argocd_namespace }} patch secret argocd-secret -p \
       '{
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index 8d7230e0a..ef4737eac 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Kubernetes Apps | Wait for kube-apiserver
   uri:
     url: "{{ kube_apiserver_endpoint }}/healthz"
-    validate_certs: no
+    validate_certs: false
     client_cert: "{{ kube_apiserver_client_cert }}"
     client_key: "{{ kube_apiserver_client_key }}"
   register: result
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml
index 0d4144141..325e3cb7d 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml
+++ b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml
@@ -21,7 +21,7 @@ vsphere_csi_controller_replicas: 1
 
 csi_endpoint: '{% if external_vsphere_version >= "7.0u1" %}/csi{% else %}/var/lib/csi/sockets/pluginproxy{% endif %}'
 
-vsphere_csi_aggressive_node_drain: False
+vsphere_csi_aggressive_node_drain: false
 vsphere_csi_aggressive_node_unreachable_timeout: 300
 vsphere_csi_aggressive_node_not_ready_timeout: 300
 
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 61596aefb..5951381a2 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -37,13 +37,13 @@
 
 - name: Helm | Get helm completion
   command: "{{ bin_dir }}/helm completion bash"
-  changed_when: False
+  changed_when: false
   register: helm_completion
-  check_mode: False
+  check_mode: false
 
 - name: Helm | Install helm completion
   copy:
     dest: /etc/bash_completion.d/helm.sh
     content: "{{ helm_completion.stdout }}"
     mode: "0755"
-  become: True
+  become: true
diff --git a/roles/kubernetes-apps/helm/tasks/pyyaml-flatcar.yml b/roles/kubernetes-apps/helm/tasks/pyyaml-flatcar.yml
index ea0d63a08..72f0e2182 100644
--- a/roles/kubernetes-apps/helm/tasks/pyyaml-flatcar.yml
+++ b/roles/kubernetes-apps/helm/tasks/pyyaml-flatcar.yml
@@ -2,13 +2,13 @@
 - name: Get installed pip version
   command: "{{ ansible_python_interpreter if ansible_python_interpreter is defined else 'python' }} -m pip --version"
   register: pip_version_output
-  ignore_errors: yes
+  ignore_errors: true
   changed_when: false
 
 - name: Get installed PyYAML version
   command: "{{ ansible_python_interpreter if ansible_python_interpreter is defined else 'python' }} -m pip show PyYAML"
   register: pyyaml_version_output
-  ignore_errors: yes
+  ignore_errors: true
   changed_when: false
 
 - name: Install pip
diff --git a/roles/kubernetes-apps/krew/tasks/krew.yml b/roles/kubernetes-apps/krew/tasks/krew.yml
index e46dbb48d..3308aef85 100644
--- a/roles/kubernetes-apps/krew/tasks/krew.yml
+++ b/roles/kubernetes-apps/krew/tasks/krew.yml
@@ -24,15 +24,15 @@
 
 - name: Krew | Get krew completion
   command: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }} completion bash"
-  changed_when: False
+  changed_when: false
   register: krew_completion
-  check_mode: False
-  ignore_errors: yes  # noqa ignore-errors
+  check_mode: false
+  ignore_errors: true  # noqa ignore-errors
 
 - name: Krew | Install krew completion
   copy:
     dest: /etc/bash_completion.d/krew.sh
     content: "{{ krew_completion.stdout }}"
     mode: "0755"
-  become: True
+  become: true
   when: krew_completion.rc == 0
diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
index bc0f932d8..587b652a5 100644
--- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
@@ -13,7 +13,7 @@
 - name: Weave | Wait for Weave to become available
   uri:
     url: http://127.0.0.1:6784/status
-    return_content: yes
+    return_content: true
   register: weave_status
   retries: 180
   delay: 5
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index cc7887750..920205198 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -30,9 +30,9 @@
   copy:
     src: "{{ kube_config_dir }}/admin.conf"
     dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
-    remote_src: yes
+    remote_src: true
     mode: "0600"
-    backup: yes
+    backup: true
 
 - name: Create kube artifacts dir
   file:
@@ -41,8 +41,8 @@
     state: directory
   delegate_to: localhost
   connection: local
-  become: no
-  run_once: yes
+  become: false
+  run_once: true
   when: kubeconfig_localhost
 
 - name: Wait for k8s apiserver
@@ -54,7 +54,7 @@
 - name: Get admin kubeconfig from remote host
   slurp:
     src: "{{ kube_config_dir }}/admin.conf"
-  run_once: yes
+  run_once: true
   register: raw_admin_kubeconfig
   when: kubeconfig_localhost
 
@@ -83,21 +83,21 @@
     mode: "0600"
   delegate_to: localhost
   connection: local
-  become: no
-  run_once: yes
+  become: false
+  run_once: true
   when: kubeconfig_localhost
 
 - name: Copy kubectl binary to ansible host
   fetch:
     src: "{{ bin_dir }}/kubectl"
     dest: "{{ artifacts_dir }}/kubectl"
-    flat: yes
-    validate_checksum: no
+    flat: true
+    validate_checksum: false
   register: copy_binary_result
   until: copy_binary_result is not failed
   retries: 20
-  become: no
-  run_once: yes
+  become: false
+  run_once: true
   when: kubectl_localhost
 
 - name: Create helper script kubectl.sh on ansible host
@@ -107,8 +107,8 @@
       ${BASH_SOURCE%/*}/kubectl --kubeconfig=${BASH_SOURCE%/*}/admin.conf "$@"
     dest: "{{ artifacts_dir }}/kubectl.sh"
     mode: "0755"
-  become: no
-  run_once: yes
+  become: false
+  run_once: true
   delegate_to: localhost
   connection: local
   when: kubectl_localhost and kubeconfig_localhost
diff --git a/roles/kubernetes/control-plane/handlers/main.yml b/roles/kubernetes/control-plane/handlers/main.yml
index be5fdffb1..3d7f3e074 100644
--- a/roles/kubernetes/control-plane/handlers/main.yml
+++ b/roles/kubernetes/control-plane/handlers/main.yml
@@ -81,7 +81,7 @@
     endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
   uri:
     url: https://{{ endpoint }}:10259/healthz
-    validate_certs: no
+    validate_certs: false
   register: scheduler_result
   until: scheduler_result.status == 200
   retries: 60
@@ -95,7 +95,7 @@
     endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
   uri:
     url: https://{{ endpoint }}:10257/healthz
-    validate_certs: no
+    validate_certs: false
   register: controller_manager_result
   until: controller_manager_result.status == 200
   retries: 60
@@ -107,7 +107,7 @@
 - name: Master | wait for the apiserver to be running
   uri:
     url: "{{ kube_apiserver_endpoint }}/healthz"
-    validate_certs: no
+    validate_certs: false
   register: result
   until: result.status == 200
   retries: 60
diff --git a/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml b/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml
index ce5894d11..5faa18485 100644
--- a/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml
+++ b/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml
@@ -3,7 +3,7 @@
 - name: Check which kube-control nodes are already members of the cluster
   command: "{{ bin_dir }}/kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o json"
   register: kube_control_planes_raw
-  ignore_errors: yes
+  ignore_errors: true
   changed_when: false
 
 - name: Set fact joined_control_planes
@@ -12,7 +12,7 @@
   delegate_to: "{{ item }}"
   loop: "{{ groups['kube_control_plane'] }}"
   when: kube_control_planes_raw is succeeded
-  run_once: yes
+  run_once: true
 
 - name: Set fact first_kube_control_plane
   set_fact:
diff --git a/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml b/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml
index 9b998c52b..2950c76e2 100644
--- a/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml
+++ b/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml
@@ -2,9 +2,9 @@
 - name: Check if secret for encrypting data at rest already exist
   stat:
     path: "{{ kube_cert_dir }}/secrets_encryption.yaml"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: secrets_encryption_file
 
 - name: Slurp secrets_encryption file if it exists
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml
index 36bb62798..918f7cf47 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-backup.yml
@@ -4,7 +4,7 @@
     src: "{{ kube_cert_dir }}/{{ item }}"
     dest: "{{ kube_cert_dir }}/{{ item }}.old"
     mode: preserve
-    remote_src: yes
+    remote_src: true
   with_items:
     - apiserver.crt
     - apiserver.key
@@ -19,7 +19,7 @@
     src: "{{ kube_config_dir }}/{{ item }}"
     dest: "{{ kube_config_dir }}/{{ item }}.old"
     mode: preserve
-    remote_src: yes
+    remote_src: true
   with_items:
     - admin.conf
     - controller-manager.conf
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml b/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml
index 5376aba81..e47f571d3 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml
@@ -5,7 +5,7 @@
     dest: "{{ kube_config_dir }}/{{ item }}"
     regexp: '^    server: https'
     line: '    server: {{ kube_apiserver_endpoint }}'
-    backup: yes
+    backup: true
   with_items:
     - admin.conf
     - controller-manager.conf
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
index 128e93f36..413d4946c 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
@@ -25,7 +25,7 @@
 - name: Parse certificate key if not set
   set_fact:
     kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
-  run_once: yes
+  run_once: true
   when:
     - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is defined
     - hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is not skipped
@@ -35,7 +35,7 @@
     src: "kubeadm-controlplane.{{ kubeadmConfig_api_version }}.yaml.j2"
     dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml"
     mode: "0640"
-    backup: yes
+    backup: true
   when:
     - inventory_hostname != first_kube_control_plane
     - not kubeadm_already_run.stat.exists
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index dfbe604a4..52700af2e 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -13,9 +13,9 @@
 - name: Kubeadm | Check if kubeadm has already run
   stat:
     path: "/var/lib/kubelet/config.yaml"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: kubeadm_already_run
 
 - name: Kubeadm | Backup kubeadm certs / kubeconfig
diff --git a/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml b/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml
index 7d0c1a0d5..409ecb043 100644
--- a/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml
+++ b/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml
@@ -4,7 +4,7 @@
     path: "{{ kube_config_dir }}/kubelet.conf"
     regexp: '^    client-certificate-data: '
     line: '    client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem'
-    backup: yes
+    backup: true
   notify:
     - "Master | reload kubelet"
 
@@ -13,6 +13,6 @@
     path: "{{ kube_config_dir }}/kubelet.conf"
     regexp: '^    client-key-data: '
     line: '    client-key: /var/lib/kubelet/pki/kubelet-client-current.pem'
-    backup: yes
+    backup: true
   notify:
     - "Master | reload kubelet"
diff --git a/roles/kubernetes/control-plane/tasks/main.yml b/roles/kubernetes/control-plane/tasks/main.yml
index 5d58014e8..518bac961 100644
--- a/roles/kubernetes/control-plane/tasks/main.yml
+++ b/roles/kubernetes/control-plane/tasks/main.yml
@@ -120,7 +120,7 @@
 - name: Renew K8S control plane certificates monthly 2/2
   systemd_service:
     name: k8s-certs-renew.timer
-    enabled: yes
+    enabled: true
     state: started
     daemon_reload: "{{ k8s_certs_units is changed }}"
   when: auto_renew_certificates
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 2b5778726..ad6ac36be 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -14,17 +14,17 @@
 - name: Check if kubelet.conf exists
   stat:
     path: "{{ kube_config_dir }}/kubelet.conf"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: kubelet_conf
 
 - name: Check if kubeadm CA cert is accessible
   stat:
     path: "{{ kube_cert_dir }}/ca.crt"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: kubeadm_ca_stat
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true
@@ -79,7 +79,7 @@
   template:
     src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
     dest: "{{ kube_config_dir }}/kubeadm-client.conf"
-    backup: yes
+    backup: true
     mode: "0640"
   when: not is_kube_master
 
@@ -140,7 +140,7 @@
     dest: "{{ kube_config_dir }}/kubelet.conf"
     regexp: 'server:'
     line: '    server: {{ kube_apiserver_endpoint }}'
-    backup: yes
+    backup: true
   when:
     - kubeadm_config_api_fqdn is not defined
     - not is_kube_master
@@ -152,7 +152,7 @@
     dest: "{{ kube_config_dir }}/kubelet.conf"
     regexp: '^    server: https'
     line: '    server: {{ kube_apiserver_endpoint }}'
-    backup: yes
+    backup: true
   when:
     - not is_kube_master
     - loadbalancer_apiserver is defined
diff --git a/roles/kubernetes/node-label/tasks/main.yml b/roles/kubernetes/node-label/tasks/main.yml
index 00e87504c..3ebb64594 100644
--- a/roles/kubernetes/node-label/tasks/main.yml
+++ b/roles/kubernetes/node-label/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Kubernetes Apps | Wait for kube-apiserver
   uri:
     url: "{{ kube_apiserver_endpoint }}/healthz"
-    validate_certs: no
+    validate_certs: false
     client_cert: "{{ kube_apiserver_client_cert }}"
     client_key: "{{ kube_apiserver_client_key }}"
   register: result
diff --git a/roles/kubernetes/node/tasks/facts.yml b/roles/kubernetes/node/tasks/facts.yml
index 0aaa11d60..6e8995274 100644
--- a/roles/kubernetes/node/tasks/facts.yml
+++ b/roles/kubernetes/node/tasks/facts.yml
@@ -8,7 +8,7 @@
       executable: /bin/bash
     register: docker_cgroup_driver_result
     changed_when: false
-    check_mode: no
+    check_mode: false
 
   - name: Set kubelet_cgroup_driver_detected fact for docker
     set_fact:
diff --git a/roles/kubernetes/node/tasks/kubelet.yml b/roles/kubernetes/node/tasks/kubelet.yml
index b63aefe1f..1f27bd072 100644
--- a/roles/kubernetes/node/tasks/kubelet.yml
+++ b/roles/kubernetes/node/tasks/kubelet.yml
@@ -11,7 +11,7 @@
     src: "kubelet.env.{{ kubeletConfig_api_version }}.j2"
     dest: "{{ kube_config_dir }}/kubelet.env"
     setype: "{{ (preinstall_selinux_state != 'disabled') | ternary('etc_t', omit) }}"
-    backup: yes
+    backup: true
     mode: "0600"
   notify: Node | restart kubelet
   tags:
@@ -32,7 +32,7 @@
   template:
     src: "kubelet.service.j2"
     dest: "/etc/systemd/system/kubelet.service"
-    backup: "yes"
+    backup: true
     mode: "0600"
     validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:kubelet.service'"
     # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
@@ -48,7 +48,7 @@
 - name: Enable kubelet
   service:
     name: kubelet
-    enabled: yes
+    enabled: true
     state: started
   tags:
     - kubelet
diff --git a/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml b/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
index 2d3454e5a..b4c58126e 100644
--- a/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
+++ b/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml
@@ -17,14 +17,14 @@
     dest: "{{ haproxy_config_dir }}/haproxy.cfg"
     owner: root
     mode: "0755"
-    backup: yes
+    backup: true
 
 - name: Haproxy | Get checksum from config
   stat:
     path: "{{ haproxy_config_dir }}/haproxy.cfg"
-    get_attributes: no
-    get_checksum: yes
-    get_mime: no
+    get_attributes: false
+    get_checksum: true
+    get_mime: false
   register: haproxy_stat
 
 - name: Haproxy | Write static pod
diff --git a/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml b/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml
index b210cfe16..b52261a62 100644
--- a/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml
+++ b/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml
@@ -16,9 +16,9 @@
 - name: Kube-vip | Check if kubeadm has already run
   stat:
     path: "/var/lib/kubelet/config.yaml"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: kubeadm_already_run
 
 - name: Kube-vip | Set admin.conf
diff --git a/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml b/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
index aeeacc80d..66ebe55e0 100644
--- a/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
+++ b/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml
@@ -17,14 +17,14 @@
     dest: "{{ nginx_config_dir }}/nginx.conf"
     owner: root
     mode: "0755"
-    backup: yes
+    backup: true
 
 - name: Nginx-proxy | Get checksum from config
   stat:
     path: "{{ nginx_config_dir }}/nginx.conf"
-    get_attributes: no
-    get_checksum: yes
-    get_mime: no
+    get_attributes: false
+    get_checksum: true
+    get_mime: false
   register: nginx_stat
 
 - name: Nginx-proxy | Write static pod
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 7dc211405..56117bc3a 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -51,10 +51,10 @@
   ansible.posix.sysctl:
     name: net.ipv4.ip_local_reserved_ports
     value: "{{ kube_apiserver_node_port_range }}"
-    sysctl_set: yes
+    sysctl_set: true
     sysctl_file: "{{ sysctl_file_path }}"
     state: present
-    reload: yes
+    reload: true
   when: kube_apiserver_node_port_range is defined
   tags:
     - kube-proxy
@@ -66,7 +66,7 @@
   register: modinfo_br_netfilter
   failed_when: modinfo_br_netfilter.rc not in [0, 1]
   changed_when: false
-  check_mode: no
+  check_mode: false
 
 # TODO: Remove once upstream issue is fixed
 # https://github.com/ansible-collections/community.general/issues/7717
@@ -97,7 +97,7 @@
   command: "sysctl net.bridge.bridge-nf-call-iptables"
   failed_when: false
   changed_when: false
-  check_mode: no
+  check_mode: false
   register: sysctl_bridge_nf_call_iptables
 
 - name: Enable bridge-nf-call tables
@@ -106,7 +106,7 @@
     state: present
     sysctl_file: "{{ sysctl_file_path }}"
     value: "1"
-    reload: yes
+    reload: true
   when: sysctl_bridge_nf_call_iptables.rc == 0
   with_items:
     - net.bridge.bridge-nf-call-iptables
diff --git a/roles/kubernetes/node/tasks/pre_upgrade.yml b/roles/kubernetes/node/tasks/pre_upgrade.yml
index d9c2d07ef..e4bbf6b74 100644
--- a/roles/kubernetes/node/tasks/pre_upgrade.yml
+++ b/roles/kubernetes/node/tasks/pre_upgrade.yml
@@ -11,7 +11,7 @@
     executable: /bin/bash
   failed_when: false
   changed_when: false
-  check_mode: no
+  check_mode: false
   register: kubelet_container_check
 
 - name: "Pre-upgrade | copy /var/lib/cni from kubelet"
diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml
index 35140ab42..cc69fe42c 100644
--- a/roles/kubernetes/preinstall/handlers/main.yml
+++ b/roles/kubernetes/preinstall/handlers/main.yml
@@ -31,9 +31,9 @@
 - name: Preinstall | kube-apiserver configured
   stat:
     path: "{{ kube_manifest_dir }}/kube-apiserver.yaml"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: kube_apiserver_set
   when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
   listen: Preinstall | propagate resolvconf to k8s components
@@ -42,9 +42,9 @@
 - name: Preinstall | kube-controller configured
   stat:
     path: "{{ kube_manifest_dir }}/kube-controller-manager.yaml"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: kube_controller_set
   when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
   listen: Preinstall | propagate resolvconf to k8s components
@@ -109,7 +109,7 @@
 - name: Preinstall | wait for the apiserver to be running
   uri:
     url: "{{ kube_apiserver_endpoint }}/healthz"
-    validate_certs: no
+    validate_certs: false
   register: result
   until: result.status == 200
   retries: 60
diff --git a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
index 45474c844..76d95d11d 100644
--- a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
+++ b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml
@@ -2,9 +2,9 @@
 - name: Check if /etc/fstab exists
   stat:
     path: "/etc/fstab"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: fstab_file
 
 - name: Remove swapfile from /etc/fstab
diff --git a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
index 18ad65694..263bca400 100644
--- a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
@@ -12,24 +12,24 @@
   register: resolvconf
   failed_when: false
   changed_when: false
-  check_mode: no
+  check_mode: false
 
 - name: Check existence of /etc/resolvconf/resolv.conf.d
   stat:
     path: /etc/resolvconf/resolv.conf.d
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   failed_when: false
   register: resolvconfd_path
 
 - name: Check status of /etc/resolv.conf
   stat:
     path: /etc/resolv.conf
-    follow: no
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    follow: false
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   failed_when: false
   register: resolvconf_stat
 
@@ -72,7 +72,7 @@
   register: systemd_resolved_enabled
   failed_when: false
   changed_when: false
-  check_mode: no
+  check_mode: false
 
 - name: Set default dns if remove_default_searchdomains is false
   set_fact:
@@ -94,9 +94,9 @@
 - name: Check if kubelet is configured
   stat:
     path: "{{ kube_config_dir }}/kubelet.env"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: kubelet_configured
   changed_when: false
 
@@ -121,9 +121,9 @@
 - name: Check if /etc/dhclient.conf exists
   stat:
     path: /etc/dhclient.conf
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: dhclient_stat
 
 - name: Target dhclient conf file for /etc/dhclient.conf
@@ -134,9 +134,9 @@
 - name: Check if /etc/dhcp/dhclient.conf exists
   stat:
     path: /etc/dhcp/dhclient.conf
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: dhcp_dhclient_stat
 
 - name: Target dhclient conf file for /etc/dhcp/dhclient.conf
@@ -218,9 +218,9 @@
 - name: Check /usr readonly
   stat:
     path: "/usr"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: usr
 
 - name: Set alternate flexvolume path
diff --git a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
index 1bb0f4856..d609ab574 100644
--- a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
@@ -44,7 +44,7 @@
   assert:
     that: item.value | type_debug == 'bool'
     msg: "{{ item.value }} isn't a bool"
-  run_once: yes
+  run_once: true
   with_items:
     - { name: download_run_once, value: "{{ download_run_once }}" }
     - { name: deploy_netchecker, value: "{{ deploy_netchecker }}" }
@@ -172,21 +172,21 @@
     that:
       - kube_service_addresses | ansible.utils.ipaddr('net')
     msg: "kube_service_addresses = '{{ kube_service_addresses }}' is not a valid network range"
-  run_once: yes
+  run_once: true
 
 - name: "Check that kube_pods_subnet is a network range"
   assert:
     that:
       - kube_pods_subnet | ansible.utils.ipaddr('net')
     msg: "kube_pods_subnet = '{{ kube_pods_subnet }}' is not a valid network range"
-  run_once: yes
+  run_once: true
 
 - name: "Check that kube_pods_subnet does not collide with kube_service_addresses"
   assert:
     that:
       - kube_pods_subnet | ansible.utils.ipaddr(kube_service_addresses) | string == 'None'
     msg: "kube_pods_subnet cannot be the same network segment as kube_service_addresses"
-  run_once: yes
+  run_once: true
 
 - name: "Check that IP range is enough for the nodes"
   assert:
@@ -194,7 +194,7 @@
       - 2 ** (kube_network_node_prefix - kube_pods_subnet | ansible.utils.ipaddr('prefix')) >= groups['k8s_cluster'] | length
     msg: "Not enough IPs are available for the desired node count."
   when: kube_network_plugin != 'calico'
-  run_once: yes
+  run_once: true
 
 - name: Stop if unknown dns mode
   assert:
@@ -246,7 +246,7 @@
 
 # TODO: Clean this task up when we drop backward compatibility support for `etcd_kubeadm_enabled`
 - name: Stop if etcd deployment type is not host or kubeadm when container_manager != docker and etcd_kubeadm_enabled is not defined
-  run_once: yes
+  run_once: true
   when: etcd_kubeadm_enabled is defined
   block:
     - name: Warn the user if they are still using `etcd_kubeadm_enabled`
@@ -292,7 +292,7 @@
   assert:
     that: containerd_version is version(containerd_min_version_required, '>=')
     msg: "containerd_version is too low. Minimum version {{ containerd_min_version_required }}"
-  run_once: yes
+  run_once: true
   when:
     - containerd_version not in ['latest', 'edge', 'stable']
     - container_manager == 'containerd'
diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
index 2fff8ef56..507a72d78 100644
--- a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
+++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml
@@ -48,9 +48,9 @@
 - name: Check if kubernetes kubeadm compat cert dir exists
   stat:
     path: "{{ kube_cert_compat_dir }}"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: kube_cert_compat_dir_check
   when:
     - inventory_hostname in groups['k8s_cluster']
diff --git a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
index 6219161fa..9aad0dba8 100644
--- a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
+++ b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
@@ -16,7 +16,7 @@
       options ndots:{{ ndots }} timeout:{{ dns_timeout | default('2') }} attempts:{{ dns_attempts | default('2') }}
     state: present
     insertbefore: BOF
-    create: yes
+    create: true
     backup: "{{ not resolvconf_stat.stat.islnk }}"
     marker: "# Ansible entries {mark}"
     mode: "0644"
diff --git a/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml b/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml
index ca51e88b9..6ebed2553 100644
--- a/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml
+++ b/roles/kubernetes/preinstall/tasks/0062-networkmanager-unmanaged-devices.yml
@@ -3,7 +3,7 @@
   file:
     path: "/etc/NetworkManager/conf.d"
     state: directory
-    recurse: yes
+    recurse: true
 
 - name: NetworkManager | Prevent NetworkManager from managing Calico interfaces (cali*/tunl*/vxlan.calico)
   copy:
diff --git a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
index e155f0a18..6dfa72426 100644
--- a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
+++ b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
@@ -6,7 +6,7 @@
     option: servers
     value: "{{ nameserverentries }}"
     mode: '0600'
-    backup: yes
+    backup: true
   when:
     - nameserverentries != "127.0.0.53" or systemd_resolved_enabled.rc != 0
   notify: Preinstall | update resolvconf for networkmanager
@@ -23,7 +23,7 @@
     option: searches
     value: "{{ (default_searchdomains | default([]) + searchdomains | default([])) | join(',') }}"
     mode: '0600'
-    backup: yes
+    backup: true
   notify: Preinstall | update resolvconf for networkmanager
 
 - name: NetworkManager | Add DNS options to NM configuration
@@ -33,5 +33,5 @@
     option: options
     value: "ndots:{{ ndots }},timeout:{{ dns_timeout | default('2') }},attempts:{{ dns_attempts | default('2') }}"
     mode: '0600'
-    backup: yes
+    backup: true
   notify: Preinstall | update resolvconf for networkmanager
diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
index cddbe1ecf..c8b480c84 100644
--- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
+++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
@@ -34,7 +34,7 @@
 
 - name: Update package management cache (APT)
   apt:
-    update_cache: yes
+    update_cache: true
     cache_valid_time: 3600
   when: ansible_os_family == "Debian"
   tags:
diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
index 5b2c7d10a..8941a649a 100644
--- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
@@ -3,9 +3,9 @@
 - name: Confirm selinux deployed
   stat:
     path: /etc/selinux/config
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   when:
     - ansible_os_family == "RedHat"
     - "'Amazon' not in ansible_distribution"
@@ -27,8 +27,8 @@
     dest: /etc/gai.conf
     line: "precedence ::ffff:0:0/96  100"
     state: present
-    create: yes
-    backup: yes
+    create: true
+    backup: true
     mode: "0644"
   when:
     - disable_ipv6_dns
@@ -47,9 +47,9 @@
 - name: Stat sysctl file configuration
   stat:
     path: "{{ sysctl_file_path }}"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: sysctl_file_stat
   tags:
     - bootstrap-os
@@ -75,7 +75,7 @@
     name: net.ipv4.ip_forward
     value: "1"
     state: present
-    reload: yes
+    reload: true
 
 - name: Enable ipv6 forwarding
   ansible.posix.sysctl:
@@ -83,15 +83,15 @@
     name: net.ipv6.conf.all.forwarding
     value: "1"
     state: present
-    reload: yes
+    reload: true
   when: enable_dual_stack_networks | bool
 
 - name: Check if we need to set fs.may_detach_mounts
   stat:
     path: /proc/sys/fs/may_detach_mounts
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: fs_may_detach_mounts
   ignore_errors: true  # noqa ignore-errors
 
@@ -101,7 +101,7 @@
     name: fs.may_detach_mounts
     value: 1
     state: present
-    reload: yes
+    reload: true
   when: fs_may_detach_mounts.stat.exists | d(false)
 
 - name: Ensure kubelet expected parameters are set
@@ -110,7 +110,7 @@
     name: "{{ item.name }}"
     value: "{{ item.value }}"
     state: present
-    reload: yes
+    reload: true
   with_items:
     - { name: kernel.keys.root_maxbytes, value: 25000000 }
     - { name: kernel.keys.root_maxkeys, value: 1000000 }
@@ -133,7 +133,7 @@
     name: "{{ item.name }}"
     value: "{{ item.value }}"
     state: present
-    reload: yes
+    reload: true
   with_items: "{{ additional_sysctl }}"
 
 - name: Disable fapolicyd service
diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
index 4ec9a69e6..0b44d26ad 100644
--- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
+++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
@@ -11,17 +11,17 @@
       {% endfor %}
   delegate_to: localhost
   connection: local
-  delegate_facts: yes
-  run_once: yes
+  delegate_facts: true
+  run_once: true
 
 - name: Hosts | populate inventory into hosts file
   blockinfile:
     path: /etc/hosts
     block: "{{ hostvars.localhost.etc_hosts_inventory_block }}"
     state: "{{ 'present' if populate_inventory_to_hosts_file else 'absent' }}"
-    create: yes
-    backup: yes
-    unsafe_writes: yes
+    create: true
+    backup: true
+    unsafe_writes: true
     marker: "# Ansible inventory hosts {mark}"
     mode: "0644"
 
@@ -31,8 +31,8 @@
     regexp: ".*{{ apiserver_loadbalancer_domain_name }}$"
     line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name }}"
     state: present
-    backup: yes
-    unsafe_writes: yes
+    backup: true
+    unsafe_writes: true
   when:
     - populate_loadbalancer_apiserver_to_hosts_file
     - loadbalancer_apiserver is defined
@@ -69,8 +69,8 @@
         line: "{{ item.key }} {{ item.value | join(' ') }}"
         regexp: "^{{ item.key }}.*$"
         state: present
-        backup: yes
-        unsafe_writes: yes
+        backup: true
+        unsafe_writes: true
       loop: "{{ etc_hosts_localhosts_dict_target | default({}) | dict2items }}"
 
 # gather facts to update ansible_fqdn
diff --git a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
index 6276034d3..9745ab261 100644
--- a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
+++ b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
@@ -6,10 +6,10 @@
       {{ item }}
       {% endfor %}
     path: "{{ dhclientconffile }}"
-    create: yes
+    create: true
     state: present
     insertbefore: BOF
-    backup: yes
+    backup: true
     marker: "# Ansible entries {mark}"
     mode: "0644"
   notify: Preinstall | propagate resolvconf to k8s components
diff --git a/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml b/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml
index 024e39f9f..dd320d50a 100644
--- a/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml
+++ b/roles/kubernetes/preinstall/tasks/0110-dhclient-hooks-undo.yml
@@ -7,7 +7,7 @@
   blockinfile:
     path: "{{ dhclientconffile }}"
     state: absent
-    backup: yes
+    backup: true
     marker: "# Ansible entries {mark}"
   notify: Preinstall | propagate resolvconf to k8s components
 
diff --git a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
index 621629f6a..b9c35875e 100644
--- a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
+++ b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
@@ -22,7 +22,7 @@
 
 - name: Check if growpart needs to be run
   command: growpart -N {{ device }} {{ partition }}
-  failed_when: False
+  failed_when: false
   changed_when: "'NOCHANGE:' not in growpart_needed.stdout"
   register: growpart_needed
   environment:
@@ -30,7 +30,7 @@
 
 - name: Check fs type
   command: file -Ls {{ root_device }}
-  changed_when: False
+  changed_when: false
   register: fs_type
 
 - name: Run growpart  # noqa no-handler
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index ee846f8ba..722beecd3 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -121,9 +121,9 @@
 - name: Check if we are running inside a Azure VM
   stat:
     path: /var/lib/waagent/
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: azure_check
   when:
     - not dns_late
diff --git a/roles/kubernetes/tokens/tasks/check-tokens.yml b/roles/kubernetes/tokens/tasks/check-tokens.yml
index a157a0597..d8bb203e9 100644
--- a/roles/kubernetes/tokens/tasks/check-tokens.yml
+++ b/roles/kubernetes/tokens/tasks/check-tokens.yml
@@ -2,9 +2,9 @@
 - name: "Check_tokens | check if the tokens have already been generated on first master"
   stat:
     path: "{{ kube_token_dir }}/known_tokens.csv"
-    get_attributes: no
-    get_checksum: yes
-    get_mime: no
+    get_attributes: false
+    get_checksum: true
+    get_mime: false
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   register: known_tokens_master
   run_once: true
@@ -23,9 +23,9 @@
 - name: "Check tokens | check if a cert already exists"
   stat:
     path: "{{ kube_token_dir }}/known_tokens.csv"
-    get_attributes: no
-    get_checksum: yes
-    get_mime: no
+    get_attributes: false
+    get_checksum: true
+    get_mime: false
   register: known_tokens
 
 - name: "Check_tokens | Set 'sync_tokens' to true"
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index 1dabf9657..a64aea9e0 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -4,7 +4,7 @@
     src: "kube-gen-token.sh"
     dest: "{{ kube_script_dir }}/kube-gen-token.sh"
     mode: "0700"
-  run_once: yes
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens | default(false)
 
@@ -17,7 +17,7 @@
     - "{{ groups['kube_control_plane'] }}"
   register: gentoken_master
   changed_when: "'Added' in gentoken_master.stdout"
-  run_once: yes
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens | default(false)
 
@@ -30,14 +30,14 @@
     - "{{ groups['kube_node'] }}"
   register: gentoken_node
   changed_when: "'Added' in gentoken_node.stdout"
-  run_once: yes
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens | default(false)
 
 - name: Gen_tokens | Get list of tokens from first master
   command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
   register: tokens_list
-  check_mode: no
+  check_mode: false
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true
   when: sync_tokens | default(false)
@@ -47,7 +47,7 @@
   args:
     executable: /bin/bash
   register: tokens_data
-  check_mode: no
+  check_mode: false
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true
   when: sync_tokens | default(false)
diff --git a/roles/kubespray-defaults/tasks/fallback_ips.yml b/roles/kubespray-defaults/tasks/fallback_ips.yml
index a1aff37ee..ae3b15150 100644
--- a/roles/kubespray-defaults/tasks/fallback_ips.yml
+++ b/roles/kubespray-defaults/tasks/fallback_ips.yml
@@ -8,10 +8,10 @@
     gather_subset: '!all,network'
     filter: "ansible_default_ipv4"
   delegate_to: "{{ item }}"
-  delegate_facts: yes
+  delegate_facts: true
   when: hostvars[item].ansible_default_ipv4 is not defined
   loop: "{{ (ansible_play_hosts_all + [groups['kube_control_plane'][0]]) | unique if ansible_limit is defined else (groups['k8s_cluster'] | default([]) + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique }}"
-  run_once: yes
+  run_once: true
   ignore_unreachable: true
   tags: always
 
@@ -26,9 +26,9 @@
       {% endfor %}
   delegate_to: localhost
   connection: local
-  delegate_facts: yes
-  become: no
-  run_once: yes
+  delegate_facts: true
+  become: false
+  run_once: true
 
 - name: Set fallback_ips
   set_fact:
diff --git a/roles/kubespray-defaults/tasks/no_proxy.yml b/roles/kubespray-defaults/tasks/no_proxy.yml
index d2d5cc6d1..adec886f4 100644
--- a/roles/kubespray-defaults/tasks/no_proxy.yml
+++ b/roles/kubespray-defaults/tasks/no_proxy.yml
@@ -26,9 +26,9 @@
       127.0.0.1,localhost,{{ kube_service_addresses }},{{ kube_pods_subnet }},svc,svc.{{ dns_domain }}
   delegate_to: localhost
   connection: local
-  delegate_facts: yes
-  become: no
-  run_once: yes
+  delegate_facts: true
+  become: false
+  run_once: true
 
 - name: Populates no_proxy to all hosts
   set_fact:
diff --git a/roles/network_plugin/calico/rr/tasks/pre.yml b/roles/network_plugin/calico/rr/tasks/pre.yml
index d8dbd8072..f8a9de611 100644
--- a/roles/network_plugin/calico/rr/tasks/pre.yml
+++ b/roles/network_plugin/calico/rr/tasks/pre.yml
@@ -3,7 +3,7 @@
   service:
     name: calico-rr
     state: stopped
-    enabled: no
+    enabled: false
   failed_when: false
 
 - name: Calico-rr | Delete obsolete files
diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml
index 95dcfa673..7f73a08c4 100644
--- a/roles/network_plugin/calico/tasks/check.yml
+++ b/roles/network_plugin/calico/tasks/check.yml
@@ -4,7 +4,7 @@
     that:
       - ipip is not defined
     msg: "'ipip' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs"
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: Stop if legacy encapsulation variables are detected (ipip_mode)
@@ -12,7 +12,7 @@
     that:
       - ipip_mode is not defined
     msg: "'ipip_mode' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs"
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: Stop if legacy encapsulation variables are detected (calcio_ipam_autoallocateblocks)
@@ -20,7 +20,7 @@
     that:
       - calcio_ipam_autoallocateblocks is not defined
     msg: "'calcio_ipam_autoallocateblocks' configuration variable is deprecated, it's a typo, please configure your inventory with 'calico_ipam_autoallocateblocks' set to 'true' or 'false' according to your specific needs"
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 
@@ -32,7 +32,7 @@
     msg: "When using cloud_provider azure and network_plugin calico calico_ipip_mode must be 'Never' and calico_vxlan_mode 'Always' or 'CrossSubnet'"
   when:
     - cloud_provider is defined and cloud_provider == 'azure'
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: Stop if supported Calico versions
@@ -40,21 +40,21 @@
     that:
       - "calico_version in calico_crds_archive_checksums.keys()"
     msg: "Calico version not supported {{ calico_version }} not in {{ calico_crds_archive_checksums.keys() }}"
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: Check if calicoctl.sh exists
   stat:
     path: "{{ bin_dir }}/calicoctl.sh"
   register: calicoctl_sh_exists
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: Check if calico ready
   command: "{{ bin_dir }}/calicoctl.sh get ClusterInformation default"
   register: calico_ready
-  run_once: True
-  ignore_errors: True
+  run_once: true
+  ignore_errors: true
   retries: 5
   delay: 10
   until: calico_ready.rc == 0
@@ -62,7 +62,7 @@
   when: calicoctl_sh_exists.stat.exists
 
 - name: Check that current calico version is enough for upgrade
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: calicoctl_sh_exists.stat.exists and calico_ready.rc == 0
   block:
@@ -91,7 +91,7 @@
   when:
     - peer_with_calico_rr
     - inventory_hostname == groups['kube_control_plane'][0]
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Check that calico_rr nodes are in k8s_cluster group"
@@ -101,7 +101,7 @@
     msg: "calico_rr must be a child group of k8s_cluster group"
   when:
     - '"calico_rr" in group_names'
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Check vars defined correctly"
@@ -110,7 +110,7 @@
       - "calico_pool_name is defined"
       - "calico_pool_name is match('^[a-zA-Z0-9-_\\\\.]{2,63}$')"
     msg: "calico_pool_name contains invalid characters"
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Check calico network backend defined correctly"
@@ -118,11 +118,11 @@
     that:
       - "calico_network_backend in ['bird', 'vxlan', 'none']"
     msg: "calico network backend is not 'bird', 'vxlan' or 'none'"
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Check ipip and vxlan mode defined correctly"
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   assert:
     that:
@@ -137,7 +137,7 @@
     msg: "IP in IP and VXLAN mode is mutualy exclusive modes"
   when:
     - "calico_ipip_mode in ['Always', 'CrossSubnet']"
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Check ipip and vxlan mode if simultaneously enabled"
@@ -147,23 +147,23 @@
     msg: "IP in IP and VXLAN mode is mutualy exclusive modes"
   when:
     - "calico_vxlan_mode in ['Always', 'CrossSubnet']"
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Get Calico {{ calico_pool_name }} configuration"
   command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }} -o json"
-  failed_when: False
-  changed_when: False
-  check_mode: no
+  failed_when: false
+  changed_when: false
+  check_mode: false
   register: calico
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Set calico_pool_conf"
   set_fact:
     calico_pool_conf: '{{ calico.stdout | from_json }}'
   when: calico.rc == 0 and calico.stdout
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Check if inventory match current cluster configuration"
@@ -176,7 +176,7 @@
     msg: "Your inventory doesn't match the current cluster configuration"
   when:
     - calico_pool_conf is defined
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Check kdd calico_datastore if calico_apiserver_enabled"
@@ -185,7 +185,7 @@
     msg: "When using calico apiserver you need to use the kubernetes datastore"
   when:
     - calico_apiserver_enabled
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Check kdd calico_datastore if typha_enabled"
@@ -194,7 +194,7 @@
     msg: "When using typha you need to use the kubernetes datastore"
   when:
     - typha_enabled
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: "Check ipip mode is Never for calico ipv6"
@@ -204,5 +204,5 @@
     msg: "Calico doesn't support ipip tunneling for the IPv6"
   when:
     - enable_dual_stack_networks
-  run_once: True
+  run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index 7f895b555..1d3b02339 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -14,7 +14,7 @@
     src: "{{ downloads.calicoctl.dest }}"
     dest: "{{ bin_dir }}/calicoctl"
     mode: "0755"
-    remote_src: yes
+    remote_src: true
 
 - name: Calico | Create calico certs directory
   file:
@@ -31,7 +31,7 @@
     dest: "{{ calico_cert_dir }}/{{ item.d }}"
     state: hard
     mode: "0640"
-    force: yes
+    force: true
   with_items:
     - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
     - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
@@ -61,7 +61,7 @@
 - name: Calico | wait for etcd
   uri:
     url: "{{ etcd_access_addresses.split(',') | first }}/health"
-    validate_certs: no
+    validate_certs: false
     client_cert: "{{ calico_cert_dir }}/cert.crt"
     client_key: "{{ calico_cert_dir }}/key.pem"
   register: result
@@ -165,8 +165,8 @@
     - name: Calico | Get existing FelixConfiguration
       command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json"
       register: _felix_cmd
-      ignore_errors: True
-      changed_when: False
+      ignore_errors: true
+      changed_when: false
 
     - name: Calico | Set kubespray FelixConfiguration
       set_fact:
@@ -201,7 +201,7 @@
       command:
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
         stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config | to_json) }}"
-      changed_when: False
+      changed_when: false
 
 - name: Calico | Configure Calico IP Pool
   when:
@@ -210,8 +210,8 @@
     - name: Calico | Get existing calico network pool
       command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json"
       register: _calico_pool_cmd
-      ignore_errors: True
-      changed_when: False
+      ignore_errors: true
+      changed_when: false
 
     - name: Calico | Set kubespray calico network pool
       set_fact:
@@ -251,7 +251,7 @@
       command:
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
         stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool | to_json) }}"
-      changed_when: False
+      changed_when: false
 
 - name: Calico | Configure Calico IPv6 Pool
   when:
@@ -261,8 +261,8 @@
     - name: Calico | Get existing calico ipv6 network pool
       command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json"
       register: _calico_pool_ipv6_cmd
-      ignore_errors: True
-      changed_when: False
+      ignore_errors: true
+      changed_when: false
 
     - name: Calico | Set kubespray calico network pool
       set_fact:
@@ -302,19 +302,19 @@
       command:
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
         stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6 | to_json) }}"
-      changed_when: False
+      changed_when: false
 
 - name: Populate Service External IPs
   set_fact:
     _service_external_ips: "{{ _service_external_ips | default([]) + [{'cidr': item}] }}"
   with_items: "{{ calico_advertise_service_external_ips }}"
-  run_once: yes
+  run_once: true
 
 - name: Populate Service LoadBalancer IPs
   set_fact:
     _service_loadbalancer_ips: "{{ _service_loadbalancer_ips | default([]) + [{'cidr': item}] }}"
   with_items: "{{ calico_advertise_service_loadbalancer_ips }}"
-  run_once: yes
+  run_once: true
 
 - name: "Determine nodeToNodeMesh needed state"
   set_fact:
@@ -322,7 +322,7 @@
   when:
     - peer_with_router | default(false) or peer_with_calico_rr | default(false)
     - inventory_hostname in groups['k8s_cluster']
-  run_once: yes
+  run_once: true
 
 - name: Calico | Configure Calico BGP
   when:
@@ -331,8 +331,8 @@
     - name: Calico | Get existing BGP Configuration
       command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json"
       register: _bgp_config_cmd
-      ignore_errors: True
-      changed_when: False
+      ignore_errors: true
+      changed_when: false
 
     - name: Calico | Set kubespray BGP Configuration
       set_fact:
@@ -366,7 +366,7 @@
       command:
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
         stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config | to_json) }}"
-      changed_when: False
+      changed_when: false
 
 - name: Calico | Create calico manifests
   template:
diff --git a/roles/network_plugin/calico/tasks/repos.yml b/roles/network_plugin/calico/tasks/repos.yml
index dd29f4520..7eba916bb 100644
--- a/roles/network_plugin/calico/tasks/repos.yml
+++ b/roles/network_plugin/calico/tasks/repos.yml
@@ -10,11 +10,11 @@
         file: _copr:copr.fedorainfracloud.org:jdoss:wireguard
         description: Copr repo for wireguard owned by jdoss
         baseurl: "{{ calico_wireguard_repo }}"
-        gpgcheck: yes
+        gpgcheck: true
         gpgkey: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/pubkey.gpg
-        skip_if_unavailable: yes
-        enabled: yes
-        repo_gpgcheck: no
+        skip_if_unavailable: true
+        enabled: true
+        repo_gpgcheck: false
       when:
         - ansible_os_family in ['RedHat']
         - ansible_distribution not in ['Fedora']
diff --git a/roles/network_plugin/calico/tasks/reset.yml b/roles/network_plugin/calico/tasks/reset.yml
index 8dab21462..16c850977 100644
--- a/roles/network_plugin/calico/tasks/reset.yml
+++ b/roles/network_plugin/calico/tasks/reset.yml
@@ -2,9 +2,9 @@
 - name: Reset | check vxlan.calico network device
   stat:
     path: /sys/class/net/vxlan.calico
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: vxlan
 
 - name: Reset | remove the network vxlan.calico device created by calico
@@ -14,9 +14,9 @@
 - name: Reset | check dummy0 network device
   stat:
     path: /sys/class/net/dummy0
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: dummy0
 
 - name: Reset | remove the network device created by calico
diff --git a/roles/network_plugin/cilium/tasks/install.yml b/roles/network_plugin/cilium/tasks/install.yml
index 1039953a0..7da39644b 100644
--- a/roles/network_plugin/cilium/tasks/install.yml
+++ b/roles/network_plugin/cilium/tasks/install.yml
@@ -22,7 +22,7 @@
     dest: "{{ cilium_cert_dir }}/{{ item.d }}"
     mode: "0644"
     state: hard
-    force: yes
+    force: true
   loop:
     - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
     - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
@@ -94,4 +94,4 @@
     src: "{{ local_release_dir }}/cilium"
     dest: "{{ bin_dir }}/cilium"
     mode: "0755"
-    remote_src: yes
+    remote_src: true
diff --git a/roles/network_plugin/cilium/tasks/reset_iface.yml b/roles/network_plugin/cilium/tasks/reset_iface.yml
index e2f7c14af..57a2d5765 100644
--- a/roles/network_plugin/cilium/tasks/reset_iface.yml
+++ b/roles/network_plugin/cilium/tasks/reset_iface.yml
@@ -2,9 +2,9 @@
 - name: "Reset | check if network device {{ iface }} is present"
   stat:
     path: "/sys/class/net/{{ iface }}"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: device_remains
 
 - name: "Reset | remove network device {{ iface }}"
diff --git a/roles/network_plugin/cni/tasks/main.yml b/roles/network_plugin/cni/tasks/main.yml
index 8ac0dc53a..28376bd76 100644
--- a/roles/network_plugin/cni/tasks/main.yml
+++ b/roles/network_plugin/cni/tasks/main.yml
@@ -13,4 +13,4 @@
     dest: "/opt/cni/bin"
     mode: "0755"
     owner: "{{ cni_bin_owner }}"
-    remote_src: yes
+    remote_src: true
diff --git a/roles/network_plugin/flannel/tasks/reset.yml b/roles/network_plugin/flannel/tasks/reset.yml
index 03d40a0c1..c4b1b8815 100644
--- a/roles/network_plugin/flannel/tasks/reset.yml
+++ b/roles/network_plugin/flannel/tasks/reset.yml
@@ -2,9 +2,9 @@
 - name: Reset | check cni network device
   stat:
     path: /sys/class/net/cni0
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: cni
 
 - name: Reset | remove the network device created by the flannel
@@ -14,9 +14,9 @@
 - name: Reset | check flannel network device
   stat:
     path: /sys/class/net/flannel.1
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: flannel
 
 - name: Reset | remove the network device created by the flannel
diff --git a/roles/network_plugin/kube-router/tasks/reset.yml b/roles/network_plugin/kube-router/tasks/reset.yml
index ae9ee55c7..32f707591 100644
--- a/roles/network_plugin/kube-router/tasks/reset.yml
+++ b/roles/network_plugin/kube-router/tasks/reset.yml
@@ -2,9 +2,9 @@
 - name: Reset | check kube-dummy-if network device
   stat:
     path: /sys/class/net/kube-dummy-if
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: kube_dummy_if
 
 - name: Reset | remove the network device created by kube-router
@@ -14,9 +14,9 @@
 - name: Check kube-bridge exists
   stat:
     path: /sys/class/net/kube-bridge
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: kube_bridge_if
 
 - name: Reset | donw the network bridge create by kube-router
diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml
index 165030d59..6ffe3348c 100644
--- a/roles/network_plugin/macvlan/tasks/main.yml
+++ b/roles/network_plugin/macvlan/tasks/main.yml
@@ -104,7 +104,7 @@
   ansible.posix.sysctl:
     name: net.ipv4.conf.all.arp_notify
     value: 1
-    sysctl_set: yes
+    sysctl_set: true
     sysctl_file: "{{ sysctl_file_path }}"
     state: present
-    reload: yes
+    reload: true
diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml
index 599f56b15..6291ea36f 100644
--- a/roles/recover_control_plane/etcd/tasks/main.yml
+++ b/roles/recover_control_plane/etcd/tasks/main.yml
@@ -4,7 +4,7 @@
   register: etcd_endpoint_health
   ignore_errors: true  # noqa ignore-errors
   changed_when: false
-  check_mode: no
+  check_mode: false
   environment:
     ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
@@ -64,7 +64,7 @@
   command: "{{ bin_dir }}/etcdctl member list"
   register: member_list
   changed_when: false
-  check_mode: no
+  check_mode: false
   environment:
     ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index 0279018d4..70e33251a 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -32,7 +32,7 @@
   register: etcd_member_id
   ignore_errors: true  # noqa ignore-errors
   changed_when: false
-  check_mode: no
+  check_mode: false
   tags:
     - facts
   environment:
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 1d00913e1..acafb5a76 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -50,9 +50,9 @@
 - name: Reset | check if crictl is present
   stat:
     path: "{{ bin_dir }}/crictl"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: crictl
 
 - name: Reset | stop all cri containers
@@ -161,7 +161,7 @@
   shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
   args:
     executable: /bin/bash
-  check_mode: no
+  check_mode: false
   register: mounted_dirs
   failed_when: false
   changed_when: false
@@ -182,7 +182,7 @@
 - name: Flush iptables
   iptables:
     table: "{{ item }}"
-    flush: yes
+    flush: true
   with_items:
     - filter
     - nat
@@ -195,7 +195,7 @@
 - name: Flush ip6tables
   iptables:
     table: "{{ item }}"
-    flush: yes
+    flush: true
     ip_version: ipv6
   with_items:
     - filter
@@ -215,9 +215,9 @@
 - name: Reset | check kube-ipvs0 network device
   stat:
     path: /sys/class/net/kube-ipvs0
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: kube_ipvs0
 
 - name: Reset | Remove kube-ipvs0
@@ -229,9 +229,9 @@
 - name: Reset | check nodelocaldns network device
   stat:
     path: /sys/class/net/nodelocaldns
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: nodelocaldns_device
 
 - name: Reset | Remove nodelocaldns
@@ -243,9 +243,9 @@
 - name: Reset | Check whether /var/lib/kubelet directory exists
   stat:
     path: /var/lib/kubelet
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: var_lib_kubelet_directory
 
 - name: Reset | Find files/dirs with immutable flag in /var/lib/kubelet
diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml
index d82fe8d33..467ff05e1 100644
--- a/roles/upgrade/post-upgrade/tasks/main.yml
+++ b/roles/upgrade/post-upgrade/tasks/main.yml
@@ -13,7 +13,7 @@
 
 - name: Confirm node uncordon
   pause:
-    echo: yes
+    echo: true
     prompt: "Ready to uncordon node?"
   when:
     - upgrade_node_post_upgrade_confirm
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index 8d5d99c93..6e3cdd2b8 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -2,7 +2,7 @@
 # Wait for upgrade
 - name: Confirm node upgrade
   pause:
-    echo: yes
+    echo: true
     prompt: "Ready to upgrade node? (Press Enter to continue or Ctrl+C for other options)"
   when:
     - upgrade_node_confirm
diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml
index 880c58cf8..1dd504e16 100644
--- a/roles/win_nodes/kubernetes_patch/tasks/main.yml
+++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml
@@ -4,7 +4,7 @@
   file:
     path: "{{ kubernetes_user_manifests_path }}/kubernetes"
     state: directory
-    recurse: yes
+    recurse: true
   tags: [init, cni]
 
 - name: Apply kube-proxy nodeselector
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index 0234c0733..272fb724b 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -2,7 +2,7 @@
 - name: Collect debug info
   hosts: all
   become: true
-  gather_facts: no
+  gather_facts: false
 
   vars:
     docker_bin_dir: /usr/bin
@@ -118,7 +118,7 @@
       failed_when: false
       with_items: "{{ commands }}"
       when: item.when | default(True)
-      no_log: True
+      no_log: true
 
     - name: Fetch results
       fetch:
diff --git a/tests/cloud_playbooks/cleanup-packet.yml b/tests/cloud_playbooks/cleanup-packet.yml
index 009071ec2..2ba5e3021 100644
--- a/tests/cloud_playbooks/cleanup-packet.yml
+++ b/tests/cloud_playbooks/cleanup-packet.yml
@@ -2,7 +2,7 @@
 
 - name: Cleanup packet vms
   hosts: localhost
-  gather_facts: no
+  gather_facts: false
   become: true
   roles:
     - { role: cleanup-packet-ci }
diff --git a/tests/cloud_playbooks/create-packet.yml b/tests/cloud_playbooks/create-packet.yml
index 8212fb6c8..2cd08b54d 100644
--- a/tests/cloud_playbooks/create-packet.yml
+++ b/tests/cloud_playbooks/create-packet.yml
@@ -2,7 +2,7 @@
 
 - name: Provision Packet VMs
   hosts: localhost
-  gather_facts: no
+  gather_facts: false
   become: true
   vars:
     ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}"
diff --git a/tests/cloud_playbooks/delete-packet.yml b/tests/cloud_playbooks/delete-packet.yml
index 7d0c9003c..7320da622 100644
--- a/tests/cloud_playbooks/delete-packet.yml
+++ b/tests/cloud_playbooks/delete-packet.yml
@@ -2,7 +2,7 @@
 
 - name: Terminate Packet VMs
   hosts: localhost
-  gather_facts: no
+  gather_facts: false
   become: true
   vars:
     ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}"
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
index 98bd05a61..75156584a 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/delete-vms.yml
@@ -3,9 +3,9 @@
 - name: Check if temp directory for {{ test_name }} exists
   stat:
     path: "/tmp/{{ test_name }}"
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: temp_dir_details
 
 - name: "Cleanup temp directory for {{ test_name }}"
diff --git a/tests/cloud_playbooks/wait-for-ssh.yml b/tests/cloud_playbooks/wait-for-ssh.yml
index 0e09c9f04..54b268273 100644
--- a/tests/cloud_playbooks/wait-for-ssh.yml
+++ b/tests/cloud_playbooks/wait-for-ssh.yml
@@ -1,8 +1,8 @@
 ---
 - name: Wait until SSH is available
   hosts: all
-  become: False
-  gather_facts: False
+  become: false
+  gather_facts: false
 
   tasks:
   - name: Wait until SSH is available
diff --git a/tests/files/packet_ubuntu20-all-in-one-docker.yml b/tests/files/packet_ubuntu20-all-in-one-docker.yml
index 2ed6307d8..0116eae42 100644
--- a/tests/files/packet_ubuntu20-all-in-one-docker.yml
+++ b/tests/files/packet_ubuntu20-all-in-one-docker.yml
@@ -8,7 +8,7 @@ auto_renew_certificates: true
 
 # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
 kube_proxy_mode: iptables
-enable_nodelocaldns: False
+enable_nodelocaldns: false
 
 # Use docker
 container_manager: docker
diff --git a/tests/files/packet_ubuntu20-calico-all-in-one-hardening.yml b/tests/files/packet_ubuntu20-calico-all-in-one-hardening.yml
index c494810cf..5dafe23bf 100644
--- a/tests/files/packet_ubuntu20-calico-all-in-one-hardening.yml
+++ b/tests/files/packet_ubuntu20-calico-all-in-one-hardening.yml
@@ -8,7 +8,7 @@ auto_renew_certificates: true
 
 # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
 kube_proxy_mode: iptables
-enable_nodelocaldns: False
+enable_nodelocaldns: false
 
 # The followings are for hardening
 ## kube-apiserver
diff --git a/tests/files/packet_ubuntu20-calico-all-in-one.yml b/tests/files/packet_ubuntu20-calico-all-in-one.yml
index 3cfc99c96..f59e72a3c 100644
--- a/tests/files/packet_ubuntu20-calico-all-in-one.yml
+++ b/tests/files/packet_ubuntu20-calico-all-in-one.yml
@@ -8,4 +8,4 @@ auto_renew_certificates: true
 
 # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
 kube_proxy_mode: iptables
-enable_nodelocaldns: False
+enable_nodelocaldns: false
diff --git a/tests/files/packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha.yml b/tests/files/packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha.yml
index 57187a8dd..425ce75b8 100644
--- a/tests/files/packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha.yml
+++ b/tests/files/packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha.yml
@@ -10,7 +10,7 @@ upgrade_cluster_setup: true
 
 # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
 kube_proxy_mode: iptables
-enable_nodelocaldns: False
+enable_nodelocaldns: false
 
 # Pin disabling ipip mode to ensure proper upgrade
 ipip: false
diff --git a/tests/files/packet_ubuntu20-calico-etcd-kubeadm.yml b/tests/files/packet_ubuntu20-calico-etcd-kubeadm.yml
index ba9d7b34b..ddc5cb556 100644
--- a/tests/files/packet_ubuntu20-calico-etcd-kubeadm.yml
+++ b/tests/files/packet_ubuntu20-calico-etcd-kubeadm.yml
@@ -8,7 +8,7 @@ etcd_deployment_type: kubeadm
 
 # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
 kube_proxy_mode: iptables
-enable_nodelocaldns: False
+enable_nodelocaldns: false
 
 # Remove anonymous access to cluster
 remove_anonymous_access: true
diff --git a/tests/files/packet_ubuntu22-all-in-one-docker.yml b/tests/files/packet_ubuntu22-all-in-one-docker.yml
index 16ae45986..fcdd8f3cc 100644
--- a/tests/files/packet_ubuntu22-all-in-one-docker.yml
+++ b/tests/files/packet_ubuntu22-all-in-one-docker.yml
@@ -9,7 +9,7 @@ auto_renew_certificates: true
 
 # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
 kube_proxy_mode: iptables
-enable_nodelocaldns: False
+enable_nodelocaldns: false
 
 # Use docker
 container_manager: docker
diff --git a/tests/files/packet_ubuntu22-calico-all-in-one.yml b/tests/files/packet_ubuntu22-calico-all-in-one.yml
index 2c666f8e3..615530107 100644
--- a/tests/files/packet_ubuntu22-calico-all-in-one.yml
+++ b/tests/files/packet_ubuntu22-calico-all-in-one.yml
@@ -9,7 +9,7 @@ auto_renew_certificates: true
 
 # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
 kube_proxy_mode: iptables
-enable_nodelocaldns: False
+enable_nodelocaldns: false
 
 containerd_registries_mirrors:
   - prefix: docker.io
diff --git a/tests/files/packet_ubuntu24-all-in-one-docker.yml b/tests/files/packet_ubuntu24-all-in-one-docker.yml
index d4a0adccb..8b1da4ac0 100644
--- a/tests/files/packet_ubuntu24-all-in-one-docker.yml
+++ b/tests/files/packet_ubuntu24-all-in-one-docker.yml
@@ -9,7 +9,7 @@ auto_renew_certificates: true
 
 # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=noble&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
 kube_proxy_mode: iptables
-enable_nodelocaldns: False
+enable_nodelocaldns: false
 
 # Use docker
 container_manager: docker
diff --git a/tests/files/packet_ubuntu24-calico-all-in-one.yml b/tests/files/packet_ubuntu24-calico-all-in-one.yml
index 4b9e403ca..5d7f55878 100644
--- a/tests/files/packet_ubuntu24-calico-all-in-one.yml
+++ b/tests/files/packet_ubuntu24-calico-all-in-one.yml
@@ -9,7 +9,7 @@ auto_renew_certificates: true
 
 # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=noble&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
 kube_proxy_mode: iptables
-enable_nodelocaldns: False
+enable_nodelocaldns: false
 
 containerd_registries_mirrors:
   - prefix: docker.io
diff --git a/tests/files/packet_ubuntu24-calico-etcd-datastore.yml b/tests/files/packet_ubuntu24-calico-etcd-datastore.yml
index 2805fa731..4f35d2f87 100644
--- a/tests/files/packet_ubuntu24-calico-etcd-datastore.yml
+++ b/tests/files/packet_ubuntu24-calico-etcd-datastore.yml
@@ -9,7 +9,7 @@ auto_renew_certificates: true
 
 # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=noble&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko
 kube_proxy_mode: iptables
-enable_nodelocaldns: False
+enable_nodelocaldns: false
 
 containerd_registries:
   "docker.io": "https://mirror.gcr.io"
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index 0d20bda02..081a2a31e 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -6,7 +6,7 @@
   - name: Check the API servers are responding
     uri:
       url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port | default(6443) }}/version"
-      validate_certs: no
+      validate_certs: false
       status_code: 200
     register: apiserver_response
     retries: 12
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index 9b7eacfc1..e13128dd9 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -153,13 +153,13 @@
   - name: Get running pods
     command: "{{ bin_dir }}/kubectl get pods -n test -o
             jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
-    changed_when: False
+    changed_when: false
     register: running_pods
     no_log: true
 
   - name: Check kubectl output
     command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
-    changed_when: False
+    changed_when: false
     register: get_pods
     no_log: true
 
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 4fc70eb07..45cf6db28 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -69,7 +69,7 @@
     - name: Get netchecker agents
       uri:
         url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/"
-        return_content: yes
+        return_content: true
       run_once: true
       delegate_to: "{{ groups['kube_control_plane'][0] }}"
       register: agents
@@ -85,7 +85,7 @@
       uri:
         url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check"
         status_code: 200
-        return_content: yes
+        return_content: true
       delegate_to: "{{ groups['kube_control_plane'][0] }}"
       run_once: true
       register: connectivity_check
diff --git a/tests/testcases/100_check-k8s-conformance.yml b/tests/testcases/100_check-k8s-conformance.yml
index 3c07ffe46..3e0f17109 100644
--- a/tests/testcases/100_check-k8s-conformance.yml
+++ b/tests/testcases/100_check-k8s-conformance.yml
@@ -24,7 +24,7 @@
       unarchive:
         src: /tmp/sonobuoy.tar.gz
         dest: /usr/local/bin/
-        copy: no
+        copy: false
 
     - name: Run sonobuoy
       command: "{{ sonobuoy_path }} run --mode {{ sonobuoy_mode }} --e2e-parallel {{ sonobuoy_parallel }} --wait"
-- 
GitLab