diff --git a/.ansible-lint b/.ansible-lint
index ec6a9e0c34ffa9ed8dcf9c5b74a99e6e548c75b0..021341d245e7740b8dd7f55a338bdf5c7d768555 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -16,7 +16,6 @@ skip_list:
   # In Kubespray we use variables that use camelCase to match their k8s counterparts
   # (Disabled in June 2021)
   - 'var-naming'
-  - 'var-spacing'
 
   # [fqcn-builtins]
   # Roles in kubespray don't need fully qualified collection names
diff --git a/.ansible-lint-ignore b/.ansible-lint-ignore
new file mode 100644
index 0000000000000000000000000000000000000000..03a371318b87158373fbdd5cb8410e77c0bf995a
--- /dev/null
+++ b/.ansible-lint-ignore
@@ -0,0 +1,8 @@
+# This file contains ignored rule violations for ansible-lint
+inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml jinja[spacing]
+roles/kubernetes/control-plane/defaults/main/kube-proxy.yml jinja[spacing]
+roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing]
+roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing]
+roles/kubernetes/node/defaults/main.yml jinja[spacing]
+roles/kubernetes/preinstall/defaults/main.yml jinja[spacing]
+roles/kubespray-defaults/defaults/main.yaml jinja[spacing]
diff --git a/contrib/azurerm/roles/generate-templates/defaults/main.yml b/contrib/azurerm/roles/generate-templates/defaults/main.yml
index 1ba24804331317b7e480becd58dbd03649b5668d..ff6b313266ff0adfc4fcf314b27817563d7665b5 100644
--- a/contrib/azurerm/roles/generate-templates/defaults/main.yml
+++ b/contrib/azurerm/roles/generate-templates/defaults/main.yml
@@ -24,14 +24,14 @@ bastionIPAddressName: bastion-pubip
 
 disablePasswordAuthentication: true
 
-sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
+sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys"
 
 imageReference:
   publisher: "OpenLogic"
   offer: "CentOS"
   sku: "7.5"
   version: "latest"
-imageReferenceJson: "{{imageReference|to_json}}"
+imageReferenceJson: "{{ imageReference | to_json }}"
 
-storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
+storageAccountName: "sa{{ nameSuffix | replace('-', '') }}"
 storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml
index 59023df3c2ce03200adaaf37e6625a50433d2c12..2d74f7ea72b79370c5996bb0baff828c4650eb88 100644
--- a/contrib/dind/roles/dind-cluster/tasks/main.yaml
+++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml
@@ -43,7 +43,7 @@
   package:
     name: "{{ item }}"
     state: present
-  with_items: "{{ distro_extra_packages + [ 'rsyslog', 'openssh-server' ] }}"
+  with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"
 
 - name: Start needed services
   service:
@@ -70,4 +70,4 @@
   ansible.posix.authorized_key:
     user: "{{ distro_user }}"
     state: present
-    key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
+    key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"
diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml
index 205f77894ff5069568096376080fbf895dbe7e62..030ce72662b8944b108ea538ed9378f1d07c5f72 100644
--- a/contrib/dind/roles/dind-host/tasks/main.yaml
+++ b/contrib/dind/roles/dind-host/tasks/main.yaml
@@ -53,7 +53,7 @@
     {{ distro_raw_setup_done }}  && echo SKIPPED && exit 0
     until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
     {{ distro_raw_setup }}
-  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
   with_items: "{{ containers.results }}"
   register: result
   changed_when: result.stdout.find("SKIPPED") < 0
@@ -63,7 +63,7 @@
     until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
     systemctl disable {{ distro_agetty_svc }}
     systemctl stop {{ distro_agetty_svc }}
-  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
   with_items: "{{ containers.results }}"
   changed_when: false
 
@@ -75,13 +75,13 @@
     mv -b /etc/machine-id.new /etc/machine-id
     cmp /etc/machine-id /etc/machine-id~ || true
     systemctl daemon-reload
-  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
   with_items: "{{ containers.results }}"
 
 - name: Early hack image install to adapt for DIND
   raw: |
     rm -fv /usr/bin/udevadm /usr/sbin/udevadm
-  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
   with_items: "{{ containers.results }}"
   register: result
   changed_when: result.stdout.find("removed") >= 0
diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
index 1146188aaf65351c189cde55e69f375e160e1ba2..64e7691bb3c5e8ea7e8438dc686103ca1c961e05 100644
--- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
+++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml
@@ -58,27 +58,27 @@
     name: "{{ gluster_brick_name }}"
     brick: "{{ gluster_brick_dir }}"
     replicas: "{{ groups['gfs-cluster'] | length }}"
-    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
+    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
     host: "{{ inventory_hostname }}"
     force: yes
   run_once: true
-  when: groups['gfs-cluster']|length > 1
+  when: groups['gfs-cluster'] | length > 1
 
 - name: Configure Gluster volume without replicas
   gluster.gluster.gluster_volume:
     state: present
     name: "{{ gluster_brick_name }}"
     brick: "{{ gluster_brick_dir }}"
-    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
+    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
     host: "{{ inventory_hostname }}"
     force: yes
   run_once: true
-  when: groups['gfs-cluster']|length <= 1
+  when: groups['gfs-cluster'] | length <= 1
 
 - name: Mount glusterfs to retrieve disk size
   ansible.posix.mount:
     name: "{{ gluster_mount_dir }}"
-    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
+    src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
     fstype: glusterfs
     opts: "defaults,_netdev"
     state: mounted
@@ -92,7 +92,7 @@
 
 - name: Set Gluster disk size to variable
   set_fact:
-    gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
+    gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
 - name: Create file on GlusterFS
@@ -106,6 +106,6 @@
   ansible.posix.mount:
     name: "{{ gluster_mount_dir }}"
     fstype: glusterfs
-    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
+    src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
     state: unmounted
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
diff --git a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
index 82b0acb82d8d74992a5924204763ce21df1e11ca..ed62e282e38c3fb150f0aca7a35e2c5df40f4b61 100644
--- a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
+++ b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml
@@ -18,6 +18,6 @@
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
-    state: "{{ item.changed | ternary('latest','present') }}"
+    state: "{{ item.changed | ternary('latest', 'present') }}"
   with_items: "{{ gluster_pv.results }}"
   when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
index f0111cec01a63cb42b4af0036a34168b6082af99..7b4330038834bc375e2f48a50ffc1f60a5cd0e6a 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
@@ -7,9 +7,9 @@
 
 - name: "Bootstrap heketi."
   when:
-    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
-    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
-    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
+    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
+    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
+    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
   include_tasks: "bootstrap/deploy.yml"
 
 # Prepare heketi topology
@@ -20,11 +20,11 @@
 
 - name: "Ensure heketi bootstrap pod is up."
   assert:
-    that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
+    that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"
 
 - name: Store the initial heketi pod name
   set_fact:
-    initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
+    initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"
 
 - name: "Test heketi topology."
   changed_when: false
@@ -32,7 +32,7 @@
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
 
 - name: "Load heketi topology."
-  when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
+  when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
   include_tasks: "bootstrap/topology.yml"
 
 # Provision heketi database volume
@@ -58,7 +58,7 @@
     service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
     job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
   when:
-    - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
-    - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
-    - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
-    - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
+    - "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
+    - "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
+    - "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
+    - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
index 8d03ffc2fc1f12fd7f15242d79ff10ddceb7abe5..866fe30bf6ada53c838ed3ee675610e5adb40bcf 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml
@@ -17,11 +17,11 @@
   register: "initial_heketi_state"
   vars:
     initial_heketi_state: { stdout: "{}" }
-    pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
-    deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
+    pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]"
+    deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]"
   command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
   until:
-    - "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
-    - "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
+    - "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
+    - "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
   retries: 60
   delay: 5
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml
index 63a475a85cc37861a09d1c676bdbb35982d14af4..650c12d12eec489b0869c2fb3f56d22746f7d0ae 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml
@@ -15,10 +15,10 @@
     service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
     job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
   when:
-    - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
-    - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
-    - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
-    - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
+    - "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
+    - "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
+    - "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
+    - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
   register: "heketi_storage_result"
 - name: "Get state of heketi database copy job."
   command: "{{ bin_dir }}/kubectl get jobs --output=json"
@@ -28,6 +28,6 @@
     heketi_storage_state: { stdout: "{}" }
     job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
   until:
-    - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1"
+    - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1"
   retries: 60
   delay: 5
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml
index 0ffd6f469f2e5fbbcbef40fe45e0aa8dc7577144..ad48882b6c8f9cebbadaa918e42d3a74bb8a83db 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml
@@ -5,10 +5,10 @@
   changed_when: false
 - name: "Delete bootstrap Heketi."
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
-  when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
+  when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0"
 - name: "Ensure there is nothing left over."
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
   register: "heketi_result"
-  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+  until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
   retries: 60
   delay: 5
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
index e623576d1be3fe012d2a4b60c1ffd5f6a21616b9..2f3efd4dd1fe365ae4ea7e1b0df27e8b0b6356e9 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml
@@ -22,6 +22,6 @@
   changed_when: false
   register: "heketi_topology"
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
-  until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
+  until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
   retries: 60
   delay: 5
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
index 14ab97793991d5c7f2d39d693b89dde66a9636ea..6d26dfc9a61e89d74478ed447df1f05709e3600b 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml
@@ -6,14 +6,14 @@
 - name: "Get heketi volumes."
   changed_when: false
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
-  with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
+  with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
   loop_control: { loop_var: "volume_id" }
   register: "volumes_information"
 - name: "Test heketi database volume."
   set_fact: { heketi_database_volume_exists: true }
   with_items: "{{ volumes_information.results }}"
   loop_control: { loop_var: "volume_information" }
-  vars: { volume: "{{ volume_information.stdout|from_json }}" }
+  vars: { volume: "{{ volume_information.stdout | from_json }}" }
   when: "volume.name == 'heketidbstorage'"
 - name: "Provision database volume."
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
@@ -28,14 +28,14 @@
 - name: "Get heketi volumes."
   changed_when: false
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
-  with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
+  with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
   loop_control: { loop_var: "volume_id" }
   register: "volumes_information"
 - name: "Test heketi database volume."
   set_fact: { heketi_database_volume_created: true }
   with_items: "{{ volumes_information.results }}"
   loop_control: { loop_var: "volume_information" }
-  vars: { volume: "{{ volume_information.stdout|from_json }}" }
+  vars: { volume: "{{ volume_information.stdout | from_json }}" }
   when: "volume.name == 'heketidbstorage'"
 - name: "Ensure heketi database volume exists."
   assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
index 3409cf95785254d79134880ccc2b5d3ec492e861..973c6685141c33adb7bb9f96ad33e4ba6f7cc05b 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
@@ -23,8 +23,8 @@
   changed_when: false
   vars:
     daemonset_state: { stdout: "{}" }
-    ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}"
-    desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}"
+    ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}"
+    desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}"
   until: "ready | int >= 3"
   retries: 60
   delay: 5
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml
index ae598c3dfcbc976bad0f5daa55b5e9a28597cb35..4cefd47ac156534841ec81bb0119e172533eebd9 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml
@@ -5,7 +5,7 @@
   changed_when: false
 
 - name: "Assign storage label"
-  when: "label_present.stdout_lines|length == 0"
+  when: "label_present.stdout_lines | length == 0"
   command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
 
 - name: Get storage nodes again
@@ -15,5 +15,5 @@
 
 - name: Ensure the label has been set
   assert:
-    that: "label_present|length > 0"
+    that: "label_present | length > 0"
     msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
index 9a6ce55b2560a3ac642248ec13843c584321d00f..a8549df458169db729d2a4b2cd6982471546dfd2 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
@@ -24,11 +24,11 @@
     deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
   command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
   until:
-    - "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
-    - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
+    - "heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
+    - "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
   retries: 60
   delay: 5
 
 - name: Set the Heketi pod name
   set_fact:
-    heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
+    heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name | [0]\") }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
index 3380a612f3caf18d7f8446069346671cb598914b..bd4f6666be6c8c5884236941324bd37fff8a047d 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
@@ -12,7 +12,7 @@
 - name: "Render storage class configuration."
   become: true
   vars:
-    endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}"
+    endpoint_address: "{{ (heketi_service.stdout | from_json).spec.clusterIP }}"
   template:
     src: "storageclass.yml.j2"
     dest: "{{ kube_config_dir }}/storageclass.yml"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
index f5f8e6a94728d92b29d82611c5f09585f60abb43..aa662083ea5c90af676f6b49b72f3ae34b6c1da5 100644
--- a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
+++ b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml
@@ -21,6 +21,6 @@
   register: "heketi_topology"
   changed_when: false
   command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
-  until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
+  until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
   retries: 60
   delay: 5
diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
index 5b3553bf472c08b566b31de8c7bb5331dc8470fb..5c271e794d7357ebb7bf58bde4beadbb85708d86 100644
--- a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
+++ b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
@@ -13,13 +13,13 @@
 - name: Ensure there is nothing left over.
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
   register: "heketi_result"
-  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+  until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
   retries: 60
   delay: 5
 - name: Ensure there is nothing left over.
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
   register: "heketi_result"
-  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+  until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
   retries: 60
   delay: 5
 - name: Tear down glusterfs.
@@ -46,6 +46,6 @@
   changed_when: false
 - name: Remove heketi storage secret
   vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
-  command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
+  command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout | from_json | json_query(storage_query) }}"
   when: "storage_query is defined"
   ignore_errors: true  # noqa ignore-errors
diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
index 2699eff2fec33e375cc882768c038c41157b4b87..70e93776a70b479f96f5e96734c17ba9ad5f7679 100644
--- a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
@@ -117,7 +117,7 @@ kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
 kube_network_node_prefix_ipv6: 120
 
 # The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_ip: "{{ kube_service_addresses | ipaddr('net') | ipaddr(1) | ipaddr('address') }}"
 kube_apiserver_port: 6443  # (https)
 
 # Kube-proxy proxyMode configuration.
@@ -141,7 +141,7 @@ kube_proxy_nodeport_addresses: >-
 
 # If non-empty, will use this string as identification instead of the actual hostname
 # kube_override_hostname: >-
-#   {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
+#   {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
 #   {%- else -%}
 #   {{ inventory_hostname }}
 #   {%- endif -%}
@@ -165,7 +165,7 @@ ndots: 2
 # Custom search domains to be added in addition to the default cluster search domains
 # searchdomains:
 #   - svc.{{ cluster_name }}
-#   - default.svc.{{ cluster_name  }}
+#   - default.svc.{{ cluster_name }}
 # Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
 # remove_default_searchdomains: false
 # Can be coredns, coredns_dual, manual or none
@@ -219,8 +219,8 @@ resolvconf_mode: host_resolvconf
 # Deploy netchecker app to verify DNS resolve as an HTTP service
 deploy_netchecker: false
 # Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
+skydns_server: "{{ kube_service_addresses | ipaddr('net') | ipaddr(3) | ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_addresses | ipaddr('net') | ipaddr(4) | ipaddr('address') }}"
 dns_domain: "{{ cluster_name }}"
 
 ## Container runtime
diff --git a/playbooks/remove_node.yml b/playbooks/remove_node.yml
index b9fdb93d60ed6f5269c249907db9392cded3924e..be346a768e9b5ff521d10716b40de984e9e7ae3a 100644
--- a/playbooks/remove_node.yml
+++ b/playbooks/remove_node.yml
@@ -30,21 +30,21 @@
 
 - name: Gather facts
   import_playbook: facts.yml
-  when: reset_nodes|default(True)|bool
+  when: reset_nodes | default(True) | bool
 
 - hosts: "{{ node | default('kube_node') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:
-    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
+    - { role: kubespray-defaults, when: reset_nodes | default(True) | bool }
     - { role: remove-node/pre-remove, tags: pre-remove }
     - { role: remove-node/remove-etcd-node }
-    - { role: reset, tags: reset, when: reset_nodes|default(True)|bool }
+    - { role: reset, tags: reset, when: reset_nodes | default(True) | bool }
 
 # Currently cannot remove first master or etcd
 - hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:
-    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
+    - { role: kubespray-defaults, when: reset_nodes | default(True) | bool }
     - { role: remove-node/post-remove, tags: post-remove }
diff --git a/roles/adduser/defaults/main.yml b/roles/adduser/defaults/main.yml
index faf258d79876f3c40808739a85a6bae1b3d80580..df3fc2d0284a9ef360e54c943ed9dbed6400d538 100644
--- a/roles/adduser/defaults/main.yml
+++ b/roles/adduser/defaults/main.yml
@@ -20,8 +20,8 @@ addusers:
 
 adduser:
   name: "{{ user.name }}"
-  group: "{{ user.name|default(None) }}"
-  comment: "{{ user.comment|default(None) }}"
-  shell: "{{ user.shell|default(None) }}"
-  system: "{{ user.system|default(None) }}"
-  create_home: "{{ user.create_home|default(None) }}"
+  group: "{{ user.name | default(None) }}"
+  comment: "{{ user.comment | default(None) }}"
+  shell: "{{ user.shell | default(None) }}"
+  system: "{{ user.system | default(None) }}"
+  create_home: "{{ user.create_home | default(None) }}"
diff --git a/roles/adduser/tasks/main.yml b/roles/adduser/tasks/main.yml
index 51dd5bb06e3cf3ec510abab05154a6cc55329833..ba5edd7d066895e93c454131f6d76dd8b4489c30 100644
--- a/roles/adduser/tasks/main.yml
+++ b/roles/adduser/tasks/main.yml
@@ -1,16 +1,16 @@
 ---
 - name: User | Create User Group
   group:
-    name: "{{ user.group|default(user.name) }}"
-    system: "{{ user.system|default(omit) }}"
+    name: "{{ user.group | default(user.name) }}"
+    system: "{{ user.system | default(omit) }}"
 
 - name: User | Create User
   user:
-    comment: "{{ user.comment|default(omit) }}"
-    create_home: "{{ user.create_home|default(omit) }}"
-    group: "{{ user.group|default(user.name) }}"
-    home: "{{ user.home|default(omit) }}"
-    shell: "{{ user.shell|default(omit) }}"
+    comment: "{{ user.comment | default(omit) }}"
+    create_home: "{{ user.create_home | default(omit) }}"
+    group: "{{ user.group | default(user.name) }}"
+    home: "{{ user.home | default(omit) }}"
+    shell: "{{ user.shell | default(omit) }}"
     name: "{{ user.name }}"
-    system: "{{ user.system|default(omit) }}"
+    system: "{{ user.system | default(omit) }}"
   when: user.name != "root"
diff --git a/roles/bootstrap-os/tasks/bootstrap-centos.yml b/roles/bootstrap-os/tasks/bootstrap-centos.yml
index aaab37202459e0a30fd2646c6d6aa2b556582f6f..5d543aea12fa1fac8b6db68ef7096580a263b05b 100644
--- a/roles/bootstrap-os/tasks/bootstrap-centos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-centos.yml
@@ -6,7 +6,7 @@
 
 - name: Add proxy to yum.conf or dnf.conf if http_proxy is defined
   community.general.ini_file:
-    path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}"
+    path: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf', '/etc/dnf/dnf.conf') }}"
     section: main
     option: proxy
     value: "{{ http_proxy | default(omit) }}"
@@ -23,7 +23,7 @@
     dest: /etc/yum.repos.d/public-yum-ol7.repo
     mode: 0644
   when:
-    - use_oracle_public_repo|default(true)
+    - use_oracle_public_repo | default(true)
     - '''ID="ol"'' in os_release.stdout_lines'
     - (ansible_distribution_version | float) < 7.6
   environment: "{{ proxy_env }}"
@@ -40,7 +40,7 @@
     - ol7_addons
     - ol7_developer_EPEL
   when:
-    - use_oracle_public_repo|default(true)
+    - use_oracle_public_repo | default(true)
     - '''ID="ol"'' in os_release.stdout_lines'
     - (ansible_distribution_version | float) < 7.6
 
@@ -49,7 +49,7 @@
     name: "oracle-epel-release-el{{ ansible_distribution_major_version }}"
     state: present
   when:
-    - use_oracle_public_repo|default(true)
+    - use_oracle_public_repo | default(true)
     - '''ID="ol"'' in os_release.stdout_lines'
     - (ansible_distribution_version | float) >= 7.6
 
@@ -65,7 +65,7 @@
     - { option: "enabled", value: "1" }
     - { option: "baseurl", value: "http://yum.oracle.com/repo/OracleLinux/OL{{ ansible_distribution_major_version }}/addons/$basearch/" }
   when:
-    - use_oracle_public_repo|default(true)
+    - use_oracle_public_repo | default(true)
     - '''ID="ol"'' in os_release.stdout_lines'
     - (ansible_distribution_version | float) >= 7.6
 
@@ -80,9 +80,9 @@
     - { option: "name", value: "CentOS-{{ ansible_distribution_major_version }} - Extras" }
     - { option: "enabled", value: "1" }
     - { option: "gpgcheck", value: "0" }
-    - { option: "baseurl", value: "http://mirror.centos.org/{{ 'altarch' if (ansible_distribution_major_version | int) <= 7 and ansible_architecture == 'aarch64' else 'centos' }}/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version|int > 7 %}os/{% endif %}" }
+    - { option: "baseurl", value: "http://mirror.centos.org/{{ 'altarch' if (ansible_distribution_major_version | int) <= 7 and ansible_architecture == 'aarch64' else 'centos' }}/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version | int > 7 %}os/{% endif %}" }
   when:
-    - use_oracle_public_repo|default(true)
+    - use_oracle_public_repo | default(true)
     - '''ID="ol"'' in os_release.stdout_lines'
     - (ansible_distribution_version | float) >= 7.6
     - (ansible_distribution_version | float) < 9
@@ -113,6 +113,6 @@
 # See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements
 - name: Install libselinux python package
   package:
-    name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}"
+    name: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}"
     state: present
   become: true
diff --git a/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml b/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml
index d3fd1c94219a8d6c51db4cc90c70b6ab6dfc6de6..91dc020c4a8c6ec916cdfe4846f697ff10fc9722 100644
--- a/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-fedora-coreos.yml
@@ -20,7 +20,7 @@
   when: need_bootstrap.rc != 0
 
 - name: Install required packages on fedora coreos
-  raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree install --allow-inactive {{ fedora_coreos_packages|join(' ') }}"
+  raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree install --allow-inactive {{ fedora_coreos_packages | join(' ') }}"
   become: true
   when: need_bootstrap.rc != 0
 
diff --git a/roles/bootstrap-os/tasks/bootstrap-redhat.yml b/roles/bootstrap-os/tasks/bootstrap-redhat.yml
index a87046165f621c7607372fd50d938066b4782341..c3621466ee665ab5dfa8578564e6d66782417c8f 100644
--- a/roles/bootstrap-os/tasks/bootstrap-redhat.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-redhat.yml
@@ -6,7 +6,7 @@
 
 - name: Add proxy to yum.conf or dnf.conf if http_proxy is defined
   community.general.ini_file:
-    path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}"
+    path: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf', '/etc/dnf/dnf.conf') }}"
     section: main
     option: proxy
     value: "{{ http_proxy | default(omit) }}"
@@ -57,7 +57,7 @@
       sync: true
   notify: RHEL auto-attach subscription
   become: true
-  no_log: "{{ not (unsafe_show_logs|bool) }}"
+  no_log: "{{ not (unsafe_show_logs | bool) }}"
   when:
     - rh_subscription_username is defined
     - rh_subscription_status.changed
@@ -108,6 +108,6 @@
 # See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements
 - name: Install libselinux python package
   package:
-    name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}"
+    name: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}"
     state: present
   become: true
diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml
index 853ce095f4f72aaf1c2f9bc84b74c1782bf84f34..42321fd37522fdfd500040a3fca63c6ce27ac298 100644
--- a/roles/bootstrap-os/tasks/main.yml
+++ b/roles/bootstrap-os/tasks/main.yml
@@ -89,7 +89,7 @@
     name:
       - ceph-common
     state: present
-  when: rbd_provisioner_enabled|default(false)
+  when: rbd_provisioner_enabled | default(false)
 
 - name: Ensure bash_completion.d folder exists
   file:
diff --git a/roles/container-engine/containerd-common/tasks/main.yml b/roles/container-engine/containerd-common/tasks/main.yml
index cfd78f3a379025118665dd65412cd0ccff22d788..fcca4fb640c84f0e35b6c09f5611612353d0cab3 100644
--- a/roles/container-engine/containerd-common/tasks/main.yml
+++ b/roles/container-engine/containerd-common/tasks/main.yml
@@ -15,14 +15,14 @@
   include_vars: "{{ item }}"
   with_first_found:
     - files:
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml"
-        - "{{ ansible_distribution|lower }}.yml"
-        - "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml"
-        - "{{ ansible_os_family|lower }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}-{{ host_architecture }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ host_architecture }}.yml"
+        - "{{ ansible_distribution | lower }}.yml"
+        - "{{ ansible_os_family | lower }}-{{ host_architecture }}.yml"
+        - "{{ ansible_os_family | lower }}.yml"
         - defaults.yml
       paths:
         - ../vars
diff --git a/roles/container-engine/containerd/defaults/main.yml b/roles/container-engine/containerd/defaults/main.yml
index e763d91b106a07481920631ea9fbbe60179a9b5d..4c2df2aba8907d61742ec3145f8b4301711d2846 100644
--- a/roles/container-engine/containerd/defaults/main.yml
+++ b/roles/container-engine/containerd/defaults/main.yml
@@ -36,7 +36,7 @@ containerd_default_base_runtime_spec_patch:
         soft: "{{ containerd_base_runtime_spec_rlimit_nofile }}"
 
 containerd_base_runtime_specs:
-  cri-base.json: "{{ containerd_default_base_runtime_spec | combine(containerd_default_base_runtime_spec_patch,recursive=1) }}"
+  cri-base.json: "{{ containerd_default_base_runtime_spec | combine(containerd_default_base_runtime_spec_patch, recursive=1) }}"
 
 containerd_grpc_max_recv_message_size: 16777216
 containerd_grpc_max_send_message_size: 16777216
diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml
index 5ec9c28acdcb29d39021c4e963cd706487022d9a..e3ee58643308a02d0a91e1b391a411fed0e32ddf 100644
--- a/roles/container-engine/containerd/tasks/main.yml
+++ b/roles/container-engine/containerd/tasks/main.yml
@@ -130,7 +130,7 @@
             capabilities = ["pull", "resolve", "push"]
             skip_verify = true
       with_dict: "{{ containerd_insecure_registries }}"
-  when: containerd_use_config_path is defined and containerd_use_config_path|bool and containerd_insecure_registries is defined
+  when: containerd_use_config_path is defined and containerd_use_config_path | bool and containerd_insecure_registries is defined
 
 # you can sometimes end up in a state where everything is installed
 # but containerd was not started / enabled
diff --git a/roles/container-engine/containerd/vars/debian.yml b/roles/container-engine/containerd/vars/debian.yml
index 99dc4a50c95230a594cd5613f57585916fc2762c..8b18d9a9f4e1385cffee8ae5230d399bea66c1f4 100644
--- a/roles/container-engine/containerd/vars/debian.yml
+++ b/roles/container-engine/containerd/vars/debian.yml
@@ -3,5 +3,5 @@ containerd_repo_info:
   repos:
     - >
       deb {{ containerd_debian_repo_base_url }}
-      {{ ansible_distribution_release|lower }}
+      {{ ansible_distribution_release | lower }}
       {{ containerd_debian_repo_component }}
diff --git a/roles/container-engine/containerd/vars/ubuntu.yml b/roles/container-engine/containerd/vars/ubuntu.yml
index ccce96d0e7c6913edd3698c04954e9367e5ea867..dd775323dde1df261b3352227f0eb17d13d36cd0 100644
--- a/roles/container-engine/containerd/vars/ubuntu.yml
+++ b/roles/container-engine/containerd/vars/ubuntu.yml
@@ -3,5 +3,5 @@ containerd_repo_info:
   repos:
     - >
       deb {{ containerd_ubuntu_repo_base_url }}
-      {{ ansible_distribution_release|lower }}
+      {{ ansible_distribution_release | lower }}
       {{ containerd_ubuntu_repo_component }}
diff --git a/roles/container-engine/cri-o/defaults/main.yml b/roles/container-engine/cri-o/defaults/main.yml
index d2c087b8da34c211bdbebdd8019d1dcc5b24f8c6..949ed69ed53e23c316e8107ab26549b70a7e1fd5 100644
--- a/roles/container-engine/cri-o/defaults/main.yml
+++ b/roles/container-engine/cri-o/defaults/main.yml
@@ -27,7 +27,7 @@ crio_registry_auth: []
 #    password: pass
 
 crio_seccomp_profile: ""
-crio_selinux: "{{ (preinstall_selinux_state == 'enforcing')|lower }}"
+crio_selinux: "{{ (preinstall_selinux_state == 'enforcing') | lower }}"
 crio_signature_policy: "{% if ansible_os_family == 'ClearLinux' %}/usr/share/defaults/crio/policy.json{% endif %}"
 
 # Override system default for storage driver
diff --git a/roles/container-engine/cri-o/tasks/cleanup.yaml b/roles/container-engine/cri-o/tasks/cleanup.yaml
index fd2f119afed66fe73d32e75507c53c1a29eb4546..2c3872229c27ab901ac5575ac80de0b6759b66e5 100644
--- a/roles/container-engine/cri-o/tasks/cleanup.yaml
+++ b/roles/container-engine/cri-o/tasks/cleanup.yaml
@@ -2,7 +2,7 @@
 # TODO(cristicalin): drop this file after 2.21
 - name: CRI-O kubic repo name for debian os family
   set_fact:
-    crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
+    crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x', '')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
   when: ansible_os_family == "Debian"
 
 - name: Remove legacy CRI-O kubic apt repo key
diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml
index 23cab0e8a46f6dd77702f23aa3ee254abe384b83..4a667ac9a186f21e36c8c7f8a95a13a587cba484 100644
--- a/roles/container-engine/cri-o/tasks/main.yaml
+++ b/roles/container-engine/cri-o/tasks/main.yaml
@@ -32,7 +32,7 @@
 
 - name: cri-o | build a list of crio runtimes with Katacontainers runtimes
   set_fact:
-    crio_runtimes: "{{ crio_runtimes + kata_runtimes  }}"
+    crio_runtimes: "{{ crio_runtimes + kata_runtimes }}"
   when:
     - kata_containers_enabled
 
diff --git a/roles/container-engine/cri-o/tasks/reset.yml b/roles/container-engine/cri-o/tasks/reset.yml
index 0005a38a6a8ee95008b34f60a043144b009f6abc..65ee0026a64a1de59fbc85f1afe0af44c8ae98a1 100644
--- a/roles/container-engine/cri-o/tasks/reset.yml
+++ b/roles/container-engine/cri-o/tasks/reset.yml
@@ -1,7 +1,7 @@
 ---
 - name: CRI-O | Kubic repo name for debian os family
   set_fact:
-    crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
+    crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x', '')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
   when: ansible_os_family == "Debian"
   tags:
     - reset_crio
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index 314430f27c426b41b687d1b17a64c9fcb9667b23..9413ba914d38675a4d6f926564282f9a35810b86 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -22,16 +22,16 @@
   include_vars: "{{ item }}"
   with_first_found:
     - files:
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml"
-        - "{{ ansible_distribution|lower }}.yml"
-        - "{{ ansible_distribution.split(' ')[0]|lower }}.yml"
-        - "{{ ansible_os_family|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml"
-        - "{{ ansible_os_family|lower }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}-{{ host_architecture }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ host_architecture }}.yml"
+        - "{{ ansible_distribution | lower }}.yml"
+        - "{{ ansible_distribution.split(' ')[0] | lower }}.yml"
+        - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
+        - "{{ ansible_os_family | lower }}-{{ host_architecture }}.yml"
+        - "{{ ansible_os_family | lower }}.yml"
         - defaults.yml
       paths:
         - ../vars
@@ -121,7 +121,7 @@
   when:
     - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
     - not is_ostree
-    - docker_package_info.pkgs|length > 0
+    - docker_package_info.pkgs | length > 0
 
 # This is required to ensure any apt upgrade will not break kubernetes
 - name: Tell Debian hosts not to change the docker version with apt upgrade
diff --git a/roles/container-engine/docker/tasks/reset.yml b/roles/container-engine/docker/tasks/reset.yml
index fb4f02c9b2c6e445cadc2e85a5083697580de22f..51b79e5a793fcbb01b5717b8015163d09bf0bcca 100644
--- a/roles/container-engine/docker/tasks/reset.yml
+++ b/roles/container-engine/docker/tasks/reset.yml
@@ -19,7 +19,7 @@
   changed_when: true
   delay: 5
   ignore_errors: true  # noqa ignore-errors
-  when: docker_packages_list|length>0
+  when: docker_packages_list | length > 0
 
 - name: reset | remove all containers
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
@@ -29,7 +29,7 @@
   retries: 4
   until: remove_all_containers.rc == 0
   delay: 5
-  when: docker_packages_list|length>0
+  when: docker_packages_list | length > 0
 
 - name: Docker | Stop docker service
   service:
@@ -40,7 +40,7 @@
     - docker
     - docker.socket
     - containerd
-  when: docker_packages_list|length>0
+  when: docker_packages_list | length > 0
 
 - name: Docker | Remove dpkg hold
   dpkg_selections:
@@ -63,7 +63,7 @@
   when:
     - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
     - not is_ostree
-    - docker_packages_list|length > 0
+    - docker_packages_list | length > 0
 
 - name: Docker | ensure docker-ce repository is removed
   apt_repository:
diff --git a/roles/container-engine/docker/tasks/set_facts_dns.yml b/roles/container-engine/docker/tasks/set_facts_dns.yml
index 3b7b67cf56552f0b3a0f34a3e4f28bfd8a3cef7f..9d563a259eb144653f11f6e19cc2d2b7c6ab922f 100644
--- a/roles/container-engine/docker/tasks/set_facts_dns.yml
+++ b/roles/container-engine/docker/tasks/set_facts_dns.yml
@@ -10,12 +10,12 @@
 
 - name: add upstream dns servers
   set_fact:
-    docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}"
+    docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers | default([]) }}"
   when: dns_mode in ['coredns', 'coredns_dual']
 
 - name: add global searchdomains
   set_fact:
-    docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}"
+    docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains | default([]) }}"
 
 - name: check system nameservers
   shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
@@ -42,25 +42,25 @@
 
 - name: add system search domains to docker options
   set_fact:
-    docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split()|default([])) | unique }}"
+    docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split() | default([])) | unique }}"
   when: system_search_domains.stdout
 
 - name: check number of nameservers
   fail:
     msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=false in docker.yml and we will only use the first 3."
-  when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool
+  when: docker_dns_servers | length > 3 and docker_dns_servers_strict | bool
 
 - name: rtrim number of nameservers to 3
   set_fact:
     docker_dns_servers: "{{ docker_dns_servers[0:3] }}"
-  when: docker_dns_servers|length > 3 and not docker_dns_servers_strict|bool
+  when: docker_dns_servers | length > 3 and not docker_dns_servers_strict | bool
 
 - name: check number of search domains
   fail:
     msg: "Too many search domains"
-  when: docker_dns_search_domains|length > 6
+  when: docker_dns_search_domains | length > 6
 
 - name: check length of search domains
   fail:
     msg: "Search domains exceeded limit of 256 characters"
-  when: docker_dns_search_domains|join(' ')|length > 256
+  when: docker_dns_search_domains | join(' ') | length > 256
diff --git a/roles/container-engine/docker/vars/debian-bookworm.yml b/roles/container-engine/docker/vars/debian-bookworm.yml
index db20d0b3103fa740acd53a5a3849de57fad6a2a3..74a66ccb3e0fa243ccf7d953b2a027fd23d19851 100644
--- a/roles/container-engine/docker/vars/debian-bookworm.yml
+++ b/roles/container-engine/docker/vars/debian-bookworm.yml
@@ -17,17 +17,17 @@ containerd_versioned_pkg:
 # https://download.docker.com/linux/debian/
 docker_versioned_pkg:
   'latest': docker-ce
-  '23.0': docker-ce=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
-  '24.0': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
-  'stable': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
-  'edge': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
+  '23.0': docker-ce=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  '24.0': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  'stable': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  'edge': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
 
 docker_cli_versioned_pkg:
   'latest': docker-ce-cli
-  '23.0': docker-ce=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
-  '24.0': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
-  'stable': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
-  'edge': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
+  '23.0': docker-ce-cli=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  '24.0': docker-ce-cli=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  'stable': docker-ce-cli=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  'edge': docker-ce-cli=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
 
 docker_package_info:
   pkgs:
@@ -44,5 +44,5 @@ docker_repo_info:
   repos:
     - >
       deb {{ docker_debian_repo_base_url }}
-      {{ ansible_distribution_release|lower }}
+      {{ ansible_distribution_release | lower }}
       stable
diff --git a/roles/container-engine/docker/vars/debian.yml b/roles/container-engine/docker/vars/debian.yml
index d46bfa8b8f05aadd25ef43e6a546312c439a14a4..f42b001bb6d0c7acbc402bd7189aede6e841332c 100644
--- a/roles/container-engine/docker/vars/debian.yml
+++ b/roles/container-engine/docker/vars/debian.yml
@@ -16,19 +16,19 @@ containerd_versioned_pkg:
 # https://download.docker.com/linux/debian/
 docker_versioned_pkg:
   'latest': docker-ce
-  '18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }}
-  '19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }}
-  '20.10': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
-  'stable': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
-  'edge': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
+  '18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release | lower }}
+  '19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release | lower }}
+  '20.10': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
+  'stable': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
+  'edge': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
 
 docker_cli_versioned_pkg:
   'latest': docker-ce-cli
-  '18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }}
-  '19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }}
-  '20.10': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
-  'stable': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
-  'edge': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
+  '18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release | lower }}
+  '19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release | lower }}
+  '20.10': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
+  'stable': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
+  'edge': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
 
 docker_package_info:
   pkgs:
@@ -45,5 +45,5 @@ docker_repo_info:
   repos:
     - >
       deb {{ docker_debian_repo_base_url }}
-      {{ ansible_distribution_release|lower }}
+      {{ ansible_distribution_release | lower }}
       stable
diff --git a/roles/container-engine/docker/vars/ubuntu.yml b/roles/container-engine/docker/vars/ubuntu.yml
index cced07e11b24b63c1ff7275f6031ed654c6f7bde..4b9398d2622557eff988fa5ff0ddabaf7487caf1 100644
--- a/roles/container-engine/docker/vars/ubuntu.yml
+++ b/roles/container-engine/docker/vars/ubuntu.yml
@@ -16,19 +16,19 @@ containerd_versioned_pkg:
 # https://download.docker.com/linux/ubuntu/
 docker_versioned_pkg:
   'latest': docker-ce
-  '18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
-  '19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }}
-  '20.10': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
-  'stable': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
-  'edge': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
+  '18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release | lower }}
+  '19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release | lower }}
+  '20.10': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
+  'stable': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
+  'edge': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
 
 docker_cli_versioned_pkg:
   'latest': docker-ce-cli
-  '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
-  '19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }}
-  '20.10': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
-  'stable': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
-  'edge': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
+  '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release | lower }}
+  '19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release | lower }}
+  '20.10': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
+  'stable': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
+  'edge': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
 
 docker_package_info:
   pkgs:
@@ -45,5 +45,5 @@ docker_repo_info:
   repos:
     - >
       deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }}
-      {{ ansible_distribution_release|lower }}
+      {{ ansible_distribution_release | lower }}
       stable
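
The *_versioned_pkg maps in these three vars files are plain lookup tables from a user-facing version string to a pinned apt package spec. A hedged sketch of how such a map might be consumed (this task is illustrative; the actual wiring goes through docker_package_info.pkgs, truncated in the hunks above):

    - name: Install a pinned Docker package (illustrative sketch)
      apt:
        name: "{{ docker_versioned_pkg[docker_version | string] }}"
        state: present
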
diff --git a/roles/download/defaults/main/main.yml b/roles/download/defaults/main/main.yml
index 828d804577aaed6194087404ac847a33fac2a1aa..b3bc368ae8051ee5cd71595a58dab3bc5c22be95 100644
--- a/roles/download/defaults/main/main.yml
+++ b/roles/download/defaults/main/main.yml
@@ -70,10 +70,10 @@ image_pull_command_on_localhost: "{{ lookup('vars', image_command_tool_on_localh
 image_info_command_on_localhost: "{{ lookup('vars', image_command_tool_on_localhost + '_image_info_command') }}"
 
 # Arch of Docker images and needed packages
-image_arch: "{{host_architecture | default('amd64')}}"
+image_arch: "{{ host_architecture | default('amd64') }}"
 
 # Nerdctl insecure flag set
-nerdctl_extra_flags: '{%- if containerd_insecure_registries is defined and containerd_insecure_registries|length>0 -%}--insecure-registry{%- else -%}{%- endif -%}'
+nerdctl_extra_flags: '{%- if containerd_insecure_registries is defined and containerd_insecure_registries | length > 0 -%}--insecure-registry{%- else -%}{%- endif -%}'
 
 # Versions
 kubeadm_version: "{{ kube_version }}"
@@ -277,10 +277,10 @@ haproxy_image_tag: 2.6.6-alpine
 # Coredns version should be supported by corefile-migration (or at least work with)
 # bundle with kubeadm; if not 'basic' upgrade can sometimes fail
 
-coredns_version: "{{ 'v1.10.1' if (kube_version is version('v1.27.0','>=')) else 'v1.9.3' }}"
-coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1','>=')) }}"
+coredns_version: "{{ 'v1.10.1' if (kube_version is version('v1.27.0', '>=')) else 'v1.9.3' }}"
+coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1', '>=')) }}"
 
-coredns_image_repo: "{{ kube_image_repo }}{{'/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}"
+coredns_image_repo: "{{ kube_image_repo }}{{ '/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}"
 coredns_image_tag: "{{ coredns_version if (coredns_image_is_namespaced | bool) else (coredns_version | regex_replace('^v', '')) }}"
 
 nodelocaldns_version: "1.22.20"
@@ -389,7 +389,7 @@ downloads:
     container: true
     repo: "{{ netcheck_server_image_repo }}"
     tag: "{{ netcheck_server_image_tag }}"
-    sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
+    sha256: "{{ netcheck_server_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -398,7 +398,7 @@ downloads:
     container: true
     repo: "{{ netcheck_agent_image_repo }}"
     tag: "{{ netcheck_agent_image_tag }}"
-    sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
+    sha256: "{{ netcheck_agent_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -412,7 +412,7 @@ downloads:
     tag: "{{ etcd_image_tag }}"
     sha256: >-
       {{ etcd_binary_checksum if (etcd_deployment_type == 'host')
-      else etcd_digest_checksum|d(None) }}
+      else etcd_digest_checksum | d(None) }}
     url: "{{ etcd_download_url }}"
     unarchive: "{{ etcd_deployment_type == 'host' }}"
     owner: "root"
@@ -635,7 +635,7 @@ downloads:
     container: true
     repo: "{{ cilium_image_repo }}"
     tag: "{{ cilium_image_tag }}"
-    sha256: "{{ cilium_digest_checksum|default(None) }}"
+    sha256: "{{ cilium_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -644,7 +644,7 @@ downloads:
     container: true
     repo: "{{ cilium_operator_image_repo }}"
     tag: "{{ cilium_operator_image_tag }}"
-    sha256: "{{ cilium_operator_digest_checksum|default(None) }}"
+    sha256: "{{ cilium_operator_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -653,7 +653,7 @@ downloads:
     container: true
     repo: "{{ cilium_hubble_relay_image_repo }}"
     tag: "{{ cilium_hubble_relay_image_tag }}"
-    sha256: "{{ cilium_hubble_relay_digest_checksum|default(None) }}"
+    sha256: "{{ cilium_hubble_relay_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -662,7 +662,7 @@ downloads:
     container: true
     repo: "{{ cilium_hubble_certgen_image_repo }}"
     tag: "{{ cilium_hubble_certgen_image_tag }}"
-    sha256: "{{ cilium_hubble_certgen_digest_checksum|default(None) }}"
+    sha256: "{{ cilium_hubble_certgen_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -671,7 +671,7 @@ downloads:
     container: true
     repo: "{{ cilium_hubble_ui_image_repo }}"
     tag: "{{ cilium_hubble_ui_image_tag }}"
-    sha256: "{{ cilium_hubble_ui_digest_checksum|default(None) }}"
+    sha256: "{{ cilium_hubble_ui_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -680,7 +680,7 @@ downloads:
     container: true
     repo: "{{ cilium_hubble_ui_backend_image_repo }}"
     tag: "{{ cilium_hubble_ui_backend_image_tag }}"
-    sha256: "{{ cilium_hubble_ui_backend_digest_checksum|default(None) }}"
+    sha256: "{{ cilium_hubble_ui_backend_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -689,7 +689,7 @@ downloads:
     container: true
     repo: "{{ cilium_hubble_envoy_image_repo }}"
     tag: "{{ cilium_hubble_envoy_image_tag }}"
-    sha256: "{{ cilium_hubble_envoy_digest_checksum|default(None) }}"
+    sha256: "{{ cilium_hubble_envoy_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -711,7 +711,7 @@ downloads:
     container: true
     repo: "{{ multus_image_repo }}"
     tag: "{{ multus_image_tag }}"
-    sha256: "{{ multus_digest_checksum|default(None) }}"
+    sha256: "{{ multus_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -720,7 +720,7 @@ downloads:
     container: true
     repo: "{{ flannel_image_repo }}"
     tag: "{{ flannel_image_tag }}"
-    sha256: "{{ flannel_digest_checksum|default(None) }}"
+    sha256: "{{ flannel_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -729,7 +729,7 @@ downloads:
     container: true
     repo: "{{ flannel_init_image_repo }}"
     tag: "{{ flannel_init_image_tag }}"
-    sha256: "{{ flannel_init_digest_checksum|default(None) }}"
+    sha256: "{{ flannel_init_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -753,7 +753,7 @@ downloads:
     container: true
     repo: "{{ calico_node_image_repo }}"
     tag: "{{ calico_node_image_tag }}"
-    sha256: "{{ calico_node_digest_checksum|default(None) }}"
+    sha256: "{{ calico_node_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -762,7 +762,7 @@ downloads:
     container: true
     repo: "{{ calico_cni_image_repo }}"
     tag: "{{ calico_cni_image_tag }}"
-    sha256: "{{ calico_cni_digest_checksum|default(None) }}"
+    sha256: "{{ calico_cni_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -771,7 +771,7 @@ downloads:
     container: true
     repo: "{{ calico_flexvol_image_repo }}"
     tag: "{{ calico_flexvol_image_tag }}"
-    sha256: "{{ calico_flexvol_digest_checksum|default(None) }}"
+    sha256: "{{ calico_flexvol_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -780,7 +780,7 @@ downloads:
     container: true
     repo: "{{ calico_policy_image_repo }}"
     tag: "{{ calico_policy_image_tag }}"
-    sha256: "{{ calico_policy_digest_checksum|default(None) }}"
+    sha256: "{{ calico_policy_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -789,7 +789,7 @@ downloads:
     container: true
     repo: "{{ calico_typha_image_repo }}"
     tag: "{{ calico_typha_image_tag }}"
-    sha256: "{{ calico_typha_digest_checksum|default(None) }}"
+    sha256: "{{ calico_typha_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -798,7 +798,7 @@ downloads:
     container: true
     repo: "{{ calico_apiserver_image_repo }}"
     tag: "{{ calico_apiserver_image_tag }}"
-    sha256: "{{ calico_apiserver_digest_checksum|default(None) }}"
+    sha256: "{{ calico_apiserver_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -811,9 +811,9 @@ downloads:
     url: "{{ calico_crds_download_url }}"
     unarchive: true
     unarchive_extra_opts:
-    - "{{ '--strip=6' if (calico_version is version('v3.22.3','<')) else '--strip=3' }}"
+    - "{{ '--strip=6' if (calico_version is version('v3.22.3', '<')) else '--strip=3' }}"
     - "--wildcards"
-    - "{{ '*/_includes/charts/calico/crds/kdd/' if (calico_version is version('v3.22.3','<')) else '*/libcalico-go/config/crd/' }}"
+    - "{{ '*/_includes/charts/calico/crds/kdd/' if (calico_version is version('v3.22.3', '<')) else '*/libcalico-go/config/crd/' }}"
     owner: "root"
     mode: "0755"
     groups:
@@ -824,7 +824,7 @@ downloads:
     container: true
     repo: "{{ weave_kube_image_repo }}"
     tag: "{{ weave_kube_image_tag }}"
-    sha256: "{{ weave_kube_digest_checksum|default(None) }}"
+    sha256: "{{ weave_kube_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -833,7 +833,7 @@ downloads:
     container: true
     repo: "{{ weave_npc_image_repo }}"
     tag: "{{ weave_npc_image_tag }}"
-    sha256: "{{ weave_npc_digest_checksum|default(None) }}"
+    sha256: "{{ weave_npc_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -842,7 +842,7 @@ downloads:
     container: true
     repo: "{{ kube_ovn_container_image_repo }}"
     tag: "{{ kube_ovn_container_image_tag }}"
-    sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
+    sha256: "{{ kube_ovn_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -851,7 +851,7 @@ downloads:
     container: true
     repo: "{{ kube_router_image_repo }}"
     tag: "{{ kube_router_image_tag }}"
-    sha256: "{{ kube_router_digest_checksum|default(None) }}"
+    sha256: "{{ kube_router_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -860,7 +860,7 @@ downloads:
     container: true
     repo: "{{ pod_infra_image_repo }}"
     tag: "{{ pod_infra_image_tag }}"
-    sha256: "{{ pod_infra_digest_checksum|default(None) }}"
+    sha256: "{{ pod_infra_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -869,7 +869,7 @@ downloads:
     container: true
     repo: "{{ kube_vip_image_repo }}"
     tag: "{{ kube_vip_image_tag }}"
-    sha256: "{{ kube_vip_digest_checksum|default(None) }}"
+    sha256: "{{ kube_vip_digest_checksum | default(None) }}"
     groups:
     - kube_control_plane
 
@@ -878,7 +878,7 @@ downloads:
     container: true
     repo: "{{ nginx_image_repo }}"
     tag: "{{ nginx_image_tag }}"
-    sha256: "{{ nginx_digest_checksum|default(None) }}"
+    sha256: "{{ nginx_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -887,7 +887,7 @@ downloads:
     container: true
     repo: "{{ haproxy_image_repo }}"
     tag: "{{ haproxy_image_tag }}"
-    sha256: "{{ haproxy_digest_checksum|default(None) }}"
+    sha256: "{{ haproxy_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -896,7 +896,7 @@ downloads:
     container: true
     repo: "{{ coredns_image_repo }}"
     tag: "{{ coredns_image_tag }}"
-    sha256: "{{ coredns_digest_checksum|default(None) }}"
+    sha256: "{{ coredns_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -905,7 +905,7 @@ downloads:
     container: true
     repo: "{{ nodelocaldns_image_repo }}"
     tag: "{{ nodelocaldns_image_tag }}"
-    sha256: "{{ nodelocaldns_digest_checksum|default(None) }}"
+    sha256: "{{ nodelocaldns_digest_checksum | default(None) }}"
     groups:
     - k8s_cluster
 
@@ -914,7 +914,7 @@ downloads:
     container: true
     repo: "{{ dnsautoscaler_image_repo }}"
     tag: "{{ dnsautoscaler_image_tag }}"
-    sha256: "{{ dnsautoscaler_digest_checksum|default(None) }}"
+    sha256: "{{ dnsautoscaler_digest_checksum | default(None) }}"
     groups:
     - kube_control_plane
 
@@ -949,7 +949,7 @@ downloads:
     container: true
     repo: "{{ registry_image_repo }}"
     tag: "{{ registry_image_tag }}"
-    sha256: "{{ registry_digest_checksum|default(None) }}"
+    sha256: "{{ registry_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -958,7 +958,7 @@ downloads:
     container: true
     repo: "{{ metrics_server_image_repo }}"
     tag: "{{ metrics_server_image_tag }}"
-    sha256: "{{ metrics_server_digest_checksum|default(None) }}"
+    sha256: "{{ metrics_server_digest_checksum | default(None) }}"
     groups:
     - kube_control_plane
 
@@ -967,7 +967,7 @@ downloads:
     container: true
     repo: "{{ local_volume_provisioner_image_repo }}"
     tag: "{{ local_volume_provisioner_image_tag }}"
-    sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
+    sha256: "{{ local_volume_provisioner_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -976,7 +976,7 @@ downloads:
     container: true
     repo: "{{ cephfs_provisioner_image_repo }}"
     tag: "{{ cephfs_provisioner_image_tag }}"
-    sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}"
+    sha256: "{{ cephfs_provisioner_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -985,7 +985,7 @@ downloads:
     container: true
     repo: "{{ rbd_provisioner_image_repo }}"
     tag: "{{ rbd_provisioner_image_tag }}"
-    sha256: "{{ rbd_provisioner_digest_checksum|default(None) }}"
+    sha256: "{{ rbd_provisioner_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -994,7 +994,7 @@ downloads:
     container: true
     repo: "{{ local_path_provisioner_image_repo }}"
     tag: "{{ local_path_provisioner_image_tag }}"
-    sha256: "{{ local_path_provisioner_digest_checksum|default(None) }}"
+    sha256: "{{ local_path_provisioner_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1003,7 +1003,7 @@ downloads:
     container: true
     repo: "{{ ingress_nginx_controller_image_repo }}"
     tag: "{{ ingress_nginx_controller_image_tag }}"
-    sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
+    sha256: "{{ ingress_nginx_controller_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1012,7 +1012,7 @@ downloads:
     container: true
     repo: "{{ alb_ingress_image_repo }}"
     tag: "{{ alb_ingress_image_tag }}"
-    sha256: "{{ ingress_alb_controller_digest_checksum|default(None) }}"
+    sha256: "{{ ingress_alb_controller_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1021,7 +1021,7 @@ downloads:
     container: true
     repo: "{{ cert_manager_controller_image_repo }}"
     tag: "{{ cert_manager_controller_image_tag }}"
-    sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
+    sha256: "{{ cert_manager_controller_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1030,7 +1030,7 @@ downloads:
     container: true
     repo: "{{ cert_manager_cainjector_image_repo }}"
     tag: "{{ cert_manager_cainjector_image_tag }}"
-    sha256: "{{ cert_manager_cainjector_digest_checksum|default(None) }}"
+    sha256: "{{ cert_manager_cainjector_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1039,7 +1039,7 @@ downloads:
     container: true
     repo: "{{ cert_manager_webhook_image_repo }}"
     tag: "{{ cert_manager_webhook_image_tag }}"
-    sha256: "{{ cert_manager_webhook_digest_checksum|default(None) }}"
+    sha256: "{{ cert_manager_webhook_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1048,7 +1048,7 @@ downloads:
     container: true
     repo: "{{ csi_attacher_image_repo }}"
     tag: "{{ csi_attacher_image_tag }}"
-    sha256: "{{ csi_attacher_digest_checksum|default(None) }}"
+    sha256: "{{ csi_attacher_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1057,7 +1057,7 @@ downloads:
     container: true
     repo: "{{ csi_provisioner_image_repo }}"
     tag: "{{ csi_provisioner_image_tag }}"
-    sha256: "{{ csi_provisioner_digest_checksum|default(None) }}"
+    sha256: "{{ csi_provisioner_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1066,7 +1066,7 @@ downloads:
     container: true
     repo: "{{ csi_snapshotter_image_repo }}"
     tag: "{{ csi_snapshotter_image_tag }}"
-    sha256: "{{ csi_snapshotter_digest_checksum|default(None) }}"
+    sha256: "{{ csi_snapshotter_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1075,7 +1075,7 @@ downloads:
     container: true
     repo: "{{ snapshot_controller_image_repo }}"
     tag: "{{ snapshot_controller_image_tag }}"
-    sha256: "{{ snapshot_controller_digest_checksum|default(None) }}"
+    sha256: "{{ snapshot_controller_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1084,7 +1084,7 @@ downloads:
     container: true
     repo: "{{ csi_resizer_image_repo }}"
     tag: "{{ csi_resizer_image_tag }}"
-    sha256: "{{ csi_resizer_digest_checksum|default(None) }}"
+    sha256: "{{ csi_resizer_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1093,7 +1093,7 @@ downloads:
     container: true
     repo: "{{ csi_node_driver_registrar_image_repo }}"
     tag: "{{ csi_node_driver_registrar_image_tag }}"
-    sha256: "{{ csi_node_driver_registrar_digest_checksum|default(None) }}"
+    sha256: "{{ csi_node_driver_registrar_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1102,7 +1102,7 @@ downloads:
     container: true
     repo: "{{ cinder_csi_plugin_image_repo }}"
     tag: "{{ cinder_csi_plugin_image_tag }}"
-    sha256: "{{ cinder_csi_plugin_digest_checksum|default(None) }}"
+    sha256: "{{ cinder_csi_plugin_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1111,7 +1111,7 @@ downloads:
     container: true
     repo: "{{ aws_ebs_csi_plugin_image_repo }}"
     tag: "{{ aws_ebs_csi_plugin_image_tag }}"
-    sha256: "{{ aws_ebs_csi_plugin_digest_checksum|default(None) }}"
+    sha256: "{{ aws_ebs_csi_plugin_digest_checksum | default(None) }}"
     groups:
     - kube_node
 
@@ -1120,7 +1120,7 @@ downloads:
     container: true
     repo: "{{ dashboard_image_repo }}"
     tag: "{{ dashboard_image_tag }}"
-    sha256: "{{ dashboard_digest_checksum|default(None) }}"
+    sha256: "{{ dashboard_digest_checksum | default(None) }}"
     groups:
     - kube_control_plane
 
@@ -1129,7 +1129,7 @@ downloads:
     container: true
     repo: "{{ dashboard_metrics_scraper_repo }}"
     tag: "{{ dashboard_metrics_scraper_tag }}"
-    sha256: "{{ dashboard_digest_checksum|default(None) }}"
+    sha256: "{{ dashboard_digest_checksum | default(None) }}"
     groups:
     - kube_control_plane
 
@@ -1138,7 +1138,7 @@ downloads:
     container: true
     repo: "{{ metallb_speaker_image_repo }}"
     tag: "{{ metallb_version }}"
-    sha256: "{{ metallb_speaker_digest_checksum|default(None) }}"
+    sha256: "{{ metallb_speaker_digest_checksum | default(None) }}"
     groups:
     - kube_control_plane
 
@@ -1147,7 +1147,7 @@ downloads:
     container: true
     repo: "{{ metallb_controller_image_repo }}"
     tag: "{{ metallb_version }}"
-    sha256: "{{ metallb_controller_digest_checksum|default(None) }}"
+    sha256: "{{ metallb_controller_digest_checksum | default(None) }}"
     groups:
     - kube_control_plane
 
@@ -1156,7 +1156,7 @@ downloads:
     file: true
     version: "{{ yq_version }}"
     dest: "{{ local_release_dir }}/yq-{{ yq_version }}-{{ image_arch }}"
-    sha256: "{{ yq_binary_checksum|default(None) }}"
+    sha256: "{{ yq_binary_checksum | default(None) }}"
     url: "{{ yq_download_url }}"
     unarchive: false
     owner: "root"
diff --git a/roles/download/tasks/check_pull_required.yml b/roles/download/tasks/check_pull_required.yml
index 449589b4ce967ddeedbde0ee56706df8b21ed47b..c0681a7ec270077d7dbd58faa529e39ca4fd7924 100644
--- a/roles/download/tasks/check_pull_required.yml
+++ b/roles/download/tasks/check_pull_required.yml
@@ -11,7 +11,7 @@
 - name: check_pull_required | Set pull_required if the desired image is not yet loaded
   set_fact:
     pull_required: >-
-      {%- if image_reponame | regex_replace('^docker\.io/(library/)?','') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%}
+      {%- if image_reponame | regex_replace('^docker\.io/(library/)?', '') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%}
   when: not download_always_pull
 
 - name: check_pull_required | Check that the local digest sha256 corresponds to the given image tag
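
The pull_required fact compares a normalized repository name against the comma-separated list of images already present on the node; the regex_replace strips an optional docker.io/ (and library/) prefix so Docker Hub shorthand and fully qualified names compare equal. Illustrative evaluations of that filter (Jinja expressions shown with their results):

    {{ 'docker.io/library/nginx' | regex_replace('^docker\.io/(library/)?', '') }}   # -> nginx
    {{ 'docker.io/calico/node' | regex_replace('^docker\.io/(library/)?', '') }}     # -> calico/node
    {{ 'quay.io/cilium/cilium' | regex_replace('^docker\.io/(library/)?', '') }}     # -> quay.io/cilium/cilium
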
diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml
index fba76405e8cabf7c1d5bdee431f8b5602ae0698b..0db1eec68ccb574ab170c48840c9b9d935184b53 100644
--- a/roles/download/tasks/download_file.yml
+++ b/roles/download/tasks/download_file.yml
@@ -68,7 +68,7 @@
     retries: "{{ download_retries }}"
     delay: "{{ retry_stagger | default(5) }}"
     environment: "{{ proxy_env }}"
-    no_log: "{{ not (unsafe_show_logs|bool) }}"
+    no_log: "{{ not (unsafe_show_logs | bool) }}"
     loop: "{{ download.mirrors | default([download.url]) }}"
     loop_control:
       loop_var: mirror
@@ -102,7 +102,7 @@
     retries: "{{ download_retries }}"
     delay: "{{ retry_stagger | default(5) }}"
     environment: "{{ proxy_env }}"
-    no_log: "{{ not (unsafe_show_logs|bool) }}"
+    no_log: "{{ not (unsafe_show_logs | bool) }}"
 
   - name: download_file | Copy file back to ansible host file cache
     ansible.posix.synchronize:
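
Both download paths above loop over "download.mirrors | default([download.url])": when no mirror list is defined, the default filter substitutes a one-element list holding the primary URL, so the loop body needs no special case. A condensed sketch with illustrative names:

    - name: Fetch an artifact from each configured mirror (sketch)
      get_url:
        url: "{{ mirror }}"
        dest: /tmp/artifact.tar.gz
      loop: "{{ my_download.mirrors | default([my_download.url]) }}"
      loop_control:
        loop_var: mirror
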
diff --git a/roles/download/tasks/extract_file.yml b/roles/download/tasks/extract_file.yml
index 81858dd3a03926173ed482aa32746004b5103236..94f240edb7bc6b66aff089203774fd58e9b0d768 100644
--- a/roles/download/tasks/extract_file.yml
+++ b/roles/download/tasks/extract_file.yml
@@ -6,6 +6,6 @@
     owner: "{{ download.owner | default(omit) }}"
     mode: "{{ download.mode | default(omit) }}"
     copy: no
-    extra_opts: "{{ download.unarchive_extra_opts|default(omit) }}"
+    extra_opts: "{{ download.unarchive_extra_opts | default(omit) }}"
   when:
     - download.unarchive | default(false)
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index 536c293a7c22a7c3a65448c5403f25f7d9074687..92313a58ac00c173731d522c6f7139c08cb28671 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: download | Prepare working directories and variables
   import_tasks: prep_download.yml
   when:
-    - not skip_downloads|default(false)
+    - not skip_downloads | default(false)
   tags:
     - download
     - upload
@@ -10,7 +10,7 @@
 - name: download | Get kubeadm binary and list of required images
   include_tasks: prep_kubeadm_images.yml
   when:
-    - not skip_downloads|default(false)
+    - not skip_downloads | default(false)
     - inventory_hostname in groups['kube_control_plane']
   tags:
     - download
diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml
index 587810d48159548427f668165a44bac7c1c1ecff..0554d1b295bac9d4bf9cca8b44902d9f37c54880 100644
--- a/roles/download/tasks/prep_download.yml
+++ b/roles/download/tasks/prep_download.yml
@@ -58,7 +58,7 @@
 
 - name: prep_download | Register docker images info
   shell: "{{ image_info_command }}"  # noqa command-instead-of-shell - image_info_command contains pipe therefore requires shell
-  no_log: "{{ not (unsafe_show_logs|bool) }}"
+  no_log: "{{ not (unsafe_show_logs | bool) }}"
   register: docker_images
   failed_when: false
   changed_when: false
diff --git a/roles/download/tasks/prep_kubeadm_images.yml b/roles/download/tasks/prep_kubeadm_images.yml
index 2ab216a761b829f87cf8b4c9c3cfbf6869588c92..e1dc3af4c0c9810605ba4884d11f268aa752e85d 100644
--- a/roles/download/tasks/prep_kubeadm_images.yml
+++ b/roles/download/tasks/prep_kubeadm_images.yml
@@ -20,7 +20,7 @@
     dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
     mode: 0644
   when:
-    - not skip_kubeadm_images|default(false)
+    - not skip_kubeadm_images | default(false)
 
 - name: prep_kubeadm_images | Copy kubeadm binary from download dir to system path
   copy:
@@ -36,36 +36,36 @@
     state: file
 
 - name: prep_kubeadm_images | Generate list of required images
-  shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'"
+  shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns | pause'"
   args:
     executable: /bin/bash
   register: kubeadm_images_raw
   run_once: true
   changed_when: false
   when:
-    - not skip_kubeadm_images|default(false)
+    - not skip_kubeadm_images | default(false)
 
 - name: prep_kubeadm_images | Parse list of images
   vars:
     kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}"
   set_fact:
     kubeadm_image:
-      key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*','')).split(':')[0] }}"
+      key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*', '')).split(':')[0] }}"
       value:
         enabled: true
         container: true
-        repo: "{{ item | regex_replace('^(.*):.*$','\\1') }}"
-        tag: "{{ item | regex_replace('^.*:(.*)$','\\1') }}"
+        repo: "{{ item | regex_replace('^(.*):.*$', '\\1') }}"
+        tag: "{{ item | regex_replace('^.*:(.*)$', '\\1') }}"
         groups: k8s_cluster
   loop: "{{ kubeadm_images_list | flatten(levels=1) }}"
   register: kubeadm_images_cooked
   run_once: true
   when:
-    - not skip_kubeadm_images|default(false)
+    - not skip_kubeadm_images | default(false)
 
 - name: prep_kubeadm_images | Convert list of images to dict for later use
   set_fact:
     kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
   run_once: true
   when:
-    - not skip_kubeadm_images|default(false)
+    - not skip_kubeadm_images | default(false)
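
The parse step above splits each kubeadm-reported image reference with anchored regex_replace calls, and items2dict then folds the accumulated key/value pairs into the kubeadm_images dict. A hedged sketch of the splitting applied to one illustrative reference:

    - set_fact:
        repo: "{{ img | regex_replace('^(.*):.*$', '\\1') }}"                        # -> registry.k8s.io/kube-apiserver
        tag: "{{ img | regex_replace('^.*:(.*)$', '\\1') }}"                         # -> v1.27.2
        key: "kubeadm_{{ (img | regex_replace('^(?:.*\\/)*', '')).split(':')[0] }}"  # -> kubeadm_kube-apiserver
      vars:
        img: registry.k8s.io/kube-apiserver:v1.27.2
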
diff --git a/roles/download/templates/kubeadm-images.yaml.j2 b/roles/download/templates/kubeadm-images.yaml.j2
index 3a9121defdb9d4289fe2bab129e93ab56037f077..36154b31a886ae6b260718c564407c386da9f2a0 100644
--- a/roles/download/templates/kubeadm-images.yaml.j2
+++ b/roles/download/templates/kubeadm-images.yaml.j2
@@ -21,5 +21,5 @@ etcd:
 {% endif %}
 dns:
   type: CoreDNS
-  imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }}
+  imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$', '') }}
   imageTag: {{ coredns_image_tag }}
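
The negative lookahead in that imageRepository line strips a trailing /coredns path segment only when it is not followed by another /coredns, so both the flat and the namespaced coredns layouts reduce to the repository prefix kubeadm expects. Illustrative inputs and outputs (the hosts are made up):

    # regex_replace('/coredns(?!/coredns).*$', '')
    # "example.com/coredns"         -> "example.com"
    # "example.com/coredns/coredns" -> "example.com/coredns"
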
diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml
index b67c6b2c97495885bb5bd3f64a82384905d38838..2cb802d4e965c6cf5e4e85780071a20490608355 100644
--- a/roles/etcd/tasks/check_certs.yml
+++ b/roles/etcd/tasks/check_certs.yml
@@ -42,7 +42,7 @@
 - name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(1/2)"
   set_fact:
     gen_certs: true
-  when: force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list
+  when: force_etcd_cert_refresh or not item in etcdcert_master.files | map(attribute='path') | list
   run_once: true
   with_items: "{{ expected_files }}"
   vars:
@@ -59,7 +59,7 @@
       {% for host in k8s_nodes %}
         '{{ etcd_cert_dir }}/node-{{ host }}.pem',
         '{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
-        {% if not loop.last %}{{','}}{% endif %}
+        {% if not loop.last %}{{ ',' }}{% endif %}
       {% endfor %}]
 
 - name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(2/2)"
@@ -77,28 +77,29 @@
         '{{ etcd_cert_dir }}/member-{{ host }}.pem',
         '{{ etcd_cert_dir }}/member-{{ host }}-key.pem',
       {% endfor %}
-      {% set k8s_nodes = groups['k8s_cluster']|unique|sort %}
+      {% set k8s_nodes = groups['k8s_cluster'] | unique | sort %}
       {% for host in k8s_nodes %}
         '{{ etcd_cert_dir }}/node-{{ host }}.pem',
         '{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
-        {% if not loop.last %}{{','}}{% endif %}
+        {% if not loop.last %}{{ ',' }}{% endif %}
       {% endfor %}]
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
-    - force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list
+    - force_etcd_cert_refresh or not item in etcdcert_master.files | map(attribute='path') | list
 
 - name: "Check_certs | Set 'gen_master_certs' object to track whether member and admin certs exist on first etcd node"
   set_fact:
+    # noqa: jinja[spacing]
     gen_master_certs: |-
       {
       {% set etcd_members = groups['etcd'] -%}
-      {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
+      {% set existing_certs = etcdcert_master.files | map(attribute='path') | list | sort %}
       {% for host in etcd_members -%}
-        {% set member_cert = "%s/member-%s.pem"|format(etcd_cert_dir, host) %}
-        {% set member_key = "%s/member-%s-key.pem"|format(etcd_cert_dir, host) %}
-        {% set admin_cert = "%s/admin-%s.pem"|format(etcd_cert_dir, host) %}
-        {% set admin_key = "%s/admin-%s-key.pem"|format(etcd_cert_dir, host) %}
+        {% set member_cert = "%s/member-%s.pem" | format(etcd_cert_dir, host) %}
+        {% set member_key = "%s/member-%s-key.pem" | format(etcd_cert_dir, host) %}
+        {% set admin_cert = "%s/admin-%s.pem" | format(etcd_cert_dir, host) %}
+        {% set admin_key = "%s/admin-%s-key.pem" | format(etcd_cert_dir, host) %}
         {% if force_etcd_cert_refresh -%}
         "{{ host }}": True,
         {% elif member_cert in existing_certs and member_key in existing_certs and admin_cert in existing_certs and admin_key in existing_certs  -%}
@@ -112,13 +113,14 @@
 
 - name: "Check_certs | Set 'gen_node_certs' object to track whether node certs exist on first etcd node"
   set_fact:
+    # noqa: jinja[spacing]
     gen_node_certs: |-
       {
       {% set k8s_nodes = groups['k8s_cluster'] -%}
-      {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
+      {% set existing_certs = etcdcert_master.files | map(attribute='path') | list | sort %}
       {% for host in k8s_nodes -%}
-        {% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %}
-        {% set host_key = "%s/node-%s-key.pem"|format(etcd_cert_dir, host) %}
+        {% set host_cert = "%s/node-%s.pem" | format(etcd_cert_dir, host) %}
+        {% set host_key = "%s/node-%s-key.pem" | format(etcd_cert_dir, host) %}
         {% if force_etcd_cert_refresh -%}
         "{{ host }}": True,
         {% elif host_cert in existing_certs and host_key in existing_certs -%}
@@ -135,16 +137,16 @@
     etcd_member_requires_sync: true
   when:
     - inventory_hostname in groups['etcd']
-    - (not etcd_member_certs.results[0].stat.exists|default(false)) or
-      (not etcd_member_certs.results[1].stat.exists|default(false)) or
-      (not etcd_member_certs.results[2].stat.exists|default(false)) or
-      (not etcd_member_certs.results[3].stat.exists|default(false)) or
-      (not etcd_member_certs.results[4].stat.exists|default(false)) or
-      (etcd_member_certs.results[0].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[0].stat.path)|map(attribute="checksum")|first|default('')) or
-      (etcd_member_certs.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[1].stat.path)|map(attribute="checksum")|first|default('')) or
-      (etcd_member_certs.results[2].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[2].stat.path)|map(attribute="checksum")|first|default('')) or
-      (etcd_member_certs.results[3].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[3].stat.path)|map(attribute="checksum")|first|default('')) or
-      (etcd_member_certs.results[4].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[4].stat.path)|map(attribute="checksum")|first|default(''))
+    - (not etcd_member_certs.results[0].stat.exists | default(false)) or
+      (not etcd_member_certs.results[1].stat.exists | default(false)) or
+      (not etcd_member_certs.results[2].stat.exists | default(false)) or
+      (not etcd_member_certs.results[3].stat.exists | default(false)) or
+      (not etcd_member_certs.results[4].stat.exists | default(false)) or
+      (etcd_member_certs.results[0].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[0].stat.path) | map(attribute="checksum") | first | default('')) or
+      (etcd_member_certs.results[1].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[1].stat.path) | map(attribute="checksum") | first | default('')) or
+      (etcd_member_certs.results[2].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[2].stat.path) | map(attribute="checksum") | first | default('')) or
+      (etcd_member_certs.results[3].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[3].stat.path) | map(attribute="checksum") | first | default('')) or
+      (etcd_member_certs.results[4].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[4].stat.path) | map(attribute="checksum") | first | default(''))
 
 - name: "Check_certs | Set 'kubernetes_host_requires_sync' to true if ca or node cert and key don't exist on kubernetes host or checksum doesn't match"
   set_fact:
@@ -152,18 +154,18 @@
   when:
     - inventory_hostname in groups['k8s_cluster'] and
       inventory_hostname not in groups['etcd']
-    - (not etcd_node_certs.results[0].stat.exists|default(false)) or
-      (not etcd_node_certs.results[1].stat.exists|default(false)) or
-      (not etcd_node_certs.results[2].stat.exists|default(false)) or
-      (etcd_node_certs.results[0].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[0].stat.path)|map(attribute="checksum")|first|default('')) or
-      (etcd_node_certs.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[1].stat.path)|map(attribute="checksum")|first|default('')) or
-      (etcd_node_certs.results[2].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[2].stat.path)|map(attribute="checksum")|first|default(''))
+    - (not etcd_node_certs.results[0].stat.exists | default(false)) or
+      (not etcd_node_certs.results[1].stat.exists | default(false)) or
+      (not etcd_node_certs.results[2].stat.exists | default(false)) or
+      (etcd_node_certs.results[0].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_node_certs.results[0].stat.path) | map(attribute="checksum") | first | default('')) or
+      (etcd_node_certs.results[1].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_node_certs.results[1].stat.path) | map(attribute="checksum") | first | default('')) or
+      (etcd_node_certs.results[2].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_node_certs.results[2].stat.path) | map(attribute="checksum") | first | default(''))
 
 - name: "Check_certs | Set 'sync_certs' to true"
   set_fact:
     sync_certs: true
   when:
-    - etcd_member_requires_sync|default(false) or
-      kubernetes_host_requires_sync|default(false) or
+    - etcd_member_requires_sync | default(false) or
+      kubernetes_host_requires_sync | default(false) or
       (inventory_hostname in gen_master_certs and gen_master_certs[inventory_hostname]) or
       (inventory_hostname in gen_node_certs and gen_node_certs[inventory_hostname])
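
The gen_master_certs and gen_node_certs facts are built by rendering a literal Python-style dict from a |- block, one "host": True/False entry per loop pass; the inline "# noqa: jinja[spacing]" markers skip the lint rule where automatically re-spacing these multi-line templates would be risky. A toy version of the same pattern, with illustrative hosts and paths:

    - set_fact:
        # noqa: jinja[spacing]
        cert_present: |-
          {
          {% for host in ['node1', 'node2'] -%}
          "{{ host }}": {{ ('/etc/ssl/etcd/ssl/node-%s.pem' | format(host)) in existing_certs }},
          {% endfor -%}
          }
      vars:
        existing_certs: ['/etc/ssl/etcd/ssl/node-node1.pem']

Ansible converts the dict-looking result back into a mapping, so cert_present ends up as {'node1': True, 'node2': False}.
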
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index 7beda4d7895698496f3feacb4256b4a8b6befa55..cd66de7ebeaa4d731abf8fcde1c18e356a36067d 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -25,7 +25,7 @@
   run_once: yes
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
-    - gen_certs|default(false)
+    - gen_certs | default(false)
     - inventory_hostname == groups['etcd'][0]
 
 - name: Gen_certs | copy certs generation script
@@ -35,7 +35,7 @@
     mode: 0700
   run_once: yes
   when:
-    - gen_certs|default(false)
+    - gen_certs | default(false)
     - inventory_hostname == groups['etcd'][0]
 
 - name: Gen_certs | run cert generation script for etcd and kube control plane nodes
@@ -55,7 +55,7 @@
       {% endfor %}
   run_once: yes
   delegate_to: "{{ groups['etcd'][0] }}"
-  when: gen_certs|default(false)
+  when: gen_certs | default(false)
   notify: set etcd_secret_changed
 
 - name: Gen_certs | run cert generation script for all clients
@@ -72,7 +72,7 @@
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
-    - gen_certs|default(false)
+    - gen_certs | default(false)
   notify: set etcd_secret_changed
 
 - name: Gen_certs | Gather etcd member/admin and kube_control_plane client certs from first etcd node
@@ -95,7 +95,7 @@
   delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - inventory_hostname in groups['etcd']
-    - sync_certs|default(false)
+    - sync_certs | default(false)
     - inventory_hostname != groups['etcd'][0]
   notify: set etcd_secret_changed
 
@@ -109,7 +109,7 @@
   with_items: "{{ etcd_master_certs.results }}"
   when:
     - inventory_hostname in groups['etcd']
-    - sync_certs|default(false)
+    - sync_certs | default(false)
     - inventory_hostname != groups['etcd'][0]
   loop_control:
     label: "{{ item.item }}"
@@ -150,14 +150,14 @@
 - include_tasks: gen_nodes_certs_script.yml
   when:
     - inventory_hostname in groups['kube_control_plane'] and
-        sync_certs|default(false) and inventory_hostname not in groups['etcd']
+        sync_certs | default(false) and inventory_hostname not in groups['etcd']
 
 - include_tasks: gen_nodes_certs_script.yml
   when:
     - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
     - inventory_hostname in groups['k8s_cluster'] and
-        sync_certs|default(false) and inventory_hostname not in groups['etcd']
+        sync_certs | default(false) and inventory_hostname not in groups['etcd']
 
 - name: Gen_certs | check certificate permissions
   file:
diff --git a/roles/etcd/tasks/gen_nodes_certs_script.yml b/roles/etcd/tasks/gen_nodes_certs_script.yml
index 73e64c29f9910387b13e2323b8f96ecea07ff03f..a7b31db26ac3d2f64db84f685601aeb013b5e952 100644
--- a/roles/etcd/tasks/gen_nodes_certs_script.yml
+++ b/roles/etcd/tasks/gen_nodes_certs_script.yml
@@ -14,18 +14,18 @@
     - "{{ my_etcd_node_certs }}"
 
 - name: Gen_certs | Gather node certs
-  shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0"
+  shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs | join(' ') }} | base64 --wrap=0"
   args:
     executable: /bin/bash
-  no_log: "{{ not (unsafe_show_logs|bool) }}"
+  no_log: "{{ not (unsafe_show_logs | bool) }}"
   register: etcd_node_certs
   check_mode: no
   delegate_to: "{{ groups['etcd'][0] }}"
   changed_when: false
 
 - name: Gen_certs | Copy certs on nodes
-  shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
+  shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout | quote }}' | tar xz -C {{ etcd_cert_dir }}"
   args:
     executable: /bin/bash
-  no_log: "{{ not (unsafe_show_logs|bool) }}"
+  no_log: "{{ not (unsafe_show_logs | bool) }}"
   changed_when: false
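
These two tasks move a certificate bundle between hosts without an intermediate file: tar piped through base64 on the first etcd node into a registered variable, then base64 -d piped back into tar on the receiver, with no_log keeping the key material out of the play output unless unsafe_show_logs is set. A condensed sketch of the round trip (paths and file names are illustrative):

    - name: Pack certs on the source node (sketch)
      shell: "set -o pipefail && tar cfz - -C /etc/ssl/etcd/ssl node-n1.pem node-n1-key.pem | base64 --wrap=0"
      args:
        executable: /bin/bash
      register: packed
      no_log: true

    - name: Unpack on the target node (sketch)
      shell: "set -o pipefail && base64 -d <<< '{{ packed.stdout | quote }}' | tar xz -C /etc/ssl/etcd/ssl"
      args:
        executable: /bin/bash
      no_log: true
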
diff --git a/roles/etcd/tasks/install_docker.yml b/roles/etcd/tasks/install_docker.yml
index 025a0ba17736767bfb09675cfb1c18b3fb59e946..4c0923b6ee24ba9a3877b8c894e0fb45a0f1dec4 100644
--- a/roles/etcd/tasks/install_docker.yml
+++ b/roles/etcd/tasks/install_docker.yml
@@ -17,14 +17,14 @@
   notify: restart etcd
   when:
     - etcd_cluster_setup
-    - etcd_image_tag not in etcd_current_docker_image.stdout|default('')
+    - etcd_image_tag not in etcd_current_docker_image.stdout | default('')
 
 - name: Restart etcd-events if necessary
   command: /bin/true
   notify: restart etcd-events
   when:
     - etcd_events_cluster_setup
-    - etcd_image_tag not in etcd_events_current_docker_image.stdout|default('')
+    - etcd_image_tag not in etcd_events_current_docker_image.stdout | default('')
 
 - name: Install etcd launch script
   template:
diff --git a/roles/etcd/tasks/install_host.yml b/roles/etcd/tasks/install_host.yml
index 14a75b48b857e1e93c429f72de5abfa9be56ee41..6abea352bc58693dfd9953af0422cfd2c72d3a9a 100644
--- a/roles/etcd/tasks/install_host.yml
+++ b/roles/etcd/tasks/install_host.yml
@@ -11,14 +11,14 @@
   notify: restart etcd
   when:
     - etcd_cluster_setup
-    - etcd_version.lstrip('v') not in etcd_current_host_version.stdout|default('')
+    - etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')
 
 - name: Restart etcd-events if necessary
   command: /bin/true
   notify: restart etcd-events
   when:
     - etcd_events_cluster_setup
-    - etcd_version.lstrip('v') not in etcd_current_host_version.stdout|default('')
+    - etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')
 
 - name: install | Download etcd and etcdctl
   include_tasks: "../../download/tasks/download_file.yml"
diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml
index cfd0a33b01022a0c6fb263d0a7683f0696c9ccdc..205549badb30195d97b5719fe4c64497b6db5dea 100644
--- a/roles/etcd/tasks/join_etcd-events_member.yml
+++ b/roles/etcd/tasks/join_etcd-events_member.yml
@@ -14,10 +14,11 @@
 
 - include_tasks: refresh_config.yml
   vars:
+    # noqa: jinja[spacing]
     etcd_events_peer_addresses: >-
       {% for host in groups['etcd'] -%}
         {%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%}
-          {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2382,
+          {{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2382,
         {%- endif -%}
         {%- if loop.last -%}
           {{ etcd_member_name }}={{ etcd_events_peer_url }}
diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml
index 1cc2abf4f7fde4b235a09a7cf151b4e051322bf2..b3d8e013c7f8e2935d2cae8ad51e3f0894e9db65 100644
--- a/roles/etcd/tasks/join_etcd_member.yml
+++ b/roles/etcd/tasks/join_etcd_member.yml
@@ -15,10 +15,11 @@
 
 - include_tasks: refresh_config.yml
   vars:
+    # noqa: jinja[spacing]
     etcd_peer_addresses: >-
       {% for host in groups['etcd'] -%}
         {%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%}
-          {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].etcd_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2380,
+          {{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2380,
         {%- endif -%}
         {%- if loop.last -%}
           {{ etcd_member_name }}={{ etcd_peer_url }}
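
In both peer-address templates, '"etcd" + loop.index | string' leans on Jinja precedence: the filter binds to loop.index before the concatenation, turning the 1-based counter into a string so that str + int does not raise a type error. A minimal sketch of the rendered member list (addresses are illustrative):

    etcd_peer_addresses: >-
      {% for h in ['10.0.0.1', '10.0.0.2'] -%}
      {{ "etcd" + loop.index | string }}=https://{{ h }}:2380{{ "," if not loop.last else "" }}
      {%- endfor %}

This renders as etcd1=https://10.0.0.1:2380,etcd2=https://10.0.0.2:2380.
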
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 432d5e20debaf2d5fa395b4a7c779a2addea5629..f3d304bb8860653ce0c0767088b4179770c9a26d 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -7,13 +7,13 @@
 
 - include_tasks: "gen_certs_script.yml"
   when:
-    - cert_management |d('script') == "script"
+    - cert_management | d('script') == "script"
   tags:
     - etcd-secrets
 
 - include_tasks: upd_ca_trust.yml
   when:
-    - inventory_hostname in groups['etcd']|union(groups['kube_control_plane'])|unique|sort
+    - inventory_hostname in groups['etcd'] | union(groups['kube_control_plane']) | unique | sort
   tags:
     - etcd-secrets
 
@@ -63,12 +63,12 @@
 - name: Restart etcd if certs changed
   command: /bin/true
   notify: restart etcd
-  when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed|default(false)
+  when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed | default(false)
 
 - name: Restart etcd-events if certs changed
   command: /bin/true
   notify: restart etcd
-  when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false)
+  when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed | default(false)
 
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml
index e743037f9943de5d45c1d58aa8eb5eb22bdc75a2..4de1fe916f686d4969f83e9918ffff693c3d427a 100644
--- a/roles/kubernetes-apps/ansible/defaults/main.yml
+++ b/roles/kubernetes-apps/ansible/defaults/main.yml
@@ -6,10 +6,10 @@
 dns_memory_limit: 300Mi
 dns_cpu_requests: 100m
 dns_memory_requests: 70Mi
-dns_min_replicas: "{{ [ 2, groups['k8s_cluster'] | length ] | min }}"
+dns_min_replicas: "{{ [2, groups['k8s_cluster'] | length] | min }}"
 dns_nodes_per_replica: 16
 dns_cores_per_replica: 256
-dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas|int > 1 else 'false' }}"
+dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas | int > 1 else 'false' }}"
 enable_coredns_reverse_dns_lookups: true
 coredns_ordinal_suffix: ""
 # dns_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]
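
dns_min_replicas clamps the autoscaler floor by taking the min filter over a two-element list, and dns_prevent_single_point_failure renders 'true' only when more than one replica is actually possible. Worked values:

    # 1-node cluster: [2, 1] | min -> 1, dns_prevent_single_point_failure -> 'false'
    # 5-node cluster: [2, 5] | min -> 2, dns_prevent_single_point_failure -> 'true'
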
diff --git a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml
index b94509f4557428dc6b68fb81dc4f3aa1395aac0c..b438afb88c4f968c99c2116ed6499791a2ced40b 100644
--- a/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml
@@ -1,6 +1,7 @@
 ---
 - name: Kubernetes Apps | set up necessary nodelocaldns parameters
   set_fact:
+    # noqa: jinja[spacing]
     primaryClusterIP: >-
       {%- if dns_mode in ['coredns', 'coredns_dual'] -%}
       {{ skydns_server }}
@@ -26,6 +27,7 @@
     - { name: nodelocaldns, file: nodelocaldns-daemonset.yml, type: daemonset }
   register: nodelocaldns_manifests
   vars:
+    # noqa: jinja[spacing]
     forwardTarget: >-
       {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%}
       {{ primaryClusterIP }} {{ secondaryclusterIP }}
@@ -33,8 +35,8 @@
       {{ primaryClusterIP }}
       {%- endif -%}
     upstreamForwardTarget: >-
-      {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
-      {{ upstream_dns_servers|join(' ') }}
+      {%- if upstream_dns_servers is defined and upstream_dns_servers | length > 0 -%}
+      {{ upstream_dns_servers | join(' ') }}
       {%- else -%}
       /etc/resolv.conf
       {%- endif -%}
@@ -54,15 +56,17 @@
     - { name: nodelocaldns, file: nodelocaldns-second-daemonset.yml, type: daemonset }
   register: nodelocaldns_second_manifests
   vars:
+    # noqa: jinja[spacing]
     forwardTarget: >-
       {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%}
       {{ primaryClusterIP }} {{ secondaryclusterIP }}
       {%- else -%}
       {{ primaryClusterIP }}
       {%- endif -%}
+    # noqa: jinja[spacing]
     upstreamForwardTarget: >-
-      {%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
-      {{ upstream_dns_servers|join(' ') }}
+      {%- if upstream_dns_servers is defined and upstream_dns_servers | length > 0 -%}
+      {{ upstream_dns_servers | join(' ') }}
       {%- else -%}
       /etc/resolv.conf
       {%- endif -%}
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
index 0373780428da36fcd4eab4f157d216b90cab8846..7a06023e84b37982ecd4501b16fa0510f3989287 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
@@ -8,12 +8,12 @@ metadata:
       addonmanager.kubernetes.io/mode: EnsureExists
 data:
   Corefile: |
-{% if coredns_external_zones is defined and coredns_external_zones|length > 0 %}
+{% if coredns_external_zones is defined and coredns_external_zones | length > 0 %}
 {%   for block in coredns_external_zones %}
     {{ block['zones'] | join(' ') }} {
         log
         errors
-{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
+{% if block['rewrite'] is defined and block['rewrite'] | length > 0 %}
 {% for rewrite_match in block['rewrite'] %}
         rewrite {{ rewrite_match }}
 {% endfor %}
@@ -57,7 +57,7 @@ data:
 {% endif %}
         }
         prometheus :9153
-        forward . {{ upstream_dns_servers|join(' ') if upstream_dns_servers is defined and upstream_dns_servers|length > 0 else '/etc/resolv.conf' }} {
+        forward . {{ upstream_dns_servers | join(' ') if upstream_dns_servers is defined and upstream_dns_servers | length > 0 else '/etc/resolv.conf' }} {
           prefer_udp
           max_concurrent 1000
 {% if dns_upstream_forward_extra_opts is defined %}
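
The forward directive collapses an if/else into a single inline expression: a non-empty upstream_dns_servers list is space-joined, otherwise the literal /etc/resolv.conf is emitted. Illustrative renderings:

    # upstream_dns_servers: [1.1.1.1, 8.8.8.8]  ->  forward . 1.1.1.1 8.8.8.8 {
    # upstream_dns_servers undefined            ->  forward . /etc/resolv.conf {
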
diff --git a/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2
index 9704155bb2dd73ec73f4c665e9f414bc156f60f1..c085405342f44d2b8dd46346233dabf11e96c876 100644
--- a/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2
@@ -32,7 +32,7 @@ spec:
       annotations:
     spec:
       nodeSelector:
-        {{ dns_autoscaler_deployment_nodeselector}}
+        {{ dns_autoscaler_deployment_nodeselector }}
       priorityClassName: system-cluster-critical
       securityContext:
         seccompProfile:
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
index 47dbf70a3c0d4e47a4040486d03694a45222e483..40dd199e006ee07c70bd3bfcfe9751e34364413a 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
@@ -15,7 +15,7 @@ spec:
       labels:
         app: netchecker-agent
     spec:
-      priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
+      priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
       tolerations:
         - effect: NoSchedule
           operator: Exists
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
index 8b2e51a42146aaa2d36df6c817d9b2eef5b89e30..50e2793353ad4aaa0c14c085b11ded08db729243 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
@@ -19,7 +19,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       nodeSelector:
         kubernetes.io/os: linux
-      priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
+      priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
       tolerations:
         - effect: NoSchedule
           operator: Exists
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
index edda5c5b2ce3a4906bd680f356ea5d988d6409f3..02fd6b6809e54da58fb2806dc9756766476c82ea 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
@@ -16,7 +16,7 @@ spec:
       labels:
         app: netchecker-server
     spec:
-      priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
+      priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
       volumes:
         - name: etcd-data
           emptyDir: {}
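
The trailing {{ '' }} on these priorityClassName lines is a whitespace-control idiom: Ansible's template plugin enables trim_blocks, so a line ending in {% endif %} loses its newline and would merge with the following YAML key; closing the line with an empty expression preserves the line break. A minimal sketch (the condition variable is illustrative):

    # without the {{ '' }} suffix, 'tolerations:' would be pulled up onto this line
    priorityClassName: {% if critical %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
    tolerations:
      - effect: NoSchedule
        operator: Exists
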
diff --git a/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2
index 231c8bac1600e2ac77e42875239a97ce03ce3905..b15ea89e996ed2238aebe69c560fddebee4b583e 100644
--- a/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2
@@ -8,13 +8,13 @@ metadata:
 
 data:
   Corefile: |
-{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %}
+{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones | length > 0 %}
 {% for block in nodelocaldns_external_zones %}
     {{ block['zones'] | join(' ') }} {
         errors
         cache {{ block['cache'] | default(30) }}
         reload
-{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
+{% if block['rewrite'] is defined and block['rewrite'] | length > 0 %}
 {% for rewrite_match in block['rewrite'] %}
         rewrite {{ rewrite_match }}
 {% endfor %}
@@ -95,7 +95,7 @@ data:
     }
 {% if enable_nodelocaldns_secondary %}
   Corefile-second: |
-{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %}
+{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones | length > 0 %}
 {% for block in nodelocaldns_external_zones %}
     {{ block['zones'] | join(' ') }} {
         errors
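For context, a nodelocaldns_external_zones value exercising the guards above could look like this (a hypothetical inventory snippet; only the keys read by this template are shown):

    nodelocaldns_external_zones:
      - zones:
          - example.org
          - example.com
        cache: 5
        rewrite:
          - name websvc.ns.svc.cluster.local web.example.org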
diff --git a/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2 b/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2
index de7709354aa9c1689adeb14f63d38c93c8a617ae..d585de1f014afe78bb4967402c51de14bacfcdfa 100644
--- a/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2
+++ b/roles/kubernetes-apps/cloud_controller/oci/templates/controller-manager-config.yml.j2
@@ -63,7 +63,7 @@ loadBalancer:
   #                    inbound traffic to load balancers.
   securityListManagementMode: {{ oci_security_list_management }}
 
-{% if oci_security_lists is defined and oci_security_lists|length > 0 %}
+{% if oci_security_lists is defined and oci_security_lists | length > 0 %}
   # Optional specification of which security lists to modify per subnet. This does not apply if security list management is off.
   securityLists:
 {% for subnet_ocid, list_ocid in oci_security_lists.items() %}
@@ -71,7 +71,7 @@ loadBalancer:
 {% endfor %}
 {% endif %}
 
-{% if oci_rate_limit is defined and oci_rate_limit|length > 0 %}
+{% if oci_rate_limit is defined and oci_rate_limit | length > 0 %}
 # Optional rate limit controls for accessing OCI API
 rateLimiter:
 {% if oci_rate_limit.rate_limit_qps_read %}
diff --git a/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2 b/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2
index d50f1393faf31881bb045d3a4df312efeb30b055..6b45d818c8354de3539d27f637db0fe83cf4716a 100644
--- a/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2
+++ b/roles/kubernetes-apps/cloud_controller/oci/templates/oci-cloud-provider.yml.j2
@@ -30,7 +30,7 @@ spec:
     spec:
 {% if oci_cloud_controller_pull_secret is defined %}
       imagePullSecrets:
-      - name: {{oci_cloud_controller_pull_secret}}
+      - name: {{ oci_cloud_controller_pull_secret }}
 {% endif %}
       serviceAccountName: cloud-controller-manager
       hostNetwork: true
@@ -56,7 +56,7 @@ spec:
             path: /etc/kubernetes
       containers:
         - name: oci-cloud-controller-manager
-          image: {{oci_cloud_controller_pull_source}}:{{oci_cloud_controller_version}}
+          image: {{ oci_cloud_controller_pull_source }}:{{ oci_cloud_controller_version }}
           command: ["/usr/local/bin/oci-cloud-controller-manager"]
           args:
             - --cloud-config=/etc/oci/cloud-provider.yaml
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index 643c0ce454b3a635efe6624111c54d8c59af5861..668f18afd3e9cf4f5c6435f61ccea5bb3365d9ff 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -70,7 +70,7 @@
     src: k8s-cluster-critical-pc.yml
     dest: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
     mode: 0640
-  when: inventory_hostname == groups['kube_control_plane']|last
+  when: inventory_hostname == groups['kube_control_plane'] | last
 
 - name: PriorityClass | Create k8s-cluster-critical
   kube:
@@ -79,4 +79,4 @@
     resource: "PriorityClass"
     filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
     state: latest
-  when: inventory_hostname == groups['kube_control_plane']|last
+  when: inventory_hostname == groups['kube_control_plane'] | last
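Both tasks above are gated on the last control-plane host, so the PriorityClass is templated and applied exactly once per play. Roughly:

    # groups['kube_control_plane']: ['cp0', 'cp1', 'cp2']
    # groups['kube_control_plane'] | last  -> 'cp2'   (only cp2 runs the task)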
diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
index 62ecaf90f893f1f5b31ceb8796541addbc84c0df..8cba9bf3774500cb7117bd3aef94d226107974a7 100644
--- a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
+++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml
@@ -1,25 +1,25 @@
 ---
 
-- name: Container Engine Acceleration Nvidia GPU| gather os specific variables
+- name: Container Engine Acceleration Nvidia GPU | gather os specific variables
   include_vars: "{{ item }}"
   with_first_found:
     - files:
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}.yml"
-        - "{{ ansible_os_family|lower }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
+        - "{{ ansible_distribution | lower }}.yml"
+        - "{{ ansible_os_family | lower }}.yml"
       skip: true
 
 - name: Container Engine Acceleration Nvidia GPU | Set fact of download url Tesla
   set_fact:
     nvidia_driver_download_url_default: "{{ nvidia_gpu_tesla_base_url }}{{ nvidia_url_end }}"
-  when: nvidia_gpu_flavor|lower == "tesla"
+  when: nvidia_gpu_flavor | lower == "tesla"
 
 - name: Container Engine Acceleration Nvidia GPU | Set fact of download url GTX
   set_fact:
     nvidia_driver_download_url_default: "{{ nvidia_gpu_gtx_base_url }}{{ nvidia_url_end }}"
-  when: nvidia_gpu_flavor|lower == "gtx"
+  when: nvidia_gpu_flavor | lower == "gtx"
 
 - name: Container Engine Acceleration Nvidia GPU | Create addon dir
   file:
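The with_first_found lookup above resolves OS-specific variables from most to least specific. On a hypothetical Ubuntu 20.04 host the candidates would be tried in this order:

    ubuntu-20.04.yml     # distribution-version
    ubuntu-focal.yml     # distribution-release
    ubuntu-20.yml        # distribution-major version
    ubuntu.yml           # distribution
    debian.yml           # os family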
diff --git a/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml b/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml
index 6a13e8612531b48f7d5543e15d2e70042dd9eb3a..501f3689c0ae472fc3b80f05810fd9827639e30f 100644
--- a/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml
+++ b/roles/kubernetes-apps/csi_driver/cinder/defaults/main.yml
@@ -2,18 +2,18 @@
 # To access Cinder, the CSI controller will need credentials to access
 # OpenStack APIs. By default these values will be
 # read from the environment.
-cinder_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
-cinder_username: "{{ lookup('env','OS_USERNAME') }}"
-cinder_password: "{{ lookup('env','OS_PASSWORD') }}"
-cinder_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID')  }}"
-cinder_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME')  }}"
-cinder_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET')  }}"
-cinder_region: "{{ lookup('env','OS_REGION_NAME') }}"
-cinder_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}"
-cinder_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}"
-cinder_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
-cinder_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
-cinder_cacert: "{{ lookup('env','OS_CACERT') }}"
+cinder_auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
+cinder_username: "{{ lookup('env', 'OS_USERNAME') }}"
+cinder_password: "{{ lookup('env', 'OS_PASSWORD') }}"
+cinder_application_credential_id: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_ID') }}"
+cinder_application_credential_name: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_NAME') }}"
+cinder_application_credential_secret: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_SECRET') }}"
+cinder_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
+cinder_tenant_id: "{{ lookup('env', 'OS_TENANT_ID') | default(lookup('env', 'OS_PROJECT_ID'), true) }}"
+cinder_tenant_name: "{{ lookup('env', 'OS_TENANT_NAME') | default(lookup('env', 'OS_PROJECT_NAME'), true) }}"
+cinder_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
+cinder_domain_id: "{{ lookup('env', 'OS_USER_DOMAIN_ID') }}"
+cinder_cacert: "{{ lookup('env', 'OS_CACERT') }}"
 
 # For now, only Cinder v3 is supported in the Cinder CSI driver
 cinder_blockstorage_version: "v3"
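One detail worth keeping in mind while touching these defaults: lookup('env', ...) returns an empty string when the variable is unset, and the boolean second argument to default() makes the fallback apply to empty values as well as undefined ones. Sketching the tenant id line:

    # OS_TENANT_ID unset  -> lookup returns ''
    # default(..., true)  -> '' counts as missing, so OS_PROJECT_ID wins
    cinder_tenant_id: "{{ lookup('env', 'OS_TENANT_ID') | default(lookup('env', 'OS_PROJECT_ID'), true) }}"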
diff --git a/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml b/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml
index cb65f42b0df2a3cc565c3bf099ed63102554079e..d7977326bb67a99ddf3fe9230ee6d9d04192128e 100644
--- a/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml
+++ b/roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-credential-check.yml
@@ -16,7 +16,7 @@
     msg: "cinder_application_credential_id is missing"
   when:
     - cinder_application_credential_name is defined
-    - cinder_application_credential_name|length > 0
+    - cinder_application_credential_name | length > 0
     - cinder_application_credential_id is not defined or not cinder_application_credential_id
 
 - name: Cinder CSI Driver | check cinder_application_credential_secret value
@@ -24,7 +24,7 @@
     msg: "cinder_application_credential_secret is missing"
   when:
     - cinder_application_credential_name is defined
-    - cinder_application_credential_name|length > 0
+    - cinder_application_credential_name | length > 0
     - cinder_application_credential_secret is not defined or not cinder_application_credential_secret
 
 - name: Cinder CSI Driver | check cinder_password value
@@ -32,7 +32,7 @@
     msg: "cinder_password is missing"
   when:
     - cinder_username is defined
-    - cinder_username|length > 0
+    - cinder_username | length > 0
     - cinder_application_credential_name is not defined or not cinder_application_credential_name
     - cinder_application_credential_secret is not defined or not cinder_application_credential_secret
     - cinder_password is not defined or not cinder_password
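Each check above follows the same three-part guard: the driving variable is defined, non-empty, and its counterpart is absent or falsy. The same intent could be expressed with assert instead of fail (a sketch under those assumptions, not how the role is actually structured):

    - name: Cinder CSI Driver | check application credential pair
      assert:
        that:
          - cinder_application_credential_id | default('') | length > 0
        fail_msg: "cinder_application_credential_id is missing"
      when: cinder_application_credential_name | default('') | length > 0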
diff --git a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2
index a4db6421510ad85fea0717b6fdd6fe66dcb35a0c..b0b8f78fdb39bac1fe228bd861948d6a6316106f 100644
--- a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2
@@ -133,7 +133,7 @@ spec:
             - name: ca-certs
               mountPath: /etc/ssl/certs
               readOnly: true
-{% if ssl_ca_dirs|length %}
+{% if ssl_ca_dirs | length %}
 {% for dir in ssl_ca_dirs %}
             - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
               mountPath: {{ dir }}
@@ -155,7 +155,7 @@ spec:
           hostPath:
             path: /etc/ssl/certs
             type: DirectoryOrCreate
-{% if ssl_ca_dirs|length %}
+{% if ssl_ca_dirs | length %}
 {% for dir in ssl_ca_dirs %}
         - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
           hostPath:
diff --git a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2 b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2
index 41f922a2fb7d8a221e7ed52c2bec56f4b33c318e..289b1683063533609ee23d39610bb39b63365df4 100644
--- a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-nodeplugin.yml.j2
@@ -89,7 +89,7 @@ spec:
             - name: ca-certs
               mountPath: /etc/ssl/certs
               readOnly: true
-{% if ssl_ca_dirs|length %}
+{% if ssl_ca_dirs | length %}
 {% for dir in ssl_ca_dirs %}
             - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
               mountPath: {{ dir }}
@@ -125,7 +125,7 @@ spec:
           hostPath:
             path: /etc/ssl/certs
             type: DirectoryOrCreate
-{% if ssl_ca_dirs|length %}
+{% if ssl_ca_dirs | length %}
 {% for dir in ssl_ca_dirs %}
         - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
           hostPath:
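The regex_replace chain repeated in these CSI templates derives a volume name from a host path: the first call strips the leading slash, the second replaces the remaining slashes with dashes. For a hypothetical entry in ssl_ca_dirs:

    # '/etc/pki/tls' | regex_replace('^/(.*)$', '\\1') | regex_replace('/', '-')
    # => 'etc-pki-tls'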
diff --git a/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml b/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml
index 0f9eac4713435f4b7020249d076277292062863a..ea828f3332f64b1d0b27da650cb4733f079a01c9 100644
--- a/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml
+++ b/roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml
@@ -5,12 +5,12 @@ upcloud_csi_attacher_image_tag: "v3.4.0"
 upcloud_csi_resizer_image_tag: "v1.4.0"
 upcloud_csi_plugin_image_tag: "v0.3.3"
 upcloud_csi_node_image_tag: "v2.5.0"
-upcloud_username: "{{ lookup('env','UPCLOUD_USERNAME')  }}"
-upcloud_password: "{{ lookup('env','UPCLOUD_PASSWORD')  }}"
+upcloud_username: "{{ lookup('env', 'UPCLOUD_USERNAME') }}"
+upcloud_password: "{{ lookup('env', 'UPCLOUD_PASSWORD') }}"
 upcloud_tolerations: []
 upcloud_csi_enable_volume_snapshot: false
 upcloud_csi_snapshot_controller_replicas: 2
 upcloud_csi_snapshotter_image_tag: "v4.2.1"
 upcloud_csi_snapshot_controller_image_tag: "v4.2.1"
 upcloud_csi_snapshot_validation_webhook_image_tag: "v4.2.1"
-upcloud_cacert: "{{ lookup('env','OS_CACERT') }}"
+upcloud_cacert: "{{ lookup('env', 'OS_CACERT') }}"
diff --git a/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml b/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml
index 63e37bcf5f0cedc24525440310a20fb7a684bce2..8f0b69f8c8615570c0fc1886036a3bfb6b8ad2d7 100644
--- a/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/upcloud/tasks/main.yml
@@ -9,7 +9,7 @@
     msg: "UpCloud password is missing. Env UPCLOUD_PASSWORD is mandatory"
   when:
     - upcloud_username is defined
-    - upcloud_username|length > 0
+    - upcloud_username | length > 0
     - upcloud_password is not defined or not upcloud_password
 
 - name: UpCloud CSI Driver | Generate Manifests
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml
index ede7cb0b7c780fa65ee889b862558c106a7eff77..e01b36b1dc5ac9f8a357ad82a42686473b66210f 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml
+++ b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml
@@ -36,8 +36,8 @@ unsafe_show_logs: false
 # according to the above link, we can control the block-volume-snapshot parameter
 vsphere_csi_block_volume_snapshot: false
 
-external_vsphere_user: "{{ lookup('env','VSPHERE_USER') }}"
-external_vsphere_password: "{{ lookup('env','VSPHERE_PASSWORD') }}"
+external_vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
+external_vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
 
 # Controller resources
 vsphere_csi_snapshotter_resources: {}
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
index bb0161429b15eb4b89d4b3518b74e3c32bf08c6a..0fe5c49e3907ef220255df187613a4e8e02ae0e3 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
@@ -44,11 +44,11 @@
   command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml"
   register: vsphere_csi_secret_manifest
   when: inventory_hostname == groups['kube_control_plane'][0]
-  no_log: "{{ not (unsafe_show_logs|bool) }}"
+  no_log: "{{ not (unsafe_show_logs | bool) }}"
 
 - name: vSphere CSI Driver | Apply a CSI secret manifest
   command:
     cmd: "{{ kubectl }} apply -f -"
     stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
   when: inventory_hostname == groups['kube_control_plane'][0]
-  no_log: "{{ not (unsafe_show_logs|bool) }}"
+  no_log: "{{ not (unsafe_show_logs | bool) }}"
diff --git a/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml b/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml
index e09f99d1fab98148b65429342ad915764d1e2486..c626e78e9a9673f6bc855b1a484654275be967a6 100644
--- a/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/hcloud/tasks/main.yml
@@ -9,7 +9,7 @@
     - {name: external-hcloud-cloud-secret, file: external-hcloud-cloud-secret.yml}
     - {name: external-hcloud-cloud-service-account, file: external-hcloud-cloud-service-account.yml}
     - {name: external-hcloud-cloud-role-bindings, file: external-hcloud-cloud-role-bindings.yml}
-    - {name: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks' if external_hcloud_cloud.with_networks  else 'external-hcloud-cloud-controller-manager-ds' }}", file: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks.yml' if external_hcloud_cloud.with_networks  else  'external-hcloud-cloud-controller-manager-ds.yml' }}"}
+    - {name: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds' }}", file: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks.yml' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds.yml' }}"}
 
   register: external_hcloud_manifests
   when: inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2 b/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2
index c2ea894a9c56cf4348d11180bd745eee331aebe8..a750c2fd9fa0243625b61a9b8e5b28490d2336ae 100644
--- a/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2
+++ b/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2
@@ -7,5 +7,5 @@ metadata:
 data:
   token: "{{ external_hcloud_cloud.hcloud_api_token | b64encode }}"
 {% if external_hcloud_cloud.with_networks  %}
-  network: "{{ network_id|b64encode }}"
+  network: "{{ network_id | b64encode }}"
 {% endif %}
diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml b/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml
index f19ad7deb906e505622f024fb71fbadb145826fc..4bcf135a33efc2175263c43d321bfc86e7de1562 100644
--- a/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/openstack/defaults/main.yml
@@ -2,18 +2,18 @@
 # The external cloud controller will need credentials to access
 # OpenStack APIs. By default these values will be
 # read from the environment.
-external_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL')  }}"
-external_openstack_username: "{{ lookup('env','OS_USERNAME')  }}"
-external_openstack_password: "{{ lookup('env','OS_PASSWORD')  }}"
-external_openstack_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID')  }}"
-external_openstack_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME')  }}"
-external_openstack_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET')  }}"
-external_openstack_region: "{{ lookup('env','OS_REGION_NAME')  }}"
-external_openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}"
-external_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}"
-external_openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
-external_openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
-external_openstack_cacert: "{{ lookup('env','OS_CACERT') }}"
+external_openstack_auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
+external_openstack_username: "{{ lookup('env', 'OS_USERNAME') }}"
+external_openstack_password: "{{ lookup('env', 'OS_PASSWORD') }}"
+external_openstack_application_credential_id: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_ID') }}"
+external_openstack_application_credential_name: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_NAME') }}"
+external_openstack_application_credential_secret: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_SECRET') }}"
+external_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
+external_openstack_tenant_id: "{{ lookup('env', 'OS_TENANT_ID') | default(lookup('env', 'OS_PROJECT_ID'), true) }}"
+external_openstack_tenant_name: "{{ lookup('env', 'OS_TENANT_NAME') | default(lookup('env', 'OS_PROJECT_NAME'), true) }}"
+external_openstack_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
+external_openstack_domain_id: "{{ lookup('env', 'OS_USER_DOMAIN_ID') }}"
+external_openstack_cacert: "{{ lookup('env', 'OS_CACERT') }}"
 
 ## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset
 ## Format:
diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml
index 9abc927e2bc02ae0c38ecd894afb447ff4bc0b9e..6a146584f60b73dbe62df4f554952d102b8a7e1f 100644
--- a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/openstack-credential-check.yml
@@ -18,7 +18,7 @@
     msg: "external_openstack_application_credential_id is missing"
   when:
     - external_openstack_application_credential_name is defined
-    - external_openstack_application_credential_name|length > 0
+    - external_openstack_application_credential_name | length > 0
     - external_openstack_application_credential_id is not defined or not external_openstack_application_credential_id
 
 
@@ -27,7 +27,7 @@
     msg: "external_openstack_application_credential_secret is missing"
   when:
     - external_openstack_application_credential_name is defined
-    - external_openstack_application_credential_name|length > 0
+    - external_openstack_application_credential_name | length > 0
     - external_openstack_application_credential_secret is not defined or not external_openstack_application_credential_secret
 
 
@@ -36,7 +36,7 @@
     msg: "external_openstack_password is missing"
   when:
     - external_openstack_username is defined
-    - external_openstack_username|length > 0
+    - external_openstack_username | length > 0
     - external_openstack_application_credential_name is not defined or not external_openstack_application_credential_name
     - external_openstack_application_credential_secret is not defined or not external_openstack_application_credential_secret
     - external_openstack_password is not defined or not external_openstack_password
diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2 b/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2
index 6649a24ec9d596cad38c38a2b0428984df0e61be..565875dfff9f87f635ea7622d22082b3136c2de9 100644
--- a/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2
+++ b/roles/kubernetes-apps/external_cloud_controller/openstack/templates/external-openstack-cloud-controller-manager-ds.yml.j2
@@ -57,7 +57,7 @@ spec:
             - mountPath: /etc/ssl/certs
               name: ca-certs
               readOnly: true
-{% if ssl_ca_dirs|length %}
+{% if ssl_ca_dirs | length %}
 {% for dir in ssl_ca_dirs %}
             - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
               mountPath: {{ dir }}
@@ -98,7 +98,7 @@ spec:
         hostPath:
           path: /etc/ssl/certs
           type: DirectoryOrCreate
-{% if ssl_ca_dirs|length %}
+{% if ssl_ca_dirs | length %}
 {% for dir in ssl_ca_dirs %}
       - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
         hostPath:
diff --git a/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml b/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml
index 91b126ed90bad4855773266e4f8e281e4e2ffe80..b6fb797a817835f76ae037385e112c12847da5ee 100644
--- a/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml
+++ b/roles/kubernetes-apps/external_cloud_controller/vsphere/defaults/main.yml
@@ -10,5 +10,5 @@ external_vsphere_insecure: "true"
 external_vsphere_cloud_controller_extra_args: {}
 external_vsphere_cloud_controller_image_tag: "latest"
 
-external_vsphere_user: "{{ lookup('env','VSPHERE_USER') }}"
-external_vsphere_password: "{{ lookup('env','VSPHERE_PASSWORD') }}"
+external_vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
+external_vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
index ac3bb33db2bc546030b44db450029c6d664f9598..8d9eb08bb839af0b7b7ad32a5248b0730dbd1006 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/deploy-cephfs-provisioner.yml.j2
@@ -19,7 +19,7 @@ spec:
         app: cephfs-provisioner
         version: {{ cephfs_provisioner_image_tag }}
     spec:
-      priorityClassName: {% if cephfs_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
+      priorityClassName: {% if cephfs_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
       serviceAccount: cephfs-provisioner
       containers:
         - name: cephfs-provisioner
diff --git a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2
index 6922691cf8c0e1a2e2ca73f46c18b1de7e56e8d8..6ce426a005b8c7f17aed30116160456d97222cdf 100644
--- a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-deployment.yml.j2
@@ -24,7 +24,7 @@ spec:
         - start
         - --config
         - /etc/config/config.json
-{% if local_path_provisioner_debug|default(false) %}
+{% if local_path_provisioner_debug | default(false) %}
         - --debug
 {% endif %}
         volumeMounts:
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
index 16ed6ffab4fd776f96dc1d422b8aa5be8f4f3d56..38afefb2c8cca379eb283a48f4f7f8a2c85d24fe 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
@@ -12,7 +12,7 @@ local_volume_provisioner_use_node_name_only: false
 local_volume_provisioner_storage_classes: |
   {
     "{{ local_volume_provisioner_storage_class | default('local-storage') }}": {
-      "host_dir": "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}",
+      "host_dir": "{{ local_volume_provisioner_base_dir | default('/mnt/disks') }}",
       "mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}",
       "volume_mode": "Filesystem",
       "fs_type": "ext4"
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2
index 76625b6df55ec6d594ef69b1095c73d09fab91bc..7e37283b16cc42077bf204eed268eac821923ccd 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2
@@ -1,8 +1,8 @@
 # Macro to convert snake_case dictionary keys to camelCase keys
 {% macro convert_keys(mydict) -%}
-  {% for key in mydict.keys()|list -%}
+  {% for key in mydict.keys() | list -%}
     {% set key_split = key.split('_') -%}
-    {% set new_key = key_split[0] + key_split[1:]|map('capitalize')|join -%}
+    {% set new_key = key_split[0] + key_split[1:] | map('capitalize') | join -%}
     {% set value = mydict.pop(key) -%}
     {{ mydict.__setitem__(new_key, value) -}}
     {{ convert_keys(value) if value is mapping else None -}}
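The macro above rewrites a dictionary in place, turning each snake_case key into camelCase and recursing into nested mappings. Applied to the storage-class dict from this role's defaults, roughly:

    host_dir: /mnt/disks      ->  hostDir: /mnt/disks
    mount_dir: /mnt/disks     ->  mountDir: /mnt/disks
    volume_mode: Filesystem   ->  volumeMode: Filesystem
    fs_type: ext4             ->  fsType: ext4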
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
index a8747a230420f2d61b10d6dc36e10f4299d69361..90a47309093c70fba05ec51e002e2ffd106a001e 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2
@@ -18,7 +18,7 @@ spec:
         k8s-app: local-volume-provisioner
         version: {{ local_volume_provisioner_image_tag }}
     spec:
-      priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
+      priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
       serviceAccountName: local-volume-provisioner
       nodeSelector:
         kubernetes.io/os: linux
diff --git a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2 b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2
index dccc16564036531b8f547a6cd68096d2b0eb967d..b8643db64abcd6511dd0dab6d9a5d353b24e33d1 100644
--- a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/deploy-rbd-provisioner.yml.j2
@@ -21,7 +21,7 @@ spec:
         app: rbd-provisioner
         version: {{ rbd_provisioner_image_tag }}
     spec:
-      priorityClassName: {% if rbd_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
+      priorityClassName: {% if rbd_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
       serviceAccount: rbd-provisioner
       containers:
         - name: rbd-provisioner
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 0ac7edca9e44c30601c3a429bd2681547facfb30..eae0e217156764e8c2f4e3d47cb9e18b2d9b1dd9 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -3,11 +3,11 @@
   include_vars: "{{ item }}"
   with_first_found:
     - files:
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}.yml"
-        - "{{ ansible_os_family|lower }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
+        - "{{ ansible_distribution | lower }}.yml"
+        - "{{ ansible_os_family | lower }}.yml"
         - defaults.yml
       paths:
         - ../vars
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml
index b7751d5121f13ebe596f562b0bb8130f3adae890..0f58bd5bb7c3f7ad9ade66cc6bc6ad6d707f4129 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml
@@ -10,9 +10,9 @@ cert_manager_controller_extra_args: []
 
 ## Allow http_proxy, https_proxy and no_proxy environment variables
 ## Details https://github.com/kubernetes-sigs/kubespray/blob/master/docs/proxy.md
-cert_manager_http_proxy: "{{ http_proxy|default('') }}"
-cert_manager_https_proxy: "{{ https_proxy|default('') }}"
-cert_manager_no_proxy: "{{ no_proxy|default('') }}"
+cert_manager_http_proxy: "{{ http_proxy | default('') }}"
+cert_manager_https_proxy: "{{ https_proxy | default('') }}"
+cert_manager_no_proxy: "{{ no_proxy | default('') }}"
 
 ## Change the leader election namespace when deploying on GKE Autopilot, which forbids changes to the kube-system namespace.
 ## See https://github.com/jetstack/cert-manager/issues/3717
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
index 4afb75d3aef5ad21d0492aeef38894f7a3930661..70e4ea0ea5b154d488881aacb6bdb4e09b98b239 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ds-ingress-nginx-controller.yml.j2
@@ -35,7 +35,7 @@ spec:
       tolerations:
         {{ ingress_nginx_tolerations | to_nice_yaml(indent=2) | indent(width=8) }}
 {% endif %}
-      priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
+      priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
       containers:
         - name: ingress-nginx-controller
           image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }}
diff --git a/roles/kubernetes-apps/metallb/tasks/main.yml b/roles/kubernetes-apps/metallb/tasks/main.yml
index 91d16e5c33828b6903001d52a79a930307fa77d0..4b26d3a78eb74a886287f1fe8be75e81785d4f0e 100644
--- a/roles/kubernetes-apps/metallb/tasks/main.yml
+++ b/roles/kubernetes-apps/metallb/tasks/main.yml
@@ -41,7 +41,7 @@
     name: "MetalLB"
     kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/metallb.yaml"
-    state: "{{ metallb_rendering.changed | ternary('latest','present') }}"
+    state: "{{ metallb_rendering.changed | ternary('latest', 'present') }}"
     wait: true
   become: true
   when:
@@ -67,7 +67,7 @@
         name: "MetalLB"
         kubectl: "{{ bin_dir }}/kubectl"
         filename: "{{ kube_config_dir }}/pools.yaml"
-        state: "{{ pools_rendering.changed | ternary('latest','present') }}"
+        state: "{{ pools_rendering.changed | ternary('latest', 'present') }}"
       become: true
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -87,7 +87,7 @@
         name: "MetalLB"
         kubectl: "{{ bin_dir }}/kubectl"
         filename: "{{ kube_config_dir }}/layer2.yaml"
-        state: "{{ layer2_rendering.changed | ternary('latest','present') }}"
+        state: "{{ layer2_rendering.changed | ternary('latest', 'present') }}"
       become: true
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -107,7 +107,7 @@
         name: "MetalLB"
         kubectl: "{{ bin_dir }}/kubectl"
         filename: "{{ kube_config_dir }}/layer3.yaml"
-        state: "{{ layer3_rendering.changed | ternary('latest','present') }}"
+        state: "{{ layer3_rendering.changed | ternary('latest', 'present') }}"
       become: true
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
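The ternary filter in these MetalLB tasks maps the render result onto the kube module's state: a changed template forces a re-apply, an unchanged one leaves the existing object alone. In isolation:

    state: "{{ metallb_rendering.changed | ternary('latest', 'present') }}"
    # changed == true   -> state: latest
    # changed == false  -> state: present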
diff --git a/roles/kubernetes-apps/metallb/templates/layer3.yaml.j2 b/roles/kubernetes-apps/metallb/templates/layer3.yaml.j2
index 57d9465bee0dd11408519fa8d511adf71b2ee0c7..490bae24fdbb2ee88276ec50d1434bab5f594033 100644
--- a/roles/kubernetes-apps/metallb/templates/layer3.yaml.j2
+++ b/roles/kubernetes-apps/metallb/templates/layer3.yaml.j2
@@ -57,7 +57,7 @@ spec:
   aggregationLengthV6: 128
   communities:
   - no-advertise
-  localpref: "{{ peer.localpref | default ("100") }}"
+  localpref: "{{ peer.localpref | default("100") }}"
   ipAddressPools:
   {% for address_pool in peer.address_pool %}
   - "{{ address_pool }}"
diff --git a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
index bfaa9b3a26eb1d8328006a73704fa9770a3d1553..8e56d34ee0171504df240f68e7c8f3fd613b9320 100644
--- a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml
@@ -9,10 +9,10 @@
     state: "latest"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true
-  with_items: "{{ multus_manifest_1.results + (multus_nodes_list|map('extract', hostvars, 'multus_manifest_2')|list|json_query('[].results')) }}"
+  with_items: "{{ multus_manifest_1.results + (multus_nodes_list | map('extract', hostvars, 'multus_manifest_2') | list | json_query('[].results')) }}"
   loop_control:
     label: "{{ item.item.name }}"
   vars:
-    multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch|length == ansible_play_hosts_all|length else ansible_play_batch }}"
+    multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch | length == ansible_play_hosts_all | length else ansible_play_batch }}"
   when:
     - not item is skipped
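The with_items expression above is dense enough to merit unpacking: map('extract', hostvars, 'multus_manifest_2') pulls each listed node's registered result out of hostvars, and json_query('[].results') flattens the per-host results lists into one list, which is then concatenated with multus_manifest_1.results. Step by step, for hypothetical hosts node1 and node2:

    # multus_nodes_list: ['node1', 'node2']
    # extract    -> [hostvars['node1'].multus_manifest_2, hostvars['node2'].multus_manifest_2]
    # json_query -> the two .results lists flattened into one list for the loop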
diff --git a/roles/kubernetes-apps/registry/tasks/main.yml b/roles/kubernetes-apps/registry/tasks/main.yml
index 5090212e18f3828da288ee17fbf681e4438e9a47..06f1f6a1355594ed6086ef89d43b0441ab0476bb 100644
--- a/roles/kubernetes-apps/registry/tasks/main.yml
+++ b/roles/kubernetes-apps/registry/tasks/main.yml
@@ -8,21 +8,21 @@
   fail:
     msg: "registry_service_cluster_ip is only compatible with the ClusterIP service type."
   when:
-    - registry_service_cluster_ip is defined and registry_service_cluster_ip|length > 0
+    - registry_service_cluster_ip is defined and registry_service_cluster_ip | length > 0
     - registry_service_type != "ClusterIP"
 
 - name: Registry | Stop if registry_service_loadbalancer_ip is defined when registry_service_type is not 'LoadBalancer'
   fail:
     msg: "registry_service_loadbalancer_ip is only compatible with the LoadBalancer service type."
   when:
-    - registry_service_loadbalancer_ip is defined and registry_service_loadbalancer_ip|length > 0
+    - registry_service_loadbalancer_ip is defined and registry_service_loadbalancer_ip | length > 0
     - registry_service_type != "LoadBalancer"
 
 - name: Registry | Stop if registry_service_nodeport is defined when registry_service_type is not 'NodePort'
   fail:
     msg: "registry_service_nodeport is only compatible with the NodePort service type."
   when:
-    - registry_service_nodeport is defined and registry_service_nodeport|length > 0
+    - registry_service_nodeport is defined and registry_service_nodeport | length > 0
     - registry_service_type != "NodePort"
 
 - name: Registry | Create addon dir
diff --git a/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
index 47519f9d80f273f406a51d68ed55e237fdf800be..3b516845cd16c901e91827c5c326129902c00896 100644
--- a/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
+++ b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2
@@ -24,7 +24,7 @@ spec:
         k8s-app: registry
         version: v{{ registry_image_tag }}
     spec:
-      priorityClassName: {% if registry_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
+      priorityClassName: {% if registry_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
       serviceAccountName: registry
       securityContext:
         fsGroup: 1000
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index cb9e81e79fb915aa8b0102063a9d5f306107287f..4483038f9a19b84edb310f995a36661f14b8d632 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -1,6 +1,7 @@
 ---
 - name: Set external kube-apiserver endpoint
   set_fact:
+    # noqa: jinja[spacing]
     external_apiserver_address: >-
       {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined -%}
       {{ loadbalancer_apiserver.address }}
@@ -9,9 +10,10 @@
       {%- else -%}
       {{ kube_apiserver_access_address }}
       {%- endif -%}
+    # noqa: jinja[spacing]
     external_apiserver_port: >-
       {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and loadbalancer_apiserver.port is defined -%}
-      {{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
+      {{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
       {%- else -%}
       {{ kube_apiserver_port }}
       {%- endif -%}
@@ -69,9 +71,9 @@
     user_certs: "{{ admin_kubeconfig['users'][0]['user'] }}"
     username: "kubernetes-admin-{{ cluster_name }}"
     context: "kubernetes-admin-{{ cluster_name }}@{{ cluster_name }}"
-    override_cluster_name: "{{ { 'clusters': [ { 'cluster': (cluster_infos|combine({'server': 'https://'+external_apiserver_address+':'+(external_apiserver_port|string)})), 'name': cluster_name } ] } }}"
-    override_context: "{{ { 'contexts': [ { 'context': { 'user': username, 'cluster': cluster_name }, 'name': context } ], 'current-context': context } }}"
-    override_user: "{{ { 'users': [ { 'name': username, 'user': user_certs  } ] } }}"
+    override_cluster_name: "{{ {'clusters': [{'cluster': (cluster_infos | combine({'server': 'https://' + external_apiserver_address + ':' + (external_apiserver_port | string)})), 'name': cluster_name}]} }}"
+    override_context: "{{ {'contexts': [{'context': {'user': username, 'cluster': cluster_name}, 'name': context}], 'current-context': context} }}"
+    override_user: "{{ {'users': [{'name': username, 'user': user_certs}]} }}"
   when: kubeconfig_localhost
 
 - name: Write admin kubeconfig on ansible host
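The two added noqa markers are the inline counterpart of the new .ansible-lint-ignore file: jinja[spacing] cannot safely re-format these multi-line >- folded expressions, so the rule is silenced on the spot rather than rewriting them. In general the marker sits on (or, as here, directly above) the offending line; a generic sketch, where my_fact, some_condition, value_a and value_b are placeholders:

    set_fact:
      # noqa: jinja[spacing]
      my_fact: >-
        {%- if some_condition -%}
        {{ value_a }}
        {%- else -%}
        {{ value_b }}
        {%- endif -%}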
diff --git a/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml b/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml
index 5503212ab80267316311bf43bc42d93830e77685..24ebc6cc51c634c311ea16e980b448c81e5e4880 100644
--- a/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml
+++ b/roles/kubernetes/control-plane/defaults/main/kube-proxy.yml
@@ -111,4 +111,4 @@ kube_proxy_oom_score_adj: -999
 
 # portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
 # in order to proxy service traffic. If unspecified, 0, or (0-0) then ports will be randomly chosen.
-kube_proxy_port_range: ''
\ No newline at end of file
+kube_proxy_port_range: ''
diff --git a/roles/kubernetes/control-plane/defaults/main/main.yml b/roles/kubernetes/control-plane/defaults/main/main.yml
index c25fbc10dd246e3b2d93720d5b68e1cdeffbee04..2a9eda14a51d12a6e71c2293445974140013779b 100644
--- a/roles/kubernetes/control-plane/defaults/main/main.yml
+++ b/roles/kubernetes/control-plane/defaults/main/main.yml
@@ -5,7 +5,7 @@ upgrade_cluster_setup: false
 # By default the external API listens on all interfaces; this can be changed to
 # listen on a specific address/interface.
 # NOTE: If you set a specific address/interface and use loadbalancer_apiserver_localhost,
-# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} too.
+# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
 kube_apiserver_bind_address: 0.0.0.0
 
 # A port range to reserve for services with NodePort visibility.
@@ -181,12 +181,12 @@ kube_encryption_resources: [secrets]
 
 # If non-empty, will use this string as identification instead of the actual hostname
 kube_override_hostname: >-
-  {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
+  {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
   {%- else -%}
   {{ inventory_hostname }}
   {%- endif -%}
 
-secrets_encryption_query: "resources[*].providers[0].{{kube_encryption_algorithm}}.keys[0].secret"
+secrets_encryption_query: "resources[*].providers[0].{{ kube_encryption_algorithm }}.keys[0].secret"
 
 ## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
 # tls_min_version: ""
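kube_override_hostname above pairs YAML's >- folded scalar with Jinja whitespace-control dashes: {%- and -%} strip the surrounding newlines, so the variable renders to a single token. Sketching both branches:

    # cloud_provider == 'aws'  -> kube_override_hostname: ''   (left empty so the cloud provider's node naming applies)
    # otherwise                -> kube_override_hostname: the inventory_hostname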
diff --git a/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml b/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml
index d01f511bdda762335309debc448f2df45f7cb525..64e2de785c726ca85189088b76585064296d1aed 100644
--- a/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml
+++ b/roles/kubernetes/control-plane/tasks/define-first-kube-control.yml
@@ -8,7 +8,7 @@
 
 - name: Set fact joined_control_planes
   set_fact:
-    joined_control_planes: "{{ ((kube_control_planes_raw.stdout| from_json)['items'])| default([]) | map (attribute='metadata') | map (attribute='name') | list }}"
+    joined_control_planes: "{{ ((kube_control_planes_raw.stdout | from_json)['items']) | default([]) | map(attribute='metadata') | map(attribute='name') | list }}"
   delegate_to: item
   loop: "{{ groups['kube_control_plane'] }}"
   when: kube_control_planes_raw is succeeded
@@ -16,4 +16,4 @@
 
 - name: Set fact first_kube_control_plane
   set_fact:
-    first_kube_control_plane: "{{ joined_control_planes|default([]) | first | default(groups['kube_control_plane']|first) }}"
+    first_kube_control_plane: "{{ joined_control_planes | default([]) | first | default(groups['kube_control_plane'] | first) }}"
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
index a4869fec8bfd0d2ea6e2b2a74334a30eee2934f9..f1c92aeee03df33e7848ec55c1f5b70ebc85595a 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
@@ -1,6 +1,7 @@
 ---
 - name: Set kubeadm_discovery_address
   set_fact:
+    # noqa: jinja[spacing]
     kubeadm_discovery_address: >-
       {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
       {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index 097fb0f44031d24291501ca66c217b3f18fa4379..4f1ea288d1c814c29b0d9ab6cb8e3113ddada0fb 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -52,26 +52,26 @@
     path: "{{ audit_policy_file | dirname }}"
     state: directory
     mode: 0640
-  when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
+  when: kubernetes_audit | default(false) or kubernetes_audit_webhook | default(false)
 
 - name: Write api audit policy yaml
   template:
     src: apiserver-audit-policy.yaml.j2
     dest: "{{ audit_policy_file }}"
     mode: 0640
-  when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
+  when: kubernetes_audit | default(false) or kubernetes_audit_webhook | default(false)
 
 - name: Write api audit webhook config yaml
   template:
     src: apiserver-audit-webhook-config.yaml.j2
     dest: "{{ audit_webhook_config_file }}"
     mode: 0640
-  when: kubernetes_audit_webhook|default(false)
+  when: kubernetes_audit_webhook | default(false)
 
 # Nginx LB (default). If kubeadm_config_api_fqdn is defined, use another LB via the kubeadm controlPlaneEndpoint.
 - name: set kubeadm_config_api_fqdn define
   set_fact:
-    kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}"
+    kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name | default('lb-apiserver.kubernetes.local') }}"
   when: loadbalancer_apiserver is defined
 
 - name: Set kubeadm api version to v1beta3
@@ -100,8 +100,8 @@
 
 - name: kubeadm | Push admission control config files
   template:
-    src: "{{ item|lower }}.yaml.j2"
-    dest: "{{ kube_config_dir }}/admission-controls/{{ item|lower }}.yaml"
+    src: "{{ item | lower }}.yaml.j2"
+    dest: "{{ kube_config_dir }}/admission-controls/{{ item | lower }}.yaml"
     mode: 0640
   when:
     - kube_apiserver_admission_control_config_file
@@ -123,8 +123,8 @@
       register: apiserver_sans_host_check
       changed_when: apiserver_sans_host_check.stdout is not search('does match certificate')
   vars:
-    apiserver_ips: "{{ apiserver_sans|map('ipaddr')|reject('equalto', False)|list }}"
-    apiserver_hosts: "{{ apiserver_sans|difference(apiserver_ips) }}"
+    apiserver_ips: "{{ apiserver_sans | map('ipaddr') | reject('equalto', False) | list }}"
+    apiserver_hosts: "{{ apiserver_sans | difference(apiserver_ips) }}"
   when:
     - kubeadm_already_run.stat.exists
     - not kube_external_ca_mode
@@ -186,7 +186,7 @@
 
 - name: set kubeadm certificate key
   set_fact:
-    kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}"
+    kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)', '\\1') | first }}"
   with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
   when:
     - kubeadm_certificate_key is not defined
diff --git a/roles/kubernetes/control-plane/tasks/main.yml b/roles/kubernetes/control-plane/tasks/main.yml
index 4df4783431ee3cef2213a5b92e671bda72015d31..2fab9d57cf27c6af3062b03abc6a3ec15414fab0 100644
--- a/roles/kubernetes/control-plane/tasks/main.yml
+++ b/roles/kubernetes/control-plane/tasks/main.yml
@@ -8,14 +8,14 @@
     src: webhook-token-auth-config.yaml.j2
     dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
     mode: 0640
-  when: kube_webhook_token_auth|default(false)
+  when: kube_webhook_token_auth | default(false)
 
 - name: Create webhook authorization config
   template:
     src: webhook-authorization-config.yaml.j2
     dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
     mode: 0640
-  when: kube_webhook_authorization|default(false)
+  when: kube_webhook_authorization | default(false)
 
 - name: Create kube-scheduler config
   template:
diff --git a/roles/kubernetes/control-plane/tasks/pre-upgrade.yml b/roles/kubernetes/control-plane/tasks/pre-upgrade.yml
index 4c33624e46f0775ca56ba560ecee39cf82552530..2d7dce5bd0db557b7470dac661b1c5dbf06ca6b4 100644
--- a/roles/kubernetes/control-plane/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/control-plane/tasks/pre-upgrade.yml
@@ -6,7 +6,7 @@
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   register: kube_apiserver_manifest_replaced
-  when: etcd_secret_changed|default(false)
+  when: etcd_secret_changed | default(false)
 
 - name: "Pre-upgrade | Delete master containers forcefully"  # noqa no-handler
   shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
diff --git a/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 b/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2
index 34f5f188ce6d58474978f26ee92f97575c2b2ae5..fc4d0efbfd0d185eeeb55d2ebeda2222a581aeca 100644
--- a/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2
@@ -4,6 +4,6 @@ plugins:
 {% for plugin in kube_apiserver_enable_admission_plugins %}
 {% if plugin in kube_apiserver_admission_plugins_needs_configuration %}
 - name: {{ plugin }}
-  path: {{ kube_config_dir }}/{{ plugin|lower }}.yaml
+  path: {{ kube_config_dir }}/{{ plugin | lower }}.yaml
 {% endif %}
 {% endfor %}
diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2
index e2d41fbe0e1f5b9aa719ba7e175f25259fb39c32..d284c754359d580c56e5f473ae9a5ac9abe6d81f 100644
--- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta3.yaml.j2
@@ -13,7 +13,7 @@ localAPIEndpoint:
 certificateKey: {{ kubeadm_certificate_key }}
 {% endif %}
 nodeRegistration:
-{% if kube_override_hostname|default('') %}
+{% if kube_override_hostname | default('') %}
   name: {{ kube_override_hostname }}
 {% endif %}
 {% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %}
@@ -89,7 +89,7 @@ etcd:
 {% endfor %}
 {% endif %}
 dns:
-  imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }}
+  imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$', '') }}
   imageTag: {{ coredns_image_tag }}
 networking:
   dnsDomain: {{ dns_domain }}
@@ -100,7 +100,7 @@ networking:
 {% if kubeadm_feature_gates %}
 featureGates:
 {%   for feature in kubeadm_feature_gates %}
-  {{ feature|replace("=", ": ") }}
+  {{ feature | replace("=", ": ") }}
 {%   endfor %}
 {% endif %}
 kubernetesVersion: {{ kube_version }}
@@ -124,13 +124,13 @@ apiServer:
 {% endif %}
     authorization-mode: {{ authorization_modes | join(',') }}
     bind-address: {{ kube_apiserver_bind_address }}
-{% if kube_apiserver_enable_admission_plugins|length > 0 %}
+{% if kube_apiserver_enable_admission_plugins | length > 0 %}
     enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
 {% endif %}
 {% if kube_apiserver_admission_control_config_file %}
     admission-control-config-file: {{ kube_config_dir }}/admission-controls.yaml
 {% endif %}
-{% if kube_apiserver_disable_admission_plugins|length > 0 %}
+{% if kube_apiserver_disable_admission_plugins | length > 0 %}
     disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
 {% endif %}
     apiserver-count: "{{ kube_apiserver_count }}"
@@ -144,13 +144,13 @@ apiServer:
     profiling: "{{ kube_profiling }}"
     request-timeout: "{{ kube_apiserver_request_timeout }}"
     enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
-{% if kube_token_auth|default(true) %}
+{% if kube_token_auth | default(true) %}
     token-auth-file: {{ kube_token_dir }}/known_tokens.csv
 {% endif %}
 {% if kube_apiserver_service_account_lookup %}
     service-account-lookup: "{{ kube_apiserver_service_account_lookup }}"
 {% endif %}
-{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
+{% if kube_oidc_auth | default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
     oidc-issuer-url: "{{ kube_oidc_url }}"
     oidc-client-id: "{{ kube_oidc_client_id }}"
 {%   if kube_oidc_ca_file is defined %}
@@ -169,17 +169,17 @@ apiServer:
     oidc-groups-prefix: "{{ kube_oidc_groups_prefix }}"
 {%   endif %}
 {% endif %}
-{% if kube_webhook_token_auth|default(false) %}
+{% if kube_webhook_token_auth | default(false) %}
     authentication-token-webhook-config-file: {{ kube_config_dir }}/webhook-token-auth-config.yaml
 {% endif %}
-{% if kube_webhook_authorization|default(false) %}
+{% if kube_webhook_authorization | default(false) %}
     authorization-webhook-config-file: {{ kube_config_dir }}/webhook-authorization-config.yaml
 {% endif %}
 {% if kube_encrypt_secret_data %}
     encryption-provider-config: {{ kube_cert_dir }}/secrets_encryption.yaml
 {% endif %}
     storage-backend: {{ kube_apiserver_storage_backend }}
-{% if kube_api_runtime_config|length > 0 %}
+{% if kube_api_runtime_config | length > 0 %}
     runtime-config: {{ kube_api_runtime_config | join(',') }}
 {% endif %}
     allow-privileged: "true"
@@ -223,24 +223,24 @@ apiServer:
 {% if kubelet_rotate_server_certificates %}
     kubelet-certificate-authority: {{ kube_cert_dir }}/ca.crt
 {% endif %}
-{% if kubernetes_audit or kube_token_auth|default(true) or kube_webhook_token_auth|default(false) or ( cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] ) or apiserver_extra_volumes or ssl_ca_dirs|length %}
+{% if kubernetes_audit or kube_token_auth | default(true) or kube_webhook_token_auth | default(false) or (cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"]) or apiserver_extra_volumes or ssl_ca_dirs | length %}
   extraVolumes:
 {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
   - name: cloud-config
     hostPath: {{ kube_config_dir }}/cloud_config
     mountPath: {{ kube_config_dir }}/cloud_config
 {% endif %}
-{% if kube_token_auth|default(true) %}
+{% if kube_token_auth | default(true) %}
   - name: token-auth-config
     hostPath: {{ kube_token_dir }}
     mountPath: {{ kube_token_dir }}
 {% endif %}
-{% if kube_webhook_token_auth|default(false) %}
+{% if kube_webhook_token_auth | default(false) %}
   - name: webhook-token-auth-config
     hostPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
     mountPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
 {% endif %}
-{% if kube_webhook_authorization|default(false) %}
+{% if kube_webhook_authorization | default(false) %}
   - name: webhook-authorization-config
     hostPath: {{ kube_config_dir }}/webhook-authorization-config.yaml
     mountPath: {{ kube_config_dir }}/webhook-authorization-config.yaml
@@ -269,7 +269,7 @@ apiServer:
     mountPath: {{ volume.mountPath }}
     readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }}
 {% endfor %}
-{% if ssl_ca_dirs|length %}
+{% if ssl_ca_dirs | length %}
 {% for dir in ssl_ca_dirs %}
   - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
     hostPath: {{ dir }}
@@ -316,7 +316,7 @@ controllerManager:
     configure-cloud-routes: "false"
 {% endif %}
 {% if kubelet_flexvolumes_plugins_dir is defined %}
-    flex-volume-plugin-dir: {{kubelet_flexvolumes_plugins_dir}}
+    flex-volume-plugin-dir: {{ kubelet_flexvolumes_plugins_dir }}
 {% endif %}
 {% if tls_min_version is defined %}
     tls-min-version: {{ tls_min_version }}
@@ -352,7 +352,7 @@ scheduler:
     feature-gates: "{{ kube_scheduler_feature_gates | default(kube_feature_gates, true) | join(',') }}"
 {% endif %}
     profiling: "{{ kube_profiling }}"
-{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
+{% if kube_kubeadm_scheduler_extra_args | length > 0 %}
 {% for key in kube_kubeadm_scheduler_extra_args %}
     {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
 {% endfor %}
@@ -422,7 +422,7 @@ portRange: {{ kube_proxy_port_range }}
 {% set feature_gates = ( kube_proxy_feature_gates | default(kube_feature_gates, true) ) %}
 featureGates:
 {%   for feature in feature_gates %}
-  {{ feature|replace("=", ": ") }}
+  {{ feature | replace("=", ": ") }}
 {%   endfor %}
 {% endif %}
 {# DNS settings for kubelet #}
@@ -448,6 +448,6 @@ clusterDNS:
 {% set feature_gates = ( kubelet_feature_gates | default(kube_feature_gates, true) ) %}
 featureGates:
 {%   for feature in feature_gates %}
-  {{ feature|replace("=", ": ") }}
+  {{ feature | replace("=", ": ") }}
 {%   endfor %}
 {% endif %}
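
For illustration, the replace('=', ': ') pattern used in the featureGates blocks above turns the flag-style strings Kubespray keeps in its *_feature_gates lists into YAML mapping entries; assuming a hypothetical value:

    kube_feature_gates: ["RotateKubeletServerCertificate=true"]

the loop renders:

    featureGates:
      RotateKubeletServerCertificate: true
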
diff --git a/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2
index 78e399d5f9af946779b87650b0444e709b8cb9d5..fc696ae3eebc1c0625d9799753288fbb5a4276a4 100644
--- a/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubeadm-controlplane.v1beta3.yaml.j2
@@ -17,7 +17,7 @@ controlPlane:
     bindPort: {{ kube_apiserver_port }}
   certificateKey: {{ kubeadm_certificate_key }}
 nodeRegistration:
-  name: {{ kube_override_hostname|default(inventory_hostname) }}
+  name: {{ kube_override_hostname | default(inventory_hostname) }}
   criSocket: {{ cri_socket }}
 {% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %}
   taints:
diff --git a/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2 b/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2
index be41418d45d556b07191fc3e2bde04927f9de429..4be4b083d909530445fc8b1593a4877de89a4168 100644
--- a/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubescheduler-config.yaml.j2
@@ -1,5 +1,5 @@
 {% set kubescheduler_config_api_version = "v1beta3" %}
-apiVersion: kubescheduler.config.k8s.io/{{ kubescheduler_config_api_version|d('v1') }}
+apiVersion: kubescheduler.config.k8s.io/{{ kubescheduler_config_api_version | d('v1') }}
 kind: KubeSchedulerConfiguration
 clientConnection:
   kubeconfig: "{{ kube_config_dir }}/scheduler.conf"
diff --git a/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2 b/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2
index 0a650fa102ad4cf8b314b595b645f657369046c3..c973733066db38050cc1ee60f6db5292ecd1473b 100644
--- a/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/podsecurity.yaml.j2
@@ -9,9 +9,9 @@ defaults:
   warn: "{{ kube_pod_security_default_warn }}"
   warn-version: "{{ kube_pod_security_default_warn_version }}"
 exemptions:
-  usernames: {{ kube_pod_security_exemptions_usernames|to_json }}
-  runtimeClasses: {{ kube_pod_security_exemptions_runtime_class_names|to_json }}
-  namespaces: {{ kube_pod_security_exemptions_namespaces|to_json }}
+  usernames: {{ kube_pod_security_exemptions_usernames | to_json }}
+  runtimeClasses: {{ kube_pod_security_exemptions_runtime_class_names | to_json }}
+  namespaces: {{ kube_pod_security_exemptions_namespaces | to_json }}
 {% else %}
 # This file is intentionally left empty as kube_pod_security_use_default={{ kube_pod_security_use_default }}
 {% endif %}
diff --git a/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2 b/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2
index 9105bb69b83d02cfdc25c541c06cecd0a90d9e44..3c521ff124c87f746489118cef3a384b47b50bfd 100644
--- a/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2
@@ -2,7 +2,7 @@ apiVersion: apiserver.config.k8s.io/v1
 kind: EncryptionConfiguration
 resources:
   - resources:
-{{ kube_encryption_resources|to_nice_yaml|indent(4, True) }}
+{{ kube_encryption_resources | to_nice_yaml | indent(4, True) }}
     providers:
     - {{ kube_encryption_algorithm }}:
         keys:
diff --git a/roles/kubernetes/kubeadm/defaults/main.yml b/roles/kubernetes/kubeadm/defaults/main.yml
index 0449b8ae740de4d9f662b489e8154143fa9860fe..61b132e61f648b92cf514e3c825f16beefd90d4f 100644
--- a/roles/kubernetes/kubeadm/defaults/main.yml
+++ b/roles/kubernetes/kubeadm/defaults/main.yml
@@ -6,7 +6,7 @@ kubeadm_join_timeout: 120s
 
 # If non-empty, will use this string as identification instead of the actual hostname
 kube_override_hostname: >-
-  {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
+  {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
   {%- else -%}
   {{ inventory_hostname }}
   {%- endif -%}
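
A rough sketch of how this folded block evaluates, assuming stock variables: with cloud_provider set to 'aws' the body renders empty, so the override is skipped and the node keeps the name supplied by the cloud provider; otherwise it resolves to the inventory name. With a hypothetical host "node1":

    # cloud_provider: 'aws'     ->  kube_override_hostname: ""
    # cloud_provider undefined  ->  kube_override_hostname: "node1"  (inventory_hostname)
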
diff --git a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
index 651bcc39dd0d611b2e606752707af6dd31edae84..d39ea2b9f95693c55f64d5e473a46241661c8ade 100644
--- a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
+++ b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml
@@ -51,7 +51,7 @@
   register: "etcd_client_cert_serial_result"
   changed_when: false
   when:
-    - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
+    - inventory_hostname in groups['k8s_cluster'] | union(groups['calico_rr'] | default([])) | unique | sort
   tags:
     - network
 
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 6449e01e3f5f34f76fb5b4f2e14d91f757111125..c8b76f0197e3cee2c01381f22007c2e7a74425cb 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -1,6 +1,7 @@
 ---
 - name: Set kubeadm_discovery_address
   set_fact:
+    # noqa: jinja[spacing]
     kubeadm_discovery_address: >-
       {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
       {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
@@ -138,7 +139,7 @@
   args:
     executable: /bin/bash
   run_once: true
-  delegate_to: "{{ groups['kube_control_plane']|first }}"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
   delegate_facts: false
   when:
     - kubeadm_config_api_fqdn is not defined
@@ -158,7 +159,7 @@
 - name: Restart all kube-proxy pods to ensure that they load the new configmap
   command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
-  delegate_to: "{{ groups['kube_control_plane']|first }}"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
   delegate_facts: false
   when:
     - kubeadm_config_api_fqdn is not defined
diff --git a/roles/kubernetes/node-label/tasks/main.yml b/roles/kubernetes/node-label/tasks/main.yml
index 0904ffca7219080eb70b0c8c07d6595d24741c09..cda700ce02488b78683501865349d8e58abaa90e 100644
--- a/roles/kubernetes/node-label/tasks/main.yml
+++ b/roles/kubernetes/node-label/tasks/main.yml
@@ -17,10 +17,10 @@
 
 - name: Node label for nvidia GPU nodes
   set_fact:
-    role_node_labels: "{{ role_node_labels + [ 'nvidia.com/gpu=true' ] }}"
+    role_node_labels: "{{ role_node_labels + ['nvidia.com/gpu=true'] }}"
   when:
     - nvidia_gpu_nodes is defined
-    - nvidia_accelerator_enabled|bool
+    - nvidia_accelerator_enabled | bool
     - inventory_hostname in nvidia_gpu_nodes
 
 - name: Set inventory node label to empty list
@@ -29,8 +29,8 @@
 
 - name: Populate inventory node label
   set_fact:
-    inventory_node_labels: "{{ inventory_node_labels + [ '%s=%s'|format(item.key, item.value) ] }}"
-  loop: "{{ node_labels|d({})|dict2items }}"
+    inventory_node_labels: "{{ inventory_node_labels + ['%s=%s' | format(item.key, item.value)] }}"
+  loop: "{{ node_labels | d({}) | dict2items }}"
   when:
     - node_labels is defined
     - node_labels is mapping
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 1eb288c1798d1ebe3b578246f135b91e4c18f760..f5dbf38ab0ce6a91875b6f901c0bcc5fb479ef2c 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -141,7 +141,7 @@ kubelet_node_custom_flags: []
 
 # If non-empty, will use this string as identification instead of the actual hostname
 kube_override_hostname: >-
-  {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
+  {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
   {%- else -%}
   {{ inventory_hostname }}
   {%- endif -%}
@@ -161,14 +161,14 @@ sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
 # For the openstack integration kubelet will need credentials to access
 # openstack apis like nova and cinder. Per default this values will be
 # read from the environment.
-openstack_auth_url: "{{ lookup('env','OS_AUTH_URL')  }}"
-openstack_username: "{{ lookup('env','OS_USERNAME')  }}"
-openstack_password: "{{ lookup('env','OS_PASSWORD')  }}"
-openstack_region: "{{ lookup('env','OS_REGION_NAME')  }}"
-openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true),true) }}"
-openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
-openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
-openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
+openstack_auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
+openstack_username: "{{ lookup('env', 'OS_USERNAME') }}"
+openstack_password: "{{ lookup('env', 'OS_PASSWORD') }}"
+openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
+openstack_tenant_id: "{{ lookup('env', 'OS_TENANT_ID') | default(lookup('env', 'OS_PROJECT_ID') | default(lookup('env', 'OS_PROJECT_NAME'), true), true) }}"
+openstack_tenant_name: "{{ lookup('env', 'OS_TENANT_NAME') }}"
+openstack_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
+openstack_domain_id: "{{ lookup('env', 'OS_USER_DOMAIN_ID') }}"
 
 # For the vsphere integration, kubelet will need credentials to access
 # vsphere apis
@@ -186,7 +186,7 @@ vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
 
 vsphere_scsi_controller_type: pvscsi
 # vsphere_public_network is name of the network the VMs are joined to
-vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
+vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK') | default('') }}"
 
 ## When azure is used, you need to also set the following variables.
 ## see docs/azure.md for details on how to get these values
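
For context, the boolean second argument to default() in the openstack_tenant_id lookup above makes empty environment variables fall through as well, so the tenant id is taken from the first non-empty of OS_TENANT_ID, OS_PROJECT_ID and OS_PROJECT_NAME; e.g. with hypothetical values:

    OS_TENANT_ID=""  OS_PROJECT_ID="abc123"  ->  openstack_tenant_id: "abc123"
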
diff --git a/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml b/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml
index 62337fc296713ed92ead33969b3f67c435be0523..8ff55cf991f6dff0deb8c563db9279b31746d73f 100644
--- a/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml
+++ b/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml
@@ -61,15 +61,15 @@
 
 - name: "check azure_exclude_master_from_standard_lb is a bool"
   assert:
-    that: azure_exclude_master_from_standard_lb |type_debug == 'bool'
+    that: azure_exclude_master_from_standard_lb | type_debug == 'bool'
 
 - name: "check azure_disable_outbound_snat is a bool"
   assert:
-    that: azure_disable_outbound_snat |type_debug == 'bool'
+    that: azure_disable_outbound_snat | type_debug == 'bool'
 
 - name: "check azure_use_instance_metadata is a bool"
   assert:
-    that: azure_use_instance_metadata |type_debug == 'bool'
+    that: azure_use_instance_metadata | type_debug == 'bool'
 
 - name: check azure_vmtype value
   fail:
diff --git a/roles/kubernetes/node/tasks/facts.yml b/roles/kubernetes/node/tasks/facts.yml
index d68d5bdde8303e97e72c37213dbd90b35405dbe9..43af5ccebd1ac3fb42903255acb6346fdf4f0b6a 100644
--- a/roles/kubernetes/node/tasks/facts.yml
+++ b/roles/kubernetes/node/tasks/facts.yml
@@ -52,9 +52,9 @@
   include_vars: "{{ item }}"
   with_first_found:
   - files:
-    - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
-    - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
-    - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
-    - "{{ ansible_distribution|lower }}.yml"
-    - "{{ ansible_os_family|lower }}.yml"
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
+    - "{{ ansible_distribution | lower }}.yml"
+    - "{{ ansible_os_family | lower }}.yml"
     skip: true
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index 99babd64f7232f64d376df17d7ccc67f0ce2d05a..e79ca5c4d052fbad9990495acea11edeecbdaeae 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -151,7 +151,7 @@
 
 - name: Test if openstack_cacert is a base64 string
   set_fact:
-    openstack_cacert_is_base64: "{% if openstack_cacert is search ('^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$') %}true{% else %}false{% endif %}"
+    openstack_cacert_is_base64: "{% if openstack_cacert is search('^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$') %}true{% else %}false{% endif %}"
   when:
     - cloud_provider is defined
     - cloud_provider == 'openstack'
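
Note that the alternation inside the pattern passed to search() has to stay unspaced ('{3}=|[A-Za-z0-9+/]{2}=='): whitespace around the '|' would be matched literally, and padded base64 would no longer be detected. A quick check with hypothetical inputs:

    openstack_cacert: "LS0tLS1CRUdJTg=="       ->  openstack_cacert_is_base64: true
    openstack_cacert: "/etc/ssl/certs/ca.pem"  ->  openstack_cacert_is_base64: false
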
diff --git a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
index d8cc557f4935b472afb846ba710a8fb951bcf116..995919fa0b67a5c1dbaca6ea2ef1cf1becf50992 100644
--- a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
+++ b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
@@ -34,13 +34,13 @@ healthzPort: {{ kubelet_healthz_port }}
 healthzBindAddress: {{ kubelet_healthz_bind_address }}
 kubeletCgroups: {{ kubelet_kubelet_cgroups }}
 clusterDomain: {{ dns_domain }}
-{% if kubelet_protect_kernel_defaults|bool %}
+{% if kubelet_protect_kernel_defaults | bool %}
 protectKernelDefaults: true
 {% endif %}
-{% if kubelet_rotate_certificates|bool %}
+{% if kubelet_rotate_certificates | bool %}
 rotateCertificates: true
 {% endif %}
-{% if kubelet_rotate_server_certificates|bool %}
+{% if kubelet_rotate_server_certificates | bool %}
 serverTLSBootstrap: true
 {% endif %}
 {# DNS settings for kubelet #}
@@ -60,10 +60,10 @@ clusterDNS:
 - {{ dns_address }}
 {% endfor %}
 {# Node reserved CPU/memory #}
-{% if kube_reserved|bool %}
+{% if kube_reserved | bool %}
 kubeReservedCgroup: {{ kube_reserved_cgroups }}
 kubeReserved:
-{% if is_kube_master|bool %}
+{% if is_kube_master | bool %}
   cpu: {{ kube_master_cpu_reserved }}
   memory: {{ kube_master_memory_reserved }}
 {% if kube_master_ephemeral_storage_reserved is defined %}
@@ -83,10 +83,10 @@ kubeReserved:
 {% endif %}
 {% endif %}
 {% endif %}
-{% if system_reserved|bool %}
+{% if system_reserved | bool %}
 systemReservedCgroup: {{ system_reserved_cgroups }}
 systemReserved:
-{% if is_kube_master|bool %}
+{% if is_kube_master | bool %}
   cpu: {{ system_master_cpu_reserved }}
   memory: {{ system_master_memory_reserved }}
 {% if system_master_ephemeral_storage_reserved is defined %}
@@ -106,10 +106,10 @@ systemReserved:
 {% endif %}
 {% endif %}
 {% endif %}
-{% if is_kube_master|bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %}
+{% if is_kube_master | bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %}
 evictionHard:
   {{ eviction_hard_control_plane | to_nice_yaml(indent=2) | indent(2) }}
-{% elif not is_kube_master|bool and eviction_hard is defined and eviction_hard %}
+{% elif not is_kube_master | bool and eviction_hard is defined and eviction_hard %}
 evictionHard:
   {{ eviction_hard | to_nice_yaml(indent=2) | indent(2) }}
 {% endif %}
@@ -123,7 +123,7 @@ resolvConf: "{{ kube_resolv_conf }}"
 {% if kubelet_feature_gates or kube_feature_gates %}
 featureGates:
 {% for feature in (kubelet_feature_gates | default(kube_feature_gates, true)) %}
-  {{ feature|replace("=", ": ") }}
+  {{ feature | replace("=", ": ") }}
 {% endfor %}
 {% endif %}
 {% if tls_min_version is defined %}
diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml
index 147033f3884d3ead9314219d29db044d194bb1e2..f7670318fb3fb253be2151de21dee6d6f3ae77c0 100644
--- a/roles/kubernetes/preinstall/defaults/main.yml
+++ b/roles/kubernetes/preinstall/defaults/main.yml
@@ -73,7 +73,7 @@ ping_access_ip: true
 ntp_enabled: false
 # The package to install which provides NTP functionality.
 # The default is ntp for most platforms, or chrony on RHEL/CentOS 7 and later.
-# The ntp_package can be one of ['ntp','chrony']
+# The ntp_package can be one of ['ntp', 'chrony']
 ntp_package: >-
       {% if ansible_os_family == "RedHat" -%}
       chrony
diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml
index 7cb0c318502080889e99d5e769a580feeeedc9bb..8ae931f267d728e39ea944f05ed4dd6ad42c45a1 100644
--- a/roles/kubernetes/preinstall/handlers/main.yml
+++ b/roles/kubernetes/preinstall/handlers/main.yml
@@ -45,7 +45,7 @@
     - Preinstall | restart kube-controller-manager crio/containerd
     - Preinstall | restart kube-apiserver docker
     - Preinstall | restart kube-apiserver crio/containerd
-  when: not dns_early|bool
+  when: not dns_early | bool
 
 # FIXME(mattymo): Also restart for kubeadm mode
 - name: Preinstall | kube-apiserver configured
diff --git a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
index d8638ff2b0f4c4a453f4f516dd23d65d9c449383..8d4c9ac319c8d57415ab60631707b2e93a1af43a 100644
--- a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml
@@ -84,12 +84,12 @@
 
 - name: Stop if /etc/resolv.conf not configured nameservers
   assert:
-    that: configured_nameservers|length>0
+    that: configured_nameservers | length > 0
     fail_msg: "nameserver should not be empty in /etc/resolv.conf"
   when:
     - not ignore_assert_errors
     - configured_nameservers is defined
-    - not (upstream_dns_servers is defined and upstream_dns_servers|length > 0)
+    - not (upstream_dns_servers is defined and upstream_dns_servers | length > 0)
     - not (disable_host_nameservers | default(false))
 
 - name: NetworkManager | Check if host has NetworkManager
@@ -111,14 +111,14 @@
 - name: set default dns if remove_default_searchdomains is false
   set_fact:
     default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"]
-  when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0)
+  when: not remove_default_searchdomains | default() | bool or (remove_default_searchdomains | default() | bool and searchdomains | default([]) | length == 0)
 
 - name: set dns facts
   set_fact:
     resolvconf: >-
       {%- if resolvconf.rc == 0 and resolvconfd_path.stat.isdir is defined and resolvconfd_path.stat.isdir -%}true{%- else -%}false{%- endif -%}
     bogus_domains: |-
-      {% for d in default_searchdomains|default([]) + searchdomains|default([]) -%}
+      {% for d in default_searchdomains | default([]) + searchdomains | default([]) -%}
       {{ dns_domain }}.{{ d }}./{{ d }}.{{ d }}./com.{{ d }}./
       {%- endfor %}
     cloud_resolver: "{{ ['169.254.169.254'] if cloud_provider is defined and cloud_provider == 'gce' else
@@ -142,9 +142,9 @@
   set_fact:
     resolvconffile: /etc/resolv.conf
     base: >-
-      {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/base{%- endif -%}
+      {%- if resolvconf | bool -%}/etc/resolvconf/resolv.conf.d/base{%- endif -%}
     head: >-
-      {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/head{%- endif -%}
+      {%- if resolvconf | bool -%}/etc/resolvconf/resolv.conf.d/head{%- endif -%}
   when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos
 
 - name: target temporary resolvconf cloud init file (Flatcar Container Linux by Kinvolk / Fedora CoreOS)
@@ -191,36 +191,36 @@
 - name: generate search domains to resolvconf
   set_fact:
     searchentries:
-      search {{ (default_searchdomains|default([]) + searchdomains|default([])) | join(' ') }}
+      search {{ (default_searchdomains | default([]) + searchdomains | default([])) | join(' ') }}
     domainentry:
       domain {{ dns_domain }}
     supersede_search:
-      supersede domain-search "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join('", "') }}";
+      supersede domain-search "{{ (default_searchdomains | default([]) + searchdomains | default([])) | join('", "') }}";
     supersede_domain:
       supersede domain-name "{{ dns_domain }}";
 
 - name: pick coredns cluster IP or default resolver
   set_fact:
     coredns_server: |-
-      {%- if dns_mode == 'coredns' and not dns_early|bool -%}
-        {{ [ skydns_server ] }}
-      {%- elif dns_mode == 'coredns_dual' and not dns_early|bool -%}
-        {{ [ skydns_server ] + [ skydns_server_secondary ] }}
-      {%- elif dns_mode == 'manual' and not dns_early|bool -%}
-        {{ ( manual_dns_server.split(',') | list) }}
-      {%- elif dns_mode == 'none' and not dns_early|bool -%}
+      {%- if dns_mode == 'coredns' and not dns_early | bool -%}
+        {{ [skydns_server] }}
+      {%- elif dns_mode == 'coredns_dual' and not dns_early | bool -%}
+        {{ [skydns_server] + [skydns_server_secondary] }}
+      {%- elif dns_mode == 'manual' and not dns_early | bool -%}
+        {{ (manual_dns_server.split(',') | list) }}
+      {%- elif dns_mode == 'none' and not dns_early | bool -%}
         []
-      {%- elif dns_early|bool -%}
-        {{ upstream_dns_servers|default([]) }}
+      {%- elif dns_early | bool -%}
+        {{ upstream_dns_servers | default([]) }}
       {%- endif -%}
 
 # This task should only run after cluster/nodelocal DNS is up, otherwise all DNS lookups will timeout
 - name: generate nameservers for resolvconf, including cluster DNS
   set_fact:
     nameserverentries: |-
-      {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([]) + (configured_nameservers|d([]) if not disable_host_nameservers|d()|bool else [])) | unique | join(',') }}
+      {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server | d([]) if not enable_nodelocaldns else []) + nameservers | d([]) + cloud_resolver | d([]) + (configured_nameservers | d([]) if not disable_host_nameservers | d() | bool else [])) | unique | join(',') }}
     supersede_nameserver:
-      supersede domain-name-servers {{ ( ( [nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }};
+      supersede domain-name-servers {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server | d([]) if not enable_nodelocaldns else []) + nameservers | d([]) + cloud_resolver | d([])) | unique | join(', ') }};
   when: not dns_early or dns_late
 
 # This task should run instead of the above task when cluster/nodelocal DNS hasn't
@@ -228,20 +228,20 @@
 - name: generate nameservers for resolvconf, not including cluster DNS
   set_fact:
     nameserverentries: |-
-      {{ ( nameservers|d([]) + cloud_resolver|d([]) + configured_nameservers|d([])) | unique | join(',') }}
+      {{ (nameservers | d([]) + cloud_resolver | d([]) + configured_nameservers | d([])) | unique | join(',') }}
     supersede_nameserver:
-      supersede domain-name-servers {{ ( nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }};
+      supersede domain-name-servers {{ (nameservers | d([]) + cloud_resolver | d([])) | unique | join(', ') }};
   when: dns_early and not dns_late
 
 - name: gather os specific variables
   include_vars: "{{ item }}"
   with_first_found:
     - files:
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
-        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
-        - "{{ ansible_distribution|lower }}.yml"
-        - "{{ ansible_os_family|lower }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
+        - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
+        - "{{ ansible_distribution | lower }}.yml"
+        - "{{ ansible_os_family | lower }}.yml"
         - defaults.yml
       paths:
         - ../vars
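
For reference, with_first_found loads the first file in this list that exists, so on a hypothetical Ubuntu 22.04 host the candidates evaluate, in order, to:

    ubuntu-22.04.yml, ubuntu-jammy.yml, ubuntu-22.yml, ubuntu.yml, debian.yml, defaults.yml
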
diff --git a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
index b8f2b479b90057a5ba689881735e6da47c511474..3b4ec4bd79093d8512d58f68ddc4ba84cd33c2e5 100644
--- a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
@@ -45,7 +45,7 @@
 # simplify this items-list when   https://github.com/ansible/ansible/issues/15753  is resolved
 - name: "Stop if known booleans are set as strings (Use JSON format on CLI: -e \"{'key': true }\")"
   assert:
-    that: item.value|type_debug == 'bool'
+    that: item.value | type_debug == 'bool'
     msg: "{{ item.value }} isn't a bool"
   run_once: yes
   with_items:
@@ -58,7 +58,7 @@
 
 - name: Stop if even number of etcd hosts
   assert:
-    that: groups.etcd|length is not divisibleby 2
+    that: groups.etcd | length is not divisibleby 2
   when:
     - not ignore_assert_errors
     - inventory_hostname in groups.get('etcd',[])
@@ -102,6 +102,7 @@
 
 - name: Ensure ping package
   package:
+    # noqa: jinja[spacing]
     name: >-
           {%- if ansible_os_family == 'Debian' -%}
           iputils-ping
@@ -207,7 +208,7 @@
 
 - name: Stop if unknown cert_management
   assert:
-    that: cert_management|d('script') in ['script', 'none']
+    that: cert_management | d('script') in ['script', 'none']
     msg: "cert_management can only be 'script' or 'none'"
   run_once: true
 
diff --git a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
index 4397cdd63a14c6fe3305846868fab44561ed2dfa..884ffbb49773a27fadb96838fd8a718054c0c3e2 100644
--- a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
+++ b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml
@@ -13,7 +13,7 @@
       {% for item in nameserverentries.split(',') %}
       nameserver {{ item }}
       {% endfor %}
-      options ndots:{{ ndots }} timeout:{{ dns_timeout|default('2') }} attempts:{{ dns_attempts|default('2') }}
+      options ndots:{{ ndots }} timeout:{{ dns_timeout | default('2') }} attempts:{{ dns_attempts | default('2') }}
     state: present
     insertbefore: BOF
     create: yes
@@ -28,7 +28,7 @@
     regexp: '^{{ item[1] }}[^#]*(?=# Ansible entries BEGIN)'
     backup: "{{ not resolvconf_stat.stat.islnk }}"
   with_nested:
-    - "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}"
+    - "{{ [resolvconffile, base | default(''), head | default('')] | difference(['']) }}"
     - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ]
   notify: Preinstall | propagate resolvconf to k8s components
 
@@ -39,7 +39,7 @@
     replace: '\1'
     backup: "{{ not resolvconf_stat.stat.islnk }}"
   with_nested:
-    - "{{ [resolvconffile, base|default(''), head|default('')] | difference(['']) }}"
+    - "{{ [resolvconffile, base | default(''), head | default('')] | difference(['']) }}"
     - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ]
   notify: Preinstall | propagate resolvconf to k8s components
 
diff --git a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
index 9ad5f7d107c6d5453cb9da12516aff8d6f3ce38d..ae5e68914daa12ada1f2580d752177ec69285b6d 100644
--- a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
+++ b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml
@@ -12,14 +12,14 @@
 - name: set default dns if remove_default_searchdomains is false
   set_fact:
     default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"]
-  when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0)
+  when: not remove_default_searchdomains | default() | bool or (remove_default_searchdomains | default() | bool and searchdomains | default([]) | length == 0)
 
 - name: NetworkManager | Add DNS search to NM configuration
   community.general.ini_file:
     path: /etc/NetworkManager/conf.d/dns.conf
     section: global-dns
     option: searches
-    value: "{{ (default_searchdomains|default([]) + searchdomains|default([])) | join(',') }}"
+    value: "{{ (default_searchdomains | default([]) + searchdomains | default([])) | join(',') }}"
     mode: '0600'
     backup: yes
   notify: Preinstall | update resolvconf for networkmanager
@@ -29,7 +29,7 @@
     path: /etc/NetworkManager/conf.d/dns.conf
     section: global-dns
     option: options
-    value: "ndots:{{ ndots }},timeout:{{ dns_timeout|default('2') }},attempts:{{ dns_attempts|default('2') }}"
+    value: "ndots:{{ ndots }},timeout:{{ dns_timeout | default('2') }},attempts:{{ dns_attempts | default('2') }}"
     mode: '0600'
     backup: yes
   notify: Preinstall | update resolvconf for networkmanager
diff --git a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
index b4fccfb896a92078a2db4a67efef97717a36e784..eb81d7d8be1434ed25dffcfd057bb89e1d7d5fee 100644
--- a/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
+++ b/roles/kubernetes/preinstall/tasks/0070-system-packages.yml
@@ -55,7 +55,7 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when:
     - ansible_distribution == "Fedora"
-    - ansible_distribution_major_version|int >= 30
+    - ansible_distribution_major_version | int >= 30
     - not is_fedora_coreos
   changed_when: False
   tags:
@@ -68,18 +68,18 @@
   when:
     - ansible_os_family == "RedHat"
     - not is_fedora_coreos
-    - epel_enabled|bool
+    - epel_enabled | bool
   tags:
     - bootstrap-os
 
 - name: Update common_required_pkgs with ipvsadm when kube_proxy_mode is ipvs
   set_fact:
-    common_required_pkgs: "{{ common_required_pkgs|default([]) + ['ipvsadm', 'ipset'] }}"
+    common_required_pkgs: "{{ common_required_pkgs | default([]) + ['ipvsadm', 'ipset'] }}"
   when: kube_proxy_mode == 'ipvs'
 
 - name: Install packages requirements
   package:
-    name: "{{ required_pkgs | default([]) | union(common_required_pkgs|default([])) }}"
+    name: "{{ required_pkgs | default([]) | union(common_required_pkgs | default([])) }}"
     state: present
   register: pkgs_task_result
   until: pkgs_task_result is succeeded
diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
index 2bf5523515ff075570562f538bcfb67325243359..87fb176672552309fe617a4282449100f82b105a 100644
--- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
@@ -119,7 +119,7 @@
     - { name: kernel.panic_on_oops, value: 1 }
     - { name: vm.overcommit_memory, value: 1 }
     - { name: vm.panic_on_oom, value: 0 }
-  when: kubelet_protect_kernel_defaults|bool
+  when: kubelet_protect_kernel_defaults | bool
 
 - name: Check dummy module
   community.general.modprobe:
diff --git a/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml b/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml
index c2e42366d61027e1215db7db026544ed444ca2d3..4efe1e3a1db28b3f8de3ffcd3f71accbf256556b 100644
--- a/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0081-ntp-configurations.yml
@@ -17,6 +17,7 @@
 
 - name: Set fact NTP settings
   set_fact:
+    # noqa: jinja[spacing]
     ntp_config_file: >-
       {% if ntp_package == "ntp" -%}
       /etc/ntp.conf
@@ -25,6 +26,7 @@
       {%- else -%}
       /etc/chrony/chrony.conf
       {%- endif -%}
+    # noqa: jinja[spacing]
     ntp_service_name: >-
       {% if ntp_package == "chrony" -%}
       chronyd
@@ -51,6 +53,7 @@
     - ntp_force_sync_immediately
 
 - name: Force Sync NTP Immediately
+  # noqa: jinja[spacing]
   command: >-
       timeout -k 60s 60s
       {% if ntp_package == "ntp" -%}
diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
index ed5ce291fbdecae0c3b6b617f3cd0749b256ab56..d38ef5857630562c5da6d9122704057e556e8e56 100644
--- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
+++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml
@@ -4,7 +4,7 @@
     - name: Hosts | create list from inventory
       set_fact:
         etc_hosts_inventory_block: |-
-          {% for item in (groups['k8s_cluster'] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%}
+          {% for item in (groups['k8s_cluster'] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
           {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%}
           {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}
           {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }} {% else %} {{ item }}.{{ dns_domain }} {{ item }} {% endif %}
@@ -51,8 +51,8 @@
     - name: Hosts | Extract existing entries for localhost from hosts file
       set_fact:
         etc_hosts_localhosts_dict: >-
-          {%- set splitted = (item | regex_replace('[ \t]+', ' ')|regex_replace('#.*$')|trim).split( ' ') -%}
-          {{ etc_hosts_localhosts_dict|default({}) | combine({splitted[0]: splitted[1::] }) }}
+          {%- set splitted = (item | regex_replace('[ \t]+', ' ') | regex_replace('#.*$') | trim).split(' ') -%}
+          {{ etc_hosts_localhosts_dict | default({}) | combine({splitted[0]: splitted[1::]}) }}
       with_items: "{{ (etc_hosts_content['content'] | b64decode).splitlines() }}"
       when:
         - etc_hosts_content.content is defined
@@ -61,19 +61,19 @@
     - name: Hosts | Update target hosts file entries dict with required entries
       set_fact:
         etc_hosts_localhosts_dict_target: >-
-          {%- set target_entries = (etc_hosts_localhosts_dict|default({})).get(item.key, []) | difference(item.value.get('unexpected' ,[])) -%}
-          {{ etc_hosts_localhosts_dict_target|default({}) | combine({item.key: (target_entries + item.value.expected)|unique}) }}
-      loop: "{{ etc_hosts_localhost_entries|dict2items }}"
+          {%- set target_entries = (etc_hosts_localhosts_dict | default({})).get(item.key, []) | difference(item.value.get('unexpected', [])) -%}
+          {{ etc_hosts_localhosts_dict_target | default({}) | combine({item.key: (target_entries + item.value.expected) | unique}) }}
+      loop: "{{ etc_hosts_localhost_entries | dict2items }}"
 
     - name: Hosts | Update (if necessary) hosts file
       lineinfile:
         dest: /etc/hosts
-        line: "{{ item.key }} {{ item.value|join(' ') }}"
+        line: "{{ item.key }} {{ item.value | join(' ') }}"
         regexp: "^{{ item.key }}.*$"
         state: present
         backup: yes
         unsafe_writes: yes
-      loop: "{{ etc_hosts_localhosts_dict_target|default({})|dict2items }}"
+      loop: "{{ etc_hosts_localhosts_dict_target | default({}) | dict2items }}"
   when: populate_localhost_entries_to_hosts_file
 
 # gather facts to update ansible_fqdn
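
To make the "Hosts | Extract existing entries" parsing above concrete, a hypothetical hosts entry is collapsed to single spaces, stripped of trailing comments and trimmed before the split:

    "127.0.0.1   localhost localhost4  # IPv4 loopback"
    ->  splitted = ['127.0.0.1', 'localhost', 'localhost4']
    ->  etc_hosts_localhosts_dict: {'127.0.0.1': ['localhost', 'localhost4']}
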
diff --git a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
index 50a62026dbabe14743966202445c0f2b564b221a..da3814715c5b01a092209c61339029864d93f205 100644
--- a/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
+++ b/roles/kubernetes/preinstall/tasks/0100-dhclient-hooks.yml
@@ -2,7 +2,7 @@
 - name: Configure dhclient to supersede search/domain/nameservers
   blockinfile:
     block: |-
-      {% for item in [ supersede_domain, supersede_search, supersede_nameserver ] -%}
+      {% for item in [supersede_domain, supersede_search, supersede_nameserver] -%}
       {{ item }}
       {% endfor %}
     path: "{{ dhclientconffile }}"
diff --git a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
index 6a2203ccad8e2fa93c15692f87dc684b9c9ab22a..d4b7957f90f6e764f64ce7410ab023506d5a576f 100644
--- a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
+++ b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml
@@ -14,7 +14,7 @@
 - name: Search root filesystem device
   vars:
     query: "[?mount=='/'].device"
-    _root_device: "{{ ansible_mounts|json_query(query) }}"
+    _root_device: "{{ ansible_mounts | json_query(query) }}"
   set_fact:
     device: "{{ _root_device | first | regex_replace('([^0-9]+)[0-9]+', '\\1') }}"
     partition: "{{ _root_device | first | regex_replace('[^0-9]+([0-9]+)', '\\1') }}"
diff --git a/roles/kubernetes/preinstall/vars/centos.yml b/roles/kubernetes/preinstall/vars/centos.yml
index 2a5b6c75d09c2c813502b8cdc994f1e95bc0d162..9b1a8749e62f36cc818c02afa7b876d139c73c9b 100644
--- a/roles/kubernetes/preinstall/vars/centos.yml
+++ b/roles/kubernetes/preinstall/vars/centos.yml
@@ -1,6 +1,6 @@
 ---
 required_pkgs:
-  - "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}"
+  - "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}"
   - device-mapper-libs
   - nss
   - conntrack
diff --git a/roles/kubernetes/preinstall/vars/redhat.yml b/roles/kubernetes/preinstall/vars/redhat.yml
index 2a5b6c75d09c2c813502b8cdc994f1e95bc0d162..9b1a8749e62f36cc818c02afa7b876d139c73c9b 100644
--- a/roles/kubernetes/preinstall/vars/redhat.yml
+++ b/roles/kubernetes/preinstall/vars/redhat.yml
@@ -1,6 +1,6 @@
 ---
 required_pkgs:
-  - "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}"
+  - "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}"
   - device-mapper-libs
   - nss
   - conntrack
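
As an aside, the ternary filter above simply maps the version test onto a package name; on a hypothetical CentOS 7 host:

    ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux')
    # 7 < 8  ->  'libselinux-python'; on CentOS/RHEL 8 and later  ->  'python3-libselinux'
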
diff --git a/roles/kubernetes/tokens/tasks/check-tokens.yml b/roles/kubernetes/tokens/tasks/check-tokens.yml
index ae75f0d04a512ce8d0351aae7509c5e0a226bc09..a157a0597ee3f61d9dd41521577474511dd24b01 100644
--- a/roles/kubernetes/tokens/tasks/check-tokens.yml
+++ b/roles/kubernetes/tokens/tasks/check-tokens.yml
@@ -17,7 +17,7 @@
 - name: "Check_tokens | Set 'sync_tokens' and 'gen_tokens' to true"
   set_fact:
     gen_tokens: true
-  when: not known_tokens_master.stat.exists and kube_token_auth|default(true)
+  when: not known_tokens_master.stat.exists and kube_token_auth | default(true)
   run_once: true
 
 - name: "Check tokens | check if a cert already exists"
@@ -34,7 +34,7 @@
       {%- set tokens = {'sync': False} -%}
       {%- for server in groups['kube_control_plane'] | intersect(ansible_play_batch)
         if (not hostvars[server].known_tokens.stat.exists) or
-        (hostvars[server].known_tokens.stat.checksum|default('') != known_tokens_master.stat.checksum|default('')) -%}
+        (hostvars[server].known_tokens.stat.checksum | default('') != known_tokens_master.stat.checksum | default('')) -%}
         {%- set _ = tokens.update({'sync': True}) -%}
       {%- endfor -%}
       {{ tokens.sync }}
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index e80e56d6fe5440a28e9f88c1c0182b2d908cc746..6ac6b4907fc308c7f8813c3f83c74916634776fb 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -6,7 +6,7 @@
     mode: 0700
   run_once: yes
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when: gen_tokens|default(false)
+  when: gen_tokens | default(false)
 
 - name: Gen_tokens | generate tokens for master components
   command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
@@ -19,7 +19,7 @@
   changed_when: "'Added' in gentoken_master.stdout"
   run_once: yes
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when: gen_tokens|default(false)
+  when: gen_tokens | default(false)
 
 - name: Gen_tokens | generate tokens for node components
   command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
@@ -32,7 +32,7 @@
   changed_when: "'Added' in gentoken_node.stdout"
   run_once: yes
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when: gen_tokens|default(false)
+  when: gen_tokens | default(false)
 
 - name: Gen_tokens | Get list of tokens from first master
   command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
@@ -40,7 +40,7 @@
   check_mode: no
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true
-  when: sync_tokens|default(false)
+  when: sync_tokens | default(false)
 
 - name: Gen_tokens | Gather tokens
   shell: "set -o pipefail && tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0"
@@ -50,14 +50,14 @@
   check_mode: no
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   run_once: true
-  when: sync_tokens|default(false)
+  when: sync_tokens | default(false)
 
 - name: Gen_tokens | Copy tokens on masters
-  shell: "set -o pipefail && echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /"
+  shell: "set -o pipefail && echo '{{ tokens_data.stdout | quote }}' | base64 -d | tar xz -C /"
   args:
     executable: /bin/bash
   when:
     - inventory_hostname in groups['kube_control_plane']
-    - sync_tokens|default(false)
+    - sync_tokens | default(false)
     - inventory_hostname != groups['kube_control_plane'][0]
     - tokens_data.stdout
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index d32dd3a5aae29bfdfc0fd4056a72c17a16212e08..7055462f0b2a0c9fa4be5543fc8c76781ca4f8f5 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -21,7 +21,7 @@ kube_version: v1.26.6
 ## The minimum version working
 kube_version_min_required: v1.25.0
 
-## Kube Proxy mode One of ['iptables','ipvs']
+## Kube Proxy mode. One of ['iptables', 'ipvs']
 kube_proxy_mode: ipvs
 
 ## The timeout for init first control-plane
@@ -33,13 +33,13 @@ kubeadm_init_timeout: 300s
 kubeadm_init_phases_skip_default: [ "addon/coredns" ]
 kubeadm_init_phases_skip: >-
   {%- if kube_network_plugin == 'kube-router' and (kube_router_run_service_proxy is defined and kube_router_run_service_proxy) -%}
-  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
+  {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }}
   {%- elif kube_network_plugin == 'cilium' and (cilium_kube_proxy_replacement is defined and cilium_kube_proxy_replacement == 'strict') -%}
-  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
+  {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }}
   {%- elif kube_network_plugin == 'calico' and (calico_bpf_enabled is defined and calico_bpf_enabled) -%}
-  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
+  {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }}
   {%- elif kube_proxy_remove is defined and kube_proxy_remove -%}
-  {{ kubeadm_init_phases_skip_default + [ "addon/kube-proxy" ] }}
+  {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }}
   {%- else -%}
   {{ kubeadm_init_phases_skip_default }}
   {%- endif -%}
@@ -116,19 +116,19 @@ resolvconf_mode: host_resolvconf
 # Deploy netchecker app to verify DNS resolve as an HTTP service
 deploy_netchecker: false
 # Ip address of the kubernetes DNS service (called skydns for historical reasons)
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
+skydns_server: "{{ kube_service_addresses | ipaddr('net') | ipaddr(3) | ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_addresses | ipaddr('net') | ipaddr(4) | ipaddr('address') }}"
 dns_domain: "{{ cluster_name }}"
 docker_dns_search_domains:
 - 'default.svc.{{ dns_domain }}'
 - 'svc.{{ dns_domain }}'
 
 kube_dns_servers:
-  coredns: ["{{skydns_server}}"]
-  coredns_dual: "{{[skydns_server] + [ skydns_server_secondary ]}}"
-  manual: ["{{manual_dns_server}}"]
+  coredns: ["{{ skydns_server }}"]
+  coredns_dual: "{{ [skydns_server] + [skydns_server_secondary] }}"
+  manual: ["{{ manual_dns_server }}"]
 
-dns_servers: "{{kube_dns_servers[dns_mode]}}"
+dns_servers: "{{ kube_dns_servers[dns_mode] }}"
 
 enable_coredns_k8s_external: false
 coredns_k8s_external_zone: k8s_external.local
@@ -179,7 +179,7 @@ kube_network_plugin: calico
 kube_network_plugin_multus: false
 
 # Determines if calico_rr group exists
-peer_with_calico_rr: "{{ 'calico_rr' in groups and groups['calico_rr']|length > 0 }}"
+peer_with_calico_rr: "{{ 'calico_rr' in groups and groups['calico_rr'] | length > 0 }}"
 
 # Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
 calico_datastore: "kdd"
@@ -232,10 +232,10 @@ kube_network_node_prefix_ipv6: 120
 # listening on.
 # NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint
 # access IP value (automatically evaluated below)
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_ip: "{{ kube_service_addresses | ipaddr('net') | ipaddr(1) | ipaddr('address') }}"
 
 # NOTE: If you set a specific address/interface and use loadbalancer_apiserver_localhost
-# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} too.
+# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
 kube_apiserver_bind_address: 0.0.0.0
 
 # https
@@ -243,7 +243,7 @@ kube_apiserver_port: 6443
 
 # If non-empty, will use this string as identification instead of the actual hostname
 kube_override_hostname: >-
-  {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
+  {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
   {%- else -%}
   {{ inventory_hostname }}
   {%- endif -%}
@@ -443,7 +443,7 @@ openstack_lbaas_create_monitor: "yes"
 openstack_lbaas_monitor_delay: "1m"
 openstack_lbaas_monitor_timeout: "30s"
 openstack_lbaas_monitor_max_retries: "3"
-openstack_cacert: "{{ lookup('env','OS_CACERT') }}"
+openstack_cacert: "{{ lookup('env', 'OS_CACERT') }}"
 
 # Default values for the external OpenStack Cloud Controller
 external_openstack_lbaas_enabled: true
@@ -509,7 +509,7 @@ kubeadm_feature_gates: []
 local_volume_provisioner_storage_classes: |
   {
     "{{ local_volume_provisioner_storage_class | default('local-storage') }}": {
-      "host_dir": "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}",
+      "host_dir": "{{ local_volume_provisioner_base_dir | default('/mnt/disks') }}",
       "mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}",
       "volume_mode": "Filesystem",
       "fs_type": "ext4"
@@ -546,7 +546,7 @@ loadbalancer_apiserver_type: "nginx"
 apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
 kube_apiserver_global_endpoint: |-
   {% if loadbalancer_apiserver is defined -%}
-      https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
+      https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
   {%- elif loadbalancer_apiserver_localhost and (loadbalancer_apiserver_port is not defined or loadbalancer_apiserver_port == kube_apiserver_port) -%}
       https://localhost:{{ kube_apiserver_port }}
   {%- else -%}
@@ -554,11 +554,11 @@ kube_apiserver_global_endpoint: |-
   {%- endif %}
 kube_apiserver_endpoint: |-
   {% if loadbalancer_apiserver is defined -%}
-      https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
+      https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
   {%- elif not is_kube_master and loadbalancer_apiserver_localhost -%}
-      https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
+      https://localhost:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }}
   {%- elif is_kube_master -%}
-      https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }}
+      https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0', '127.0.0.1') }}:{{ kube_apiserver_port }}
   {%- else -%}
       https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
   {%- endif %}
@@ -594,20 +594,20 @@ etcd_metrics_addresses: |-
   {% for item in etcd_hosts -%}
     https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}:{{ etcd_metrics_port | default(2381) }}{% if not loop.last %},{% endif %}
   {%- endfor %}
-etcd_events_access_addresses: "{{etcd_events_access_addresses_list | join(',')}}"
-etcd_events_access_addresses_semicolon: "{{etcd_events_access_addresses_list | join(';')}}"
+etcd_events_access_addresses: "{{ etcd_events_access_addresses_list | join(',') }}"
+etcd_events_access_addresses_semicolon: "{{ etcd_events_access_addresses_list | join(';') }}"
 # user should set etcd_member_name in inventory/mycluster/hosts.ini
 etcd_member_name: |-
   {% for host in groups['etcd'] %}
-  {%   if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index|string) }}{% endif %}
+  {%   if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index | string) }}{% endif %}
   {% endfor %}
 etcd_peer_addresses: |-
   {% for item in groups['etcd'] -%}
-    {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}=https://{{ hostvars[item].etcd_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2380{% if not loop.last %},{% endif %}
+    {{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}=https://{{ hostvars[item].etcd_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2380{% if not loop.last %},{% endif %}
   {%- endfor %}
 etcd_events_peer_addresses: |-
   {% for item in groups['etcd'] -%}
-    {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}-events=https://{{ hostvars[item].etcd_events_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2382{% if not loop.last %},{% endif %}
+    {{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}-events=https://{{ hostvars[item].etcd_events_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2382{% if not loop.last %},{% endif %}
   {%- endfor %}
 
 podsecuritypolicy_enabled: false
@@ -653,16 +653,16 @@ host_os: >-
 kubelet_event_record_qps: 5
 
 proxy_env_defaults:
-  http_proxy: "{{ http_proxy | default ('') }}"
-  HTTP_PROXY: "{{ http_proxy | default ('') }}"
-  https_proxy: "{{ https_proxy | default ('') }}"
-  HTTPS_PROXY: "{{ https_proxy | default ('') }}"
-  no_proxy: "{{ no_proxy | default ('') }}"
-  NO_PROXY: "{{ no_proxy | default ('') }}"
+  http_proxy: "{{ http_proxy | default('') }}"
+  HTTP_PROXY: "{{ http_proxy | default('') }}"
+  https_proxy: "{{ https_proxy | default('') }}"
+  HTTPS_PROXY: "{{ https_proxy | default('') }}"
+  no_proxy: "{{ no_proxy | default('') }}"
+  NO_PROXY: "{{ no_proxy | default('') }}"
 
 # If we use SSL_CERT_FILE: {{ omit }} it causes the value __omit_place_holder__ and breaks environments
 # Combining dicts avoids the problem with the omit placeholder. Maybe there is a better solution?
-proxy_env: "{{ proxy_env_defaults | combine({ 'SSL_CERT_FILE': https_proxy_cert_file }) if https_proxy_cert_file is defined else proxy_env_defaults }}"
+proxy_env: "{{ proxy_env_defaults | combine({'SSL_CERT_FILE': https_proxy_cert_file}) if https_proxy_cert_file is defined else proxy_env_defaults }}"
 
 proxy_disable_env:
   ALL_PROXY: ''
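
For illustration, with the Kubespray default kube_service_addresses of 10.233.0.0/18, the ipaddr chains above pick fixed service IPs out of the service network:

    '10.233.0.0/18' | ipaddr('net') | ipaddr(3) | ipaddr('address')  ->  10.233.0.3  (skydns_server)
    '10.233.0.0/18' | ipaddr('net') | ipaddr(4) | ipaddr('address')  ->  10.233.0.4  (skydns_server_secondary)
    '10.233.0.0/18' | ipaddr('net') | ipaddr(1) | ipaddr('address')  ->  10.233.0.1  (kube_apiserver_ip)
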
diff --git a/roles/kubespray-defaults/tasks/fallback_ips.yml b/roles/kubespray-defaults/tasks/fallback_ips.yml
index d42faee8f0a022b42e2133523b39b914954c3acc..86b0bd7f9466eab8c6c419bb7d8f3678bef10c11 100644
--- a/roles/kubespray-defaults/tasks/fallback_ips.yml
+++ b/roles/kubespray-defaults/tasks/fallback_ips.yml
@@ -10,7 +10,7 @@
   delegate_to: "{{ item }}"
   delegate_facts: yes
   when: hostvars[item].ansible_default_ipv4 is not defined
-  loop: "{{ (groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([])) | unique }}"
+  loop: "{{ (groups['k8s_cluster'] | default([]) + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique }}"
   run_once: yes
   tags: always
 
@@ -18,7 +18,7 @@
   set_fact:
     fallback_ips_base: |
       ---
-      {% for item in (groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique %}
+      {% for item in (groups['k8s_cluster'] | default([]) + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique %}
       {% set found = hostvars[item].get('ansible_default_ipv4') %}
       {{ item }}: "{{ found.get('address', '127.0.0.1') }}"
       {% endfor %}
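
fallback_ips_base renders a YAML mapping as plain text; elsewhere in the role (outside this hunk) it is parsed back into a dict. A minimal sketch of that round trip, with hypothetical fact names:

    - name: Render a host -> IP map as YAML text
      set_fact:
        ip_map_text: |
          ---
          {% for h in groups['all'] %}
          {{ h }}: "{{ hostvars[h].get('ansible_default_ipv4', {}).get('address', '127.0.0.1') }}"
          {% endfor %}

    - name: Parse the text into a real dict
      set_fact:
        ip_map: "{{ ip_map_text | from_yaml }}"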
diff --git a/roles/kubespray-defaults/tasks/no_proxy.yml b/roles/kubespray-defaults/tasks/no_proxy.yml
index 6e6a5c9bbc7b02a55eeec541523cd1b78472c967..d2d5cc6d1e1c955a26a52480668b4087e13620c2 100644
--- a/roles/kubespray-defaults/tasks/no_proxy.yml
+++ b/roles/kubespray-defaults/tasks/no_proxy.yml
@@ -1,9 +1,10 @@
 ---
 - name: Set no_proxy to all assigned cluster IPs and hostnames
   set_fact:
+    # noqa: jinja[spacing]
     no_proxy_prepare: >-
       {%- if loadbalancer_apiserver is defined -%}
-      {{ apiserver_loadbalancer_domain_name| default('') }},
+      {{ apiserver_loadbalancer_domain_name | default('') }},
       {{ loadbalancer_apiserver.address | default('') }},
       {%- endif -%}
       {%- if no_proxy_exclude_workers | default(false) -%}
@@ -11,12 +12,12 @@
       {%- else -%}
       {% set cluster_or_master = 'k8s_cluster' %}
       {%- endif -%}
-      {%- for item in (groups[cluster_or_master] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%}
+      {%- for item in (groups[cluster_or_master] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
       {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }},
-      {%-   if item != hostvars[item].get('ansible_hostname', '') -%}
+      {%- if item != hostvars[item].get('ansible_hostname', '') -%}
       {{ hostvars[item]['ansible_hostname'] }},
       {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
-      {%-   endif -%}
+      {%- endif -%}
       {{ item }},{{ item }}.{{ dns_domain }},
       {%- endfor -%}
       {%- if additional_no_proxy is defined -%}
@@ -32,7 +33,8 @@
 - name: Populate no_proxy on all hosts
   set_fact:
     no_proxy: "{{ hostvars.localhost.no_proxy_prepare }}"
+    # noqa: jinja[spacing]
     proxy_env: "{{ proxy_env | combine({
-      'no_proxy': hostvars.localhost.no_proxy_prepare,
-      'NO_PROXY': hostvars.localhost.no_proxy_prepare
-    }) }}"
+        'no_proxy': hostvars.localhost.no_proxy_prepare,
+        'NO_PROXY': hostvars.localhost.no_proxy_prepare
+      }) }}"
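
Note the shape of this file: the no_proxy string is computed once and every other host copies it out of hostvars.localhost. A minimal sketch of the same compute-once, fan-out pattern, with a hypothetical fact name:

    - name: Compute a value once on localhost
      set_fact:
        shared_value: "{{ groups['all'] | join(',') }}"
      delegate_to: localhost
      delegate_facts: true
      run_once: true

    - name: Copy it to every host in the play
      set_fact:
        local_copy: "{{ hostvars.localhost.shared_value }}"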
diff --git a/roles/network_plugin/calico/rr/tasks/update-node.yml b/roles/network_plugin/calico/rr/tasks/update-node.yml
index 930429139ca7154d4330cff29dffbf5918541c07..59841148cdea653b2ce67410d36b9397924b712e 100644
--- a/roles/network_plugin/calico/rr/tasks/update-node.yml
+++ b/roles/network_plugin/calico/rr/tasks/update-node.yml
@@ -4,7 +4,7 @@
 - block:
   - name: Set the retry count
     set_fact:
-      retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}"
+      retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}"
 
   - name: Calico | Set label for route reflector  # noqa command-instead-of-shell
     shell: "{{ bin_dir }}/calicoctl.sh label node  {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite"
@@ -24,6 +24,7 @@
     retries: 10
 
   - name: Calico-rr | Set route reflector cluster ID
+    # noqa: jinja[spacing]
     set_fact:
       calico_rr_node_patched: >-
         {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
@@ -38,7 +39,7 @@
   - name: Fail if retry limit is reached
     fail:
       msg: Ended after 10 retries
-    when: retry_count|int == 10
+    when: retry_count | int == 10
 
   - name: Retrying node configuration
     debug:
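
The retry_count bookkeeping above is kubespray's retry-by-recursion idiom for multi-task blocks that a plain retries: keyword cannot cover. A minimal self-contained sketch, assuming it lives in a hypothetical retry.yml:

    # retry.yml - re-included on failure until 10 attempts are used up
    - name: Set the retry count
      set_fact:
        retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}"

    - name: Fail if retry limit is reached
      fail:
        msg: Ended after 10 retries
      when: retry_count | int == 10

    - name: Try the flaky step
      command: /bin/true
      register: step
      failed_when: false

    - name: Retry on failure
      include_tasks: retry.yml
      when: step.rc != 0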
diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml
index 0ea75241807b47af8612d725c37266c9d39dd56e..8506e4499d16737e2f2c92ac7684e17940cbc762 100644
--- a/roles/network_plugin/calico/tasks/check.yml
+++ b/roles/network_plugin/calico/tasks/check.yml
@@ -168,7 +168,7 @@
 - name: "Check if inventory match current cluster configuration"
   assert:
     that:
-      - calico_pool_conf.spec.blockSize|int == (calico_pool_blocksize | default(kube_network_node_prefix) | int)
+      - calico_pool_conf.spec.blockSize | int == (calico_pool_blocksize | default(kube_network_node_prefix) | int)
       - calico_pool_conf.spec.cidr == (calico_pool_cidr | default(kube_pods_subnet))
       - not calico_pool_conf.spec.ipipMode is defined or calico_pool_conf.spec.ipipMode == calico_ipip_mode
       - not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml
index 7d509f90ee5cbe21ff856268a68a1572fbd0dd0d..6dbcc3170c2d1618b62f08c286e676ff41b0dec8 100644
--- a/roles/network_plugin/calico/tasks/install.yml
+++ b/roles/network_plugin/calico/tasks/install.yml
@@ -122,7 +122,7 @@
 - block:
     - name: Calico | Check if extra directory is needed
       stat:
-        path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3','<')) else 'crd' }}"
+        path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3', '<')) else 'crd' }}"
       register: kdd_path
     - name: Calico | Set kdd path when calico < v3.22.3
       set_fact:
@@ -196,7 +196,7 @@
     - name: Calico | Configure calico FelixConfiguration
       command:
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
-        stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config|to_json) }}"
+        stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config | to_json) }}"
       changed_when: False
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -222,7 +222,7 @@
               "cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}",
               "ipipMode": "{{ calico_ipip_mode }}",
               "vxlanMode": "{{ calico_vxlan_mode }}",
-              "natOutgoing": {{ nat_outgoing|default(false) }}
+              "natOutgoing": {{ nat_outgoing | default(false) }}
             }
           }
 
@@ -235,7 +235,7 @@
     - name: Calico | Configure calico network pool
       command:
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
-        stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool|to_json) }}"
+        stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool | to_json) }}"
       changed_when: False
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -261,7 +261,7 @@
               "cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}",
               "ipipMode": "{{ calico_ipip_mode_ipv6 }}",
               "vxlanMode": "{{ calico_vxlan_mode_ipv6 }}",
-              "natOutgoing": {{ nat_outgoing_ipv6|default(false) }}
+              "natOutgoing": {{ nat_outgoing_ipv6 | default(false) }}
             }
           }
 
@@ -274,7 +274,7 @@
     - name: Calico | Configure calico ipv6 network pool
       command:
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
-        stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6|to_json) }}"
+        stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6 | to_json) }}"
       changed_when: False
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -282,13 +282,13 @@
 
 - name: Populate Service External IPs
   set_fact:
-    _service_external_ips: "{{ _service_external_ips|default([]) + [ {'cidr': item} ] }}"
+    _service_external_ips: "{{ _service_external_ips | default([]) + [{'cidr': item}] }}"
   with_items: "{{ calico_advertise_service_external_ips }}"
   run_once: yes
 
 - name: Populate Service LoadBalancer IPs
   set_fact:
-    _service_loadbalancer_ips: "{{ _service_loadbalancer_ips|default([]) + [ {'cidr': item} ] }}"
+    _service_loadbalancer_ips: "{{ _service_loadbalancer_ips | default([]) + [{'cidr': item}] }}"
   with_items: "{{ calico_advertise_service_loadbalancer_ips }}"
   run_once: yes
 
@@ -296,7 +296,7 @@
   set_fact:
     nodeToNodeMeshEnabled: "false"
   when:
-    - peer_with_router|default(false) or peer_with_calico_rr|default(false)
+    - peer_with_router | default(false) or peer_with_calico_rr | default(false)
     - inventory_hostname in groups['k8s_cluster']
   run_once: yes
 
@@ -309,6 +309,7 @@
 
     - name: Calico | Set kubespray BGP Configuration
       set_fact:
+        # noqa: jinja[spacing]
         _bgp_config: >
           {
             "kind": "BGPConfiguration",
@@ -319,12 +320,12 @@
             "spec": {
               "listenPort": {{ calico_bgp_listen_port }},
               "logSeverityScreen": "Info",
-              {% if not calico_no_global_as_num|default(false) %}"asNumber": {{ global_as_num }},{% endif %}
-              "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }} ,
-              {% if calico_advertise_cluster_ips|default(false) %}
+              {% if not calico_no_global_as_num | default(false) %}"asNumber": {{ global_as_num }},{% endif %}
+              "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled | default('true') }},
+              {% if calico_advertise_cluster_ips | default(false) %}
               "serviceClusterIPs": [{"cidr": "{{ kube_service_addresses }}" } {{ ',{"cidr":"' + kube_service_addresses_ipv6 + '"}' if enable_dual_stack_networks else '' }}],{% endif %}
-              {% if calico_advertise_service_loadbalancer_ips|length > 0  %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %}
-              "serviceExternalIPs": {{ _service_external_ips|default([]) }}
+              {% if calico_advertise_service_loadbalancer_ips | length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %}
+              "serviceExternalIPs": {{ _service_external_ips | default([]) }}
             }
           }
 
@@ -337,7 +338,7 @@
     - name: Calico | Set up BGP Configuration
       command:
         cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
-        stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config|to_json) }}"
+        stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config | to_json) }}"
       changed_when: False
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -464,8 +465,8 @@
 
 - include_tasks: peer_with_calico_rr.yml
   when:
-    - peer_with_calico_rr|default(false)
+    - peer_with_calico_rr | default(false)
 
 - include_tasks: peer_with_router.yml
   when:
-    - peer_with_router|default(false)
+    - peer_with_router | default(false)
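
Every calicoctl apply in this file reuses one stdin idiom: serialize with to_json unless the value is already a string. A minimal sketch, with a hypothetical doc variable; note that Jinja evaluates the test (doc is string) first and then pipes the boolean into ternary:

    - name: Apply a resource through calicoctl stdin
      command:
        cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
        stdin: "{{ doc is string | ternary(doc, doc | to_json) }}"
      changed_when: false
      vars:
        doc:
          kind: IPPool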
diff --git a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
index 5e6010ced38edd98b5dfd480b89ae1740ace73c0..9d216bd2073bfa6c6dfe727f89896179c5105020 100644
--- a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
+++ b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml
@@ -13,7 +13,7 @@
   command:
     cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
     # pass the value through when it's already a string
-    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
+    stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
   vars:
     stdin: >
       {"apiVersion": "projectcalico.org/v3",
@@ -38,7 +38,7 @@
   command:
     cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
     # pass the value through when it's already a string
-    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
+    stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
   vars:
     stdin: >
       {"apiVersion": "projectcalico.org/v3",
@@ -64,7 +64,7 @@
   command:
     cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
     # pass the value through when it's already a string
-    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
+    stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
   vars:
     stdin: >
       {"apiVersion": "projectcalico.org/v3",
diff --git a/roles/network_plugin/calico/tasks/peer_with_router.yml b/roles/network_plugin/calico/tasks/peer_with_router.yml
index a698ed1da9b12ba8d0542150bba49f76fa620a52..a29ca36ddf96a922d4aba2dfc2ac867f1b19b28e 100644
--- a/roles/network_plugin/calico/tasks/peer_with_router.yml
+++ b/roles/network_plugin/calico/tasks/peer_with_router.yml
@@ -2,13 +2,13 @@
 - name: Calico | Configure peering with router(s) at global scope
   command:
     cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
-    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
+    stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
   vars:
     stdin: >
       {"apiVersion": "projectcalico.org/v3",
       "kind": "BGPPeer",
       "metadata": {
-        "name": "global-{{ item.name | default(item.router_id|replace(':','-')) }}"
+        "name": "global-{{ item.name | default(item.router_id | replace(':', '-')) }}"
       },
       "spec": {
         "asNumber": "{{ item.as }}",
@@ -19,14 +19,14 @@
   until: output.rc == 0
   delay: "{{ retry_stagger | random + 3 }}"
   with_items:
-    - "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}"
+    - "{{ peers | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'global') | list | default([]) }}"
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
 - name: Calico | Configure node asNumber for per node peering
   command:
     cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
-    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
+    stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
   vars:
     stdin: >
       {"apiVersion": "projectcalico.org/v3",
@@ -52,26 +52,26 @@
 - name: Calico | Configure peering with router(s) at node scope
   command:
     cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
-    stdin: "{{ stdin is string | ternary(stdin, stdin|to_json) }}"
+    stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}"
   vars:
     stdin: >
       {"apiVersion": "projectcalico.org/v3",
       "kind": "BGPPeer",
       "metadata": {
-        "name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id|replace(':','-')) }}"
+        "name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id | replace(':', '-')) }}"
       },
       "spec": {
         "asNumber": "{{ item.as }}",
         "node": "{{ inventory_hostname }}",
         "peerIP": "{{ item.router_id }}",
-        "sourceAddress": "{{ item.sourceaddress|default('UseNodeIP') }}"
+        "sourceAddress": "{{ item.sourceaddress | default('UseNodeIP') }}"
       }}
   register: output
   retries: 4
   until: output.rc == 0
   delay: "{{ retry_stagger | random + 3 }}"
   with_items:
-    - "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}"
+    - "{{ peers | selectattr('scope', 'undefined') | list | default([]) | union(peers | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'node') | list | default([])) }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
     - inventory_hostname in groups['k8s_cluster']
diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml
index 162aca150dd21b6ab1f305bf87765913a109e991..fc87769f028861df943c0dc6b15abfcd705281b1 100644
--- a/roles/network_plugin/calico/tasks/pre.yml
+++ b/roles/network_plugin/calico/tasks/pre.yml
@@ -32,12 +32,12 @@
   include_vars: "{{ item }}"
   with_first_found:
   - files:
-    - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
-    - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
-    - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
-    - "{{ ansible_distribution|lower }}.yml"
-    - "{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml"
-    - "{{ ansible_os_family|lower }}.yml"
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
+    - "{{ ansible_distribution | lower }}.yml"
+    - "{{ ansible_os_family | lower }}-{{ ansible_architecture }}.yml"
+    - "{{ ansible_os_family | lower }}.yml"
     - defaults.yml
     paths:
     - ../vars
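
with_first_found walks the files list top-down and loads only the first one that exists, so the most OS-specific vars win. A minimal sketch:

    - name: Load the most specific vars file available
      include_vars: "{{ item }}"
      with_first_found:
        - files:
            - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
            - "{{ ansible_os_family | lower }}.yml"
            - defaults.yml
          paths:
            - ../vars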
diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2
index 0379b62712eec681d3bb4ca7025e0c96b3e8fb40..4012ef784c342204146e43f842fd161942d19c85 100644
--- a/roles/network_plugin/calico/templates/calico-config.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-config.yml.j2
@@ -22,8 +22,8 @@ data:
   cluster_type: "kubespray"
   calico_backend: "{{ calico_network_backend }}"
 {% endif %}
-{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router|default(false) %}
-  as: "{{ local_as|default(global_as_num) }}"
+{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router | default(false) %}
+  as: "{{ local_as | default(global_as_num) }}"
 {% endif -%}
   # The CNI network configuration to install on each node. The special
   # values in this config will be automatically populated.
@@ -73,7 +73,7 @@ data:
               "allow_ip_forwarding": true
             },
           {% endif %}
-          {% if (calico_feature_control is defined) and (calico_feature_control|length > 0) %}
+          {% if (calico_feature_control is defined) and (calico_feature_control | length > 0) %}
             "feature_control": {
               {% for fc in calico_feature_control -%}
               {% set fcval = calico_feature_control[fc] -%}
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index bd6c63c2432fab35885d17386d04ff51e44d7a5c..4e49f3bc4d90ddb54d4d8ddc75f66dae74f2763c 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -211,7 +211,7 @@ spec:
               value: "true"
             # Set Felix endpoint to host default action to ACCEPT.
             - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
-              value: "{{ calico_endpoint_to_host_action|default('RETURN') }}"
+              value: "{{ calico_endpoint_to_host_action | default('RETURN') }}"
             - name: FELIX_HEALTHHOST
               value: "{{ calico_healthhost }}"
 {% if kube_proxy_mode == 'ipvs' and kube_apiserver_node_port_range is defined %}
@@ -286,7 +286,7 @@ spec:
             - name: IP6
               value: autodetect
 {% endif %}
-{% if calico_use_default_route_src_ipaddr|default(false) %}
+{% if calico_use_default_route_src_ipaddr | default(false) %}
             - name: FELIX_DEVICEROUTESOURCEADDRESS
               valueFrom:
                 fieldRef:
diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml
index b6f68c9c01bc6ec9b5d9f8a81c37965538910716..f4c70e4795bb3c1a58bec7a7a930864020fd4bc8 100644
--- a/roles/network_plugin/cilium/defaults/main.yml
+++ b/roles/network_plugin/cilium/defaults/main.yml
@@ -8,7 +8,7 @@ cilium_enable_ipv4: true
 cilium_enable_ipv6: false
 
 # Cilium agent health port
-cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879 {%- else -%} 9876 {%- endif -%}"
+cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879{%- else -%}9876{%- endif -%}"
 
 # Identity allocation mode selects how identities are shared between cilium
 # nodes by setting how they are stored. The options are "crd" or "kvstore".
diff --git a/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2
index 200b9efe64716475cc4a42323f79c167ebe0d10e..14189656f4da2383eabaf318315c259085f800f1 100644
--- a/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2
@@ -124,7 +124,7 @@ spec:
               mountPath: /var/lib/etcd-config
               readOnly: true
             - name: etcd-secrets
-              mountPath: "{{cilium_cert_dir}}"
+              mountPath: "{{ cilium_cert_dir }}"
               readOnly: true
 {% endif %}
 {% for volume_mount in cilium_operator_extra_volume_mounts %}
@@ -163,7 +163,7 @@ spec:
           # To read the k8s etcd secrets in case the user might want to use TLS
         - name: etcd-secrets
           hostPath:
-            path: "{{cilium_cert_dir}}"
+            path: "{{ cilium_cert_dir }}"
 {% endif %}
 {% for volume in cilium_operator_extra_volumes %}
         - {{ volume | to_nice_yaml(indent=2) | indent(10) }}
diff --git a/roles/network_plugin/cilium/templates/cilium/config.yml.j2 b/roles/network_plugin/cilium/templates/cilium/config.yml.j2
index 700dd0841d09d7604ce6e341b59b6fd9ede2c750..399d8ced87ed0b3ebc69c040f5236c01b1cf10c1 100644
--- a/roles/network_plugin/cilium/templates/cilium/config.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium/config.yml.j2
@@ -104,7 +104,7 @@ data:
   #
   # If this option is set to "false" during an upgrade from 1.3 or earlier to
   # 1.4 or later, then it may cause one-time disruptions during the upgrade.
-  preallocate-bpf-maps: "{{cilium_preallocate_bpf_maps}}"
+  preallocate-bpf-maps: "{{ cilium_preallocate_bpf_maps }}"
 
   # Regular expression matching compatible Istio sidecar istio-proxy
   # container image names
@@ -251,6 +251,6 @@ data:
 {% for cidr in cilium_non_masquerade_cidrs %}
       - {{ cidr }}
 {% endfor %}
-    masqLinkLocal: {{ cilium_masq_link_local|bool }}
+    masqLinkLocal: {{ cilium_masq_link_local | bool }}
     resyncInterval: "{{ cilium_ip_masq_resync_interval }}"
 {% endif %}
diff --git a/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium/ds.yml.j2
index 13c5d8465e7ed65a94a66f2b8876fb3ce5ba836c..38360342b3b815f2690f394ec4fb3d9e46630a9e 100644
--- a/roles/network_plugin/cilium/templates/cilium/ds.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium/ds.yml.j2
@@ -28,7 +28,7 @@ spec:
     spec:
       containers:
       - name: cilium-agent
-        image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
+        image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
         imagePullPolicy: {{ k8s_image_pull_policy }}
         command:
         - cilium-agent
@@ -160,7 +160,7 @@ spec:
           mountPath: /var/lib/etcd-config
           readOnly: true
         - name: etcd-secrets
-          mountPath: "{{cilium_cert_dir}}"
+          mountPath: "{{ cilium_cert_dir }}"
           readOnly: true
 {% endif %}
         - name: clustermesh-secrets
@@ -201,7 +201,7 @@ spec:
       initContainers:
 {% if cilium_version | regex_replace('v') is version('1.11', '>=') and cilium_cgroup_auto_mount %}
       - name: mount-cgroup
-        image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
+        image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
         imagePullPolicy: {{ k8s_image_pull_policy }}
         env:
         - name: CGROUP_ROOT
@@ -230,7 +230,7 @@ spec:
 {% endif %}
 {% if cilium_version | regex_replace('v') is version('1.11.7', '>=') %}
       - name: apply-sysctl-overwrites
-        image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
+        image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
         imagePullPolicy: {{ k8s_image_pull_policy }}
         env:
         - name: BIN_PATH
@@ -256,7 +256,7 @@ spec:
           privileged: true
 {% endif %}
       - name: clean-cilium-state
-        image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
+        image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
         imagePullPolicy: {{ k8s_image_pull_policy }}
         command:
         - /init-container.sh
@@ -309,7 +309,7 @@ spec:
 {% if cilium_version | regex_replace('v') is version('1.13.1', '>=') %}
       # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
       - name: install-cni-binaries
-        image: "{{cilium_image_repo}}:{{cilium_image_tag}}"
+        image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}"
         imagePullPolicy: {{ k8s_image_pull_policy }}
         command:
           - "/install-plugin.sh"
@@ -398,7 +398,7 @@ spec:
         # To read the k8s etcd secrets in case the user might want to use TLS
       - name: etcd-secrets
         hostPath:
-          path: "{{cilium_cert_dir}}"
+          path: "{{ cilium_cert_dir }}"
 {% endif %}
         # To read the clustermesh configuration
       - name: clustermesh-secrets
diff --git a/roles/network_plugin/flannel/defaults/main.yml b/roles/network_plugin/flannel/defaults/main.yml
index cd1dcf16d72f8959f47d6bb5dece48a403bbf4d5..8d7713bb92d420fecbd5ed2a6a577d375c424ca2 100644
--- a/roles/network_plugin/flannel/defaults/main.yml
+++ b/roles/network_plugin/flannel/defaults/main.yml
@@ -2,7 +2,7 @@
 # Flannel public IP
 # The address that flannel should advertise for reaching the system
 # Disabled until https://github.com/coreos/flannel/issues/712 is fixed
-# flannel_public_ip: "{{ access_ip|default(ip|default(fallback_ips[inventory_hostname])) }}"
+# flannel_public_ip: "{{ access_ip | default(ip | default(fallback_ips[inventory_hostname])) }}"
 
 ## interface that should be used for flannel operations
 ## This is actually an inventory cluster-level item
diff --git a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2
index 472cea21919311cbcf207484e88c427b448b65d3..cee7ccbf4a60c12d58c3eeef9a43eef5ce0ae2e8 100644
--- a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2
+++ b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2
@@ -42,23 +42,23 @@ spec:
           imagePullPolicy: {{ k8s_image_pull_policy }}
           args:
           - /kube-ovn/start-controller.sh
-          - --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{''}}
-          - --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{''}}
-          - --default-gateway-check={{ kube_ovn_default_gateway_check|string }}
-          - --default-logical-gateway={{ kube_ovn_default_logical_gateway|string }}
+          - --default-cidr={{ kube_pods_subnet }}{% if enable_dual_stack_networks %},{{ kube_ovn_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}{% endif %}{{ '' }}
+          - --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{ '' }}
+          - --default-gateway-check={{ kube_ovn_default_gateway_check | string }}
+          - --default-logical-gateway={{ kube_ovn_default_logical_gateway | string }}
           - --default-u2o-interconnection={{ kube_ovn_u2o_interconnection }}
-          - --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{''}}
-          - --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{''}}
-          - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}}
+          - --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{ '' }}
+          - --node-switch-cidr={{ kube_ovn_node_switch_cidr }}{% if enable_dual_stack_networks %},{{ kube_ovn_node_switch_cidr_ipv6 }}{% endif %}{{ '' }}
+          - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }}
           - --network-type={{ kube_ovn_network_type }}
-          - --default-interface-name={{ kube_ovn_default_interface_name|default('') }}
+          - --default-interface-name={{ kube_ovn_default_interface_name | default('') }}
           - --default-vlan-id={{ kube_ovn_default_vlan_id }}
           - --ls-dnat-mod-dl-dst={{ kube_ovn_ls_dnat_mod_dl_dst }}
           - --pod-nic-type={{ kube_ovn_pod_nic_type }}
-          - --enable-lb={{ kube_ovn_enable_lb|string }}
-          - --enable-np={{ kube_ovn_enable_np|string }}
+          - --enable-lb={{ kube_ovn_enable_lb | string }}
+          - --enable-np={{ kube_ovn_enable_np | string }}
           - --enable-eip-snat={{ kube_ovn_eip_snat_enabled }}
-          - --enable-external-vpc={{ kube_ovn_enable_external_vpc|string }}
+          - --enable-external-vpc={{ kube_ovn_enable_external_vpc | string }}
           - --logtostderr=false
           - --alsologtostderr=true
           - --gc-interval=360
@@ -187,11 +187,11 @@ spec:
         args:
           - --enable-mirror={{ kube_ovn_traffic_mirror | lower }}
           - --encap-checksum={{ kube_ovn_encap_checksum | lower }}
-          - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{''}}
-          - --iface={{ kube_ovn_iface|default('') }}
+          - --service-cluster-ip-range={{ kube_service_addresses }}{% if enable_dual_stack_networks %},{{ kube_service_addresses_ipv6 }}{% endif %}{{ '' }}
+          - --iface={{ kube_ovn_iface | default('') }}
           - --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }}
           - --network-type={{ kube_ovn_network_type }}
-          - --default-interface-name={{ kube_ovn_default_interface_name|default('') }}
+          - --default-interface-name={{ kube_ovn_default_interface_name | default('') }}
           {% if kube_ovn_mtu is defined %}
           - --mtu={{ kube_ovn_mtu }}
 {% endif %}
@@ -359,7 +359,7 @@ spec:
           command:
           - /kube-ovn/kube-ovn-pinger
           args:
-          - --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{''}}
+          - --external-address={{ kube_ovn_external_address }}{% if enable_dual_stack_networks %},{{ kube_ovn_external_address_ipv6 }}{% endif %}{{ '' }}
           - --external-dns={{ kube_ovn_external_dns }}
           - --logtostderr=false
           - --alsologtostderr=true
@@ -668,6 +668,6 @@ data:
   ic-db-host: "{{ kube_ovn_ic_dbhost }}"
   ic-nb-port: "6645"
   ic-sb-port: "6646"
-  gw-nodes: "{{ kube_ovn_central_hosts|join(',') }}"
+  gw-nodes: "{{ kube_ovn_central_hosts | join(',') }}"
   auto-route: "{{ kube_ovn_ic_autoroute | lower }}"
 {% endif %}
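
The trailing {{ '' }} on these args lines is deliberate: the template module runs Jinja with trim_blocks enabled, so a line ending in {% endif %} loses its newline and would merge with the next list item; closing the line with an empty expression keeps the newline. A minimal sketch, with a hypothetical my_gateway variable:

    # without the trailing {{ '' }} the next "- --..." item would be glued onto this line
    - --default-gateway={% if my_gateway is defined %}{{ my_gateway }}{% endif %}{{ '' }}
    - --network-type=geneve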
diff --git a/roles/network_plugin/macvlan/handlers/main.yml b/roles/network_plugin/macvlan/handlers/main.yml
index 88997c92dae158d9d5dd6c8797b40e455672ad62..aba4cbc00315dda4229dc32aab72584609c1af3f 100644
--- a/roles/network_plugin/macvlan/handlers/main.yml
+++ b/roles/network_plugin/macvlan/handlers/main.yml
@@ -7,6 +7,7 @@
 
 - name: Macvlan | reload network
   service:
+    # noqa: jinja[spacing]
     name: >-
       {% if ansible_os_family == "RedHat" -%}
       network
diff --git a/roles/network_plugin/multus/defaults/main.yml b/roles/network_plugin/multus/defaults/main.yml
index cbeb4cb323e2bf3f88be6a7c6385cf6f31db4d6f..c6b7ecd9705a20e92d3a004d13d15a94d645eb36 100644
--- a/roles/network_plugin/multus/defaults/main.yml
+++ b/roles/network_plugin/multus/defaults/main.yml
@@ -3,7 +3,7 @@ multus_conf_file: "auto"
 multus_cni_conf_dir_host: "/etc/cni/net.d"
 multus_cni_bin_dir_host: "/opt/cni/bin"
 multus_cni_run_dir_host: "/run"
-multus_cni_conf_dir: "{{ ('/host',  multus_cni_conf_dir_host) | join }}"
+multus_cni_conf_dir: "{{ ('/host', multus_cni_conf_dir_host) | join }}"
 multus_cni_bin_dir: "{{ ('/host', multus_cni_bin_dir_host) | join }}"
 multus_cni_run_dir: "{{ ('/host', multus_cni_run_dir_host) | join }}"
 multus_cni_version: "0.4.0"
diff --git a/roles/network_plugin/multus/tasks/main.yml b/roles/network_plugin/multus/tasks/main.yml
index ab76268a5680a3db0728259aa6364074714f2dfc..1428929cc94d4ae6fc2507800ff996d9a6338271 100644
--- a/roles/network_plugin/multus/tasks/main.yml
+++ b/roles/network_plugin/multus/tasks/main.yml
@@ -14,7 +14,7 @@
 
 - name: Multus | Check container engine type
   set_fact:
-    container_manager_types: "{{ ansible_play_hosts_all|map('extract', hostvars, ['container_manager'])|list|unique }}"
+    container_manager_types: "{{ ansible_play_hosts_all | map('extract', hostvars, ['container_manager']) | list | unique }}"
 
 - name: Multus | Copy manifest templates
   template:
@@ -28,7 +28,7 @@
   register: multus_manifest_2
   vars:
     query: "*|[?container_manager=='{{ container_manager }}']|[0].inventory_hostname"
-    vars_from_node: "{{ hostvars|json_query(query) }}"
+    vars_from_node: "{{ hostvars | json_query(query) }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
     - item.engine in container_manager_types
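
map('extract', hostvars, ['container_manager']) pulls one variable out of every host in the play, which is how mixed container-engine clusters are detected above. A minimal sketch:

    - name: Collect the container managers in use across the play
      set_fact:
        managers: "{{ ansible_play_hosts_all | map('extract', hostvars, ['container_manager']) | list | unique }}"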
diff --git a/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 b/roles/network_plugin/multus/templates/multus-daemonset.yml.j2
index 19f91bae84b24e727bc2c684bc8622400b1250e2..10c42c1751331db7389f6a9ddc50ee10772de6de 100644
--- a/roles/network_plugin/multus/templates/multus-daemonset.yml.j2
+++ b/roles/network_plugin/multus/templates/multus-daemonset.yml.j2
@@ -2,7 +2,7 @@
 kind: DaemonSet
 apiVersion: apps/v1
 metadata:
-{% if container_manager_types|length >= 2 %}
+{% if container_manager_types | length >= 2 %}
   name: kube-multus-{{ container_manager }}-{{ image_arch }}
 {% else %}
   name: kube-multus-ds-{{ image_arch }}
@@ -26,7 +26,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       nodeSelector:
         kubernetes.io/arch: {{ image_arch }}
-{% if container_manager_types|length >= 2 %}
+{% if container_manager_types | length >= 2 %}
         kubespray.io/container_manager: {{ container_manager }}
 {% endif %}
       tolerations:
@@ -62,7 +62,7 @@ spec:
           mountPropagation: HostToContainer
 {% endif %}
         - name: cni
-          mountPath: {{  multus_cni_conf_dir }}
+          mountPath: {{ multus_cni_conf_dir }}
         - name: cnibin
           mountPath: {{ multus_cni_bin_dir }}
       volumes:
diff --git a/roles/recover_control_plane/etcd/tasks/main.yml b/roles/recover_control_plane/etcd/tasks/main.yml
index 8c6deda5cfbfb73587d52d44096c3980beebf10a..0ebd624c81bdb29c334b3b30f3936ebf3a723fbf 100644
--- a/roles/recover_control_plane/etcd/tasks/main.yml
+++ b/roles/recover_control_plane/etcd/tasks/main.yml
@@ -75,7 +75,7 @@
     - has_quorum
 
 - name: Remove broken cluster members
-  command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
+  command: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ', '').split(',')[0] }}"
   environment:
     ETCDCTL_API: "3"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
@@ -89,4 +89,4 @@
     - inventory_hostname in groups['broken_etcd']
     - not healthy
     - has_quorum
-    - hostvars[item[0]]['etcd_member_name'] == item[1].replace(' ','').split(',')[2]
+    - hostvars[item[0]]['etcd_member_name'] == item[1].replace(' ', '').split(',')[2]
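
The member-removal task above parses etcdctl's comma-separated member list output by stripping spaces and splitting; field 0 is the member ID and field 2 the member name. A minimal sketch against one sample output line:

    - name: Extract member ID and name from one etcdctl member list line
      set_fact:
        member_id: "{{ line.replace(' ', '').split(',')[0] }}"
        member_name: "{{ line.replace(' ', '').split(',')[2] }}"
      vars:
        line: "1609b5a3a1303755, started, etcd1, https://10.0.0.1:2380, https://10.0.0.1:2379, false"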
diff --git a/roles/recover_control_plane/post-recover/tasks/main.yml b/roles/recover_control_plane/post-recover/tasks/main.yml
index b1cd5e5effdd19f1e9f771aeb1c2d0812eff795a..a62f9127e9bfecfabc0d9a001acaa391c195481e 100644
--- a/roles/recover_control_plane/post-recover/tasks/main.yml
+++ b/roles/recover_control_plane/post-recover/tasks/main.yml
@@ -2,6 +2,7 @@
 # TODO: Figure out why kubeadm does not fix this
 - name: Set etcd-servers fact
   set_fact:
+    # noqa: jinja[spacing]
     etcd_servers: >-
       {% for host in groups['etcd'] -%}
         {% if not loop.last -%}
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index ff8a06d848168e7d5f6a911eb597bccb7bc52931..61694547f196fb7b6b0d264336ec8b3eb2fc57d8 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,11 +1,11 @@
 ---
 - name: remove-node | Delete node
-  command: "{{ kubectl }} delete node {{ kube_override_hostname|default(inventory_hostname) }}"
-  delegate_to: "{{ groups['kube_control_plane']|first }}"
+  command: "{{ kubectl }} delete node {{ kube_override_hostname | default(inventory_hostname) }}"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
   when:
     - groups['kube_control_plane'] | length > 0
     # ignore servers that are not nodes
-    - inventory_hostname in groups['k8s_cluster'] and kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines
+    - inventory_hostname in groups['k8s_cluster'] and kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines
   retries: "{{ delete_node_retries }}"
   # Sometimes the api-server can have a short window of unavailability when we delete a master node
   delay: "{{ delete_node_delay_seconds }}"
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index 31d959c7c1fa6044ae7c37f091aa4bee5135e7ad..d16df1a36d54f9007019f8a0417d5869dcbd9df7 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -5,7 +5,7 @@
   register: nodes
   when:
     - groups['kube_control_plane'] | length > 0
-  delegate_to: "{{ groups['kube_control_plane']|first }}"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
   changed_when: false
   run_once: true
 
@@ -16,14 +16,14 @@
       --ignore-daemonsets
       --grace-period {{ drain_grace_period }}
       --timeout {{ drain_timeout }}
-      --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }}
+      --delete-emptydir-data {{ kube_override_hostname | default(inventory_hostname) }}
   when:
     - groups['kube_control_plane'] | length > 0
     # ignore servers that are not nodes
-    - kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines
+    - kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines
   register: result
   failed_when: result.rc != 0 and not allow_ungraceful_removal
-  delegate_to: "{{ groups['kube_control_plane']|first }}"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
   until: result.rc == 0 or allow_ungraceful_removal
   retries: "{{ drain_retries }}"
   delay: "{{ drain_retry_delay_seconds }}"
@@ -32,12 +32,12 @@
   command: >-
     {{ kubectl }} get volumeattachments -o go-template={% raw %}'{{ range .items }}{{ .spec.nodeName }}{{ "\n" }}{{ end }}'{% endraw %}
   register: nodes_with_volumes
-  delegate_to: "{{ groups['kube_control_plane']|first }}"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
   changed_when: false
-  until: not (kube_override_hostname|default(inventory_hostname) in nodes_with_volumes.stdout_lines)
+  until: not (kube_override_hostname | default(inventory_hostname) in nodes_with_volumes.stdout_lines)
   retries: 3
   delay: "{{ drain_grace_period }}"
   when:
     - groups['kube_control_plane'] | length > 0
     - not allow_ungraceful_removal
-    - kube_override_hostname|default(inventory_hostname) in nodes.stdout_lines
+    - kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index f7729ea7902c0b47a3ec1f9f32b8ea0165124162..0279018d4fd6cb0883775c917f1fcddd29e47048 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -9,7 +9,7 @@
     - inventory_hostname in groups['etcd']
     - ip is not defined
     - access_ip is not defined
-  delegate_to: "{{ groups['etcd']|first }}"
+  delegate_to: "{{ groups['etcd'] | first }}"
   failed_when: false
 
 - name: Set node IP
@@ -37,22 +37,22 @@
     - facts
   environment:
     ETCDCTL_API: "3"
-    ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}"
-    ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}"
+    ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'] | first + '.pem' }}"
+    ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'] | first + '-key.pem' }}"
     ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}"
     ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379"
-  delegate_to: "{{ groups['etcd']|first }}"
+  delegate_to: "{{ groups['etcd'] | first }}"
   when: inventory_hostname in groups['etcd']
 
 - name: Remove etcd member from cluster
   command: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
   environment:
     ETCDCTL_API: "3"
-    ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '.pem' }}"
-    ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd']|first + '-key.pem' }}"
+    ETCDCTL_CERT: "{{ kube_cert_dir + '/etcd/server.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'] | first + '.pem' }}"
+    ETCDCTL_KEY: "{{ kube_cert_dir + '/etcd/server.key' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/admin-' + groups['etcd'] | first + '-key.pem' }}"
     ETCDCTL_CACERT: "{{ kube_cert_dir + '/etcd/ca.crt' if etcd_deployment_type == 'kubeadm' else etcd_cert_dir + '/ca.pem' }}"
     ETCDCTL_ENDPOINTS: "https://127.0.0.1:2379"
-  delegate_to: "{{ groups['etcd']|first }}"
+  delegate_to: "{{ groups['etcd'] | first }}"
   when:
     - inventory_hostname in groups['etcd']
     - etcd_member_id.stdout | length > 0
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 2d06b5c438bcdc0d342ac5f3d29d0c62792ef23d..534033d1894d900c299d08c494e0804f36687930 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -205,7 +205,7 @@
     - nat
     - mangle
     - raw
-  when: flush_iptables|bool
+  when: flush_iptables | bool
   tags:
     - iptables
 
@@ -219,7 +219,7 @@
     - nat
     - mangle
     - raw
-  when: flush_iptables|bool and enable_dual_stack_networks
+  when: flush_iptables | bool and enable_dual_stack_networks
   tags:
     - ip6tables
 
@@ -254,7 +254,7 @@
 - name: reset | Remove nodelocaldns
   command: "ip link del nodelocaldns"
   when:
-    - enable_nodelocaldns|default(false)|bool
+    - enable_nodelocaldns | default(false) | bool
     - nodelocaldns_device.stat.exists
 
 - name: reset | Check whether /var/lib/kubelet directory exists
@@ -279,7 +279,7 @@
     state: touch
     attributes: "-i"
     mode: 0644
-  loop: "{{ var_lib_kubelet_files_dirs_w_attrs.stdout_lines|select('search', 'Immutable')|list }}"
+  loop: "{{ var_lib_kubelet_files_dirs_w_attrs.stdout_lines | select('search', 'Immutable') | list }}"
   loop_control:
     loop_var: file_dir_line
     label: "{{ filedir_path }}"
@@ -428,9 +428,10 @@
 
 - name: reset | Restart network
   service:
+    # noqa: jinja[spacing]
     name: >-
       {% if ansible_os_family == "RedHat" -%}
-      {%- if ansible_distribution_major_version|int >= 8 or is_fedora_coreos or ansible_distribution == "Fedora" -%}
+      {%- if ansible_distribution_major_version | int >= 8 or is_fedora_coreos or ansible_distribution == "Fedora" -%}
       NetworkManager
       {%- else -%}
       network
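
The folded block scalar (>-) above lets the service name be computed with Jinja control flow while still collapsing to a single token. A minimal sketch, with illustrative service names:

    - name: Restart the distribution's network service
      service:
        name: >-
          {% if ansible_os_family == "RedHat" -%}
          NetworkManager
          {%- else -%}
          networking
          {%- endif %}
        state: restarted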
diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml
index d1b1af0bee32e56b06202b69188b97abf71fa7a3..fb33dcf93036bb8cedb09158822b73fcb8f36ad5 100644
--- a/roles/upgrade/post-upgrade/tasks/main.yml
+++ b/roles/upgrade/post-upgrade/tasks/main.yml
@@ -1,12 +1,12 @@
 ---
 - name: wait for cilium
   when:
-    - needs_cordoning|default(false)
+    - needs_cordoning | default(false)
     - kube_network_plugin == 'cilium'
   command: >
     {{ kubectl }}
     wait pod -n kube-system -l k8s-app=cilium
-    --field-selector 'spec.nodeName=={{ kube_override_hostname|default(inventory_hostname) }}'
+    --field-selector 'spec.nodeName=={{ kube_override_hostname | default(inventory_hostname) }}'
     --for=condition=Ready
     --timeout={{ upgrade_post_cilium_wait_timeout }}
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
@@ -26,7 +26,7 @@
     - upgrade_node_post_upgrade_pause_seconds != 0
 
 - name: Uncordon node
-  command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
+  command: "{{ kubectl }} uncordon {{ kube_override_hostname | default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
-    - needs_cordoning|default(false)
+    - needs_cordoning | default(false)
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index 210818b3c41f2ea670e054bcc14ebad8a3483eab..a4b89f822094f1e9b08c10d85a9f4da3b5c20746 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -18,7 +18,7 @@
 # Node NotReady: type = ready, status = Unknown
 - name: See if node is in ready state
   command: >
-    {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
+    {{ kubectl }} get node {{ kube_override_hostname | default(inventory_hostname) }}
     -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
   register: kubectl_node_ready
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
@@ -29,7 +29,7 @@
 # else unschedulable key doesn't exist
 - name: See if node is schedulable
   command: >
-    {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
+    {{ kubectl }} get node {{ kube_override_hostname | default(inventory_hostname) }}
     -o jsonpath='{ .spec.unschedulable }'
   register: kubectl_node_schedulable
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
@@ -38,6 +38,7 @@
 
 - name: Set if node needs cordoning
   set_fact:
+    # noqa: jinja[spacing]
     needs_cordoning: >-
       {% if (kubectl_node_ready.stdout == "True" and not kubectl_node_schedulable.stdout) or upgrade_node_always_cordon -%}
       true
@@ -48,7 +49,7 @@
 - name: Node draining
   block:
     - name: Cordon node
-      command: "{{ kubectl }} cordon {{ kube_override_hostname|default(inventory_hostname) }}"
+      command: "{{ kubectl }} cordon {{ kube_override_hostname | default(inventory_hostname) }}"
       delegate_to: "{{ groups['kube_control_plane'][0] }}"
       changed_when: true
 
@@ -76,7 +77,7 @@
         --ignore-daemonsets
         --grace-period {{ hostvars['localhost']['drain_grace_period_after_failure'] | default(drain_grace_period) }}
         --timeout {{ hostvars['localhost']['drain_timeout_after_failure'] | default(drain_timeout) }}
-        --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }}
+        --delete-emptydir-data {{ kube_override_hostname | default(inventory_hostname) }}
         {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %}
       when: drain_nodes
       register: result
@@ -104,7 +105,7 @@
             --ignore-daemonsets
             --grace-period {{ drain_fallback_grace_period }}
             --timeout {{ drain_fallback_timeout }}
-            --delete-emptydir-data {{ kube_override_hostname|default(inventory_hostname) }}
+            --delete-emptydir-data {{ kube_override_hostname | default(inventory_hostname) }}
             {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %}
             --disable-eviction
           register: drain_fallback_result
@@ -119,11 +120,11 @@
 
   rescue:
     - name: Set node back to schedulable
-      command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
+      command: "{{ kubectl }} uncordon {{ kube_override_hostname | default(inventory_hostname) }}"
       when: upgrade_node_uncordon_after_drain_failure
     - name: Fail after rescue
       fail:
-        msg: "Failed to drain node {{ kube_override_hostname|default(inventory_hostname) }}"
+        msg: "Failed to drain node {{ kube_override_hostname | default(inventory_hostname) }}"
       when: upgrade_node_fail_if_drain_fails
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml
index feb309d3884ecd771e10297539d6d16f4ea21ca8..02fbe4694e9c96cac33024f567972657b8b0a3df 100644
--- a/scripts/collect-info.yaml
+++ b/scripts/collect-info.yaml
@@ -84,7 +84,7 @@
         when: '{{ kube_network_plugin in ["canal", "calico"] }}'
       - name: helm_show_releases_history
         cmd: "for i in `{{ bin_dir }}/helm list -q`; do {{ bin_dir }}/helm history ${i} --col-width=0; done"
-        when: "{{ helm_enabled|default(true) }}"
+        when: "{{ helm_enabled | default(true) }}"
 
     logs:
       - /var/log/syslog
@@ -137,7 +137,7 @@
     - name: Pack results and logs
       community.general.archive:
         path: "/tmp/{{ archive_dirname }}"
-        dest: "{{ dir|default('.') }}/logs.tar.gz"
+        dest: "{{ dir | default('.') }}/logs.tar.gz"
         remove: true
         mode: 0640
       delegate_to: localhost
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index 5ac47b00c18c916bd26e2c331e8ef12271a04f19..68eb6cf8184baf69c25af37ad2efb9c438ad22f0 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -12,29 +12,29 @@
     dest: "{{ images_dir }}/{{ item.value.filename }}"
     checksum: "{{ item.value.checksum }}"
     mode: 0644
-  loop: "{{ images|dict2items }}"
+  loop: "{{ images | dict2items }}"
 
 - name: Unxz compressed images
   command: unxz --force {{ images_dir }}/{{ item.value.filename }}
-  loop: "{{ images|dict2items }}"
+  loop: "{{ images | dict2items }}"
   when:
     - item.value.filename.endswith('.xz')
 
 - name: Convert images that are not in qcow2 format
   command: qemu-img convert -O qcow2 {{ images_dir }}/{{ item.value.filename.rstrip('.xz') }} {{ images_dir }}/{{ item.key }}.qcow2
-  loop: "{{ images|dict2items }}"
+  loop: "{{ images | dict2items }}"
   when:
-    - not (item.value.converted|bool)
+    - not (item.value.converted | bool)
 
 - name: Make sure all images end with qcow2
   command: cp {{ images_dir }}/{{ item.value.filename.rstrip('.xz') }} {{ images_dir }}/{{ item.key }}.qcow2
-  loop: "{{ images|dict2items }}"
+  loop: "{{ images | dict2items }}"
   when:
-    - item.value.converted|bool
+    - item.value.converted | bool
 
 - name: Resize images
   command: qemu-img resize {{ images_dir }}/{{ item.key }}.qcow2 +8G
-  loop: "{{ images|dict2items }}"
+  loop: "{{ images | dict2items }}"
 
 # STEP 2: Include the images inside a container
 - name: Template default Dockerfile
@@ -45,14 +45,14 @@
 
 - name: Create docker images for each OS
   command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
-  loop: "{{ images|dict2items }}"
+  loop: "{{ images | dict2items }}"
 
 - name: docker login
   command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}"
 
 - name: docker push image
   command: docker push {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }}
-  loop: "{{ images|dict2items }}"
+  loop: "{{ images | dict2items }}"
 
 - name: docker logout
   command: docker logout -u="{{ docker_user }}" "{{ docker_host }}"
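
All of these tasks fan out over the same images dict via dict2items, which turns a mapping into a list of {key, value} pairs. A minimal sketch of the shape the loop expects:

    - name: Show each image entry
      debug:
        msg: "{{ item.key }} -> {{ item.value.filename }}"
      loop: "{{ images | dict2items }}"
      vars:
        images:
          ubuntu2204:
            filename: jammy-server-cloudimg-amd64.img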
diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml
index b4d2125ac5e9fcdba395de346f6fce1299df0299..d675527f6fbeeabbb437e232a4e9cc108a86bb31 100644
--- a/tests/cloud_playbooks/create-do.yml
+++ b/tests/cloud_playbooks/create-do.yml
@@ -49,7 +49,7 @@
   tasks:
     - name: replace_test_id
       set_fact:
-        test_name: "{{ test_id |regex_replace('\\.', '-') }}"
+        test_name: "{{ test_id | regex_replace('\\.', '-') }}"
 
     - name: show vars
       debug:
@@ -57,6 +57,7 @@
 
     - name: set instance names
       set_fact:
+        # noqa: jinja[spacing]
         instance_names: >-
           {%- if mode in ['separate', 'ha'] -%}
           ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2", "k8s-{{ test_name }}-3"]
@@ -67,7 +68,7 @@
     - name: Manage DO instances | {{ state }}
       community.digitalocean.digital_ocean:
         unique_name: yes
-        api_token: "{{ lookup('env','DO_API_TOKEN') }}"
+        api_token: "{{ lookup('env', 'DO_API_TOKEN') }}"
         command: "droplet"
         image_id: "{{ cloud_image }}"
         name: "{{ item }}"
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index ccb4bce1da045fa0b4236a905566533b361b8146..c3f17f4507e6ca7da1b96eca8cf1e0273b7dac5d 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -14,10 +14,11 @@
 
     - name: replace_test_id
       set_fact:
-        test_name: "{{ test_id |regex_replace('\\.', '-') }}"
+        test_name: "{{ test_id | regex_replace('\\.', '-') }}"
 
     - name: set instance names
       set_fact:
+        # noqa: jinja[spacing]
         instance_names: >-
           {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%}
           k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
@@ -39,7 +40,7 @@
         credentials_file: "{{ gce_credentials_file | default(omit) }}"
         project_id: "{{ gce_project_id }}"
         zone: "{{ cloud_region }}"
-        metadata: '{"test_id": "{{ test_id }}", "network": "{{ kube_network_plugin }}", "startup-script": "{{ startup_script|default("") }}"}'
+        metadata: '{"test_id": "{{ test_id }}", "network": "{{ kube_network_plugin }}", "startup-script": "{{ startup_script | default("") }}"}'
         tags: "build-{{ test_name }},{{ kube_network_plugin }}"
         ip_forward: yes
         service_account_permissions: ['compute-rw']
@@ -59,7 +60,7 @@
 
     - name: Make group_vars directory
       file:
-        path: "{{ inventory_path|dirname }}/group_vars"
+        path: "{{ inventory_path | dirname }}/group_vars"
         state: directory
         mode: 0755
       when: mode in ['scale', 'separate-scale', 'ha-scale']
@@ -67,13 +68,13 @@
     - name: Template fake hosts group vars  # noqa no-relative-paths - CI templates are not in role_path
       template:
         src: ../templates/fake_hosts.yml.j2
-        dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
+        dest: "{{ inventory_path | dirname }}/group_vars/fake_hosts.yml"
         mode: 0644
       when: mode in ['scale', 'separate-scale', 'ha-scale']
 
     - name: Delete group_vars directory
       file:
-        path: "{{ inventory_path|dirname }}/group_vars"
+        path: "{{ inventory_path | dirname }}/group_vars"
         state: absent
         recurse: yes
       when: delete_group_vars
diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml
index 4d118711b75ee774866bd879c6edf6a92bf42706..f8c5d6e9443bc9ec01429b62711a1cb1cf6d18e7 100644
--- a/tests/cloud_playbooks/delete-gce.yml
+++ b/tests/cloud_playbooks/delete-gce.yml
@@ -8,10 +8,11 @@
   tasks:
     - name: replace_test_id
       set_fact:
-        test_name: "{{ test_id |regex_replace('\\.', '-') }}"
+        test_name: "{{ test_id | regex_replace('\\.', '-') }}"
 
     - name: set instance names
       set_fact:
+        # noqa: jinja[spacing]
         instance_names: >-
           {%- if mode in ['separate', 'ha'] -%}
           k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
index 8ccf5adc5b04f003c9eb0cf35ec4af4468776e67..688b580cd1e66685dd83c611418adcafb4074c70 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
@@ -17,7 +17,7 @@
     vms_files: "{{ vms_files + [lookup('ansible.builtin.template', 'vm.yml.j2') | from_yaml] }}"
   vars:
     vms_files: []
-  loop: "{{ range(1, vm_count|int + 1, 1) | list }}"
+  loop: "{{ range(1, vm_count | int + 1, 1) | list }}"
   loop_control:
     index_var: vm_id
 
@@ -33,7 +33,7 @@
     executable: /bin/bash
   changed_when: false
   register: vm_ips
-  loop: "{{ range(1, vm_count|int + 1, 1) | list }}"
+  loop: "{{ range(1, vm_count | int + 1, 1) | list }}"
   loop_control:
     index_var: vm_id
   retries: 20
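
Worth noting why `vm_count | int + 1` is safe here: Jinja applies filters before arithmetic, so the expression parses as `(vm_count | int) + 1` and the loop runs inclusively from 1 through vm_count. A quick self-contained check (the literal '3' stands in for vm_count):

    - name: Show the generated VM ids for vm_count=3
      debug:
        msg: "{{ range(1, '3' | int + 1, 1) | list }}"  # -> [1, 2, 3]
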
diff --git a/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml b/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml
index 9d8e105db94a18336f292a24527551e64473bf23..37e61cd62b799011536bd986f3076658e3dd14c2 100644
--- a/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml
+++ b/tests/cloud_playbooks/roles/packet-ci/tasks/main.yml
@@ -5,7 +5,7 @@
 
 - name: Set VM count needed for CI test_id
   set_fact:
-    vm_count: "{%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale', 'ha-recover', 'ha-recover-noquorum'] -%}{{ 3|int }}{%- elif mode == 'aio' -%}{{ 1|int }}{%- else -%}{{ 2|int }}{%- endif -%}"
+    vm_count: "{%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale', 'ha-recover', 'ha-recover-noquorum'] -%}{{ 3 | int }}{%- elif mode == 'aio' -%}{{ 1 | int }}{%- else -%}{{ 2 | int }}{%- endif -%}"
 
 - import_tasks: cleanup-old-vms.yml
 
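
The chained inline if does the job, but as modes accumulate a lookup table reads more easily; the `| int` casts on the literals are also no-ops, since set_fact stores the templated scalar as a string anyway (absent Jinja2 native mode) and callers already re-cast with `vm_count | int`. A hedged alternative, not what the source does:

    - name: Set VM count needed for CI test_id
      set_fact:
        vm_count: "{{ mode_vm_count.get(mode, 2) }}"
      vars:
        mode_vm_count:
          separate: 3
          separate-scale: 3
          ha: 3
          ha-scale: 3
          ha-recover: 3
          ha-recover-noquorum: 3
          aio: 1
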
diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml
index bf0ffd9e9df3969639ed77850962e7ed59daca11..73ae6c5f5c6ced7397ae6df9bd05281745b4d0a4 100644
--- a/tests/cloud_playbooks/upload-logs-gcs.yml
+++ b/tests/cloud_playbooks/upload-logs-gcs.yml
@@ -73,7 +73,7 @@
         headers: '{"Content-Encoding": "x-gzip"}'
         gs_access_key: "{{ gs_key }}"
         gs_secret_key: "{{ gs_skey }}"
-        expiration: "{{ expire_days * 36000|int }}"
+        expiration: "{{ expire_days | int * 36000 }}"
       failed_when: false
       no_log: True
 
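
Precedence matters more in the expiration expression: the filter binds tighter than `*`, so casting the literal leaves the variable uncast, and if expire_days ever arrives as a string (for example via --extra-vars) Python string repetition kicks in instead of arithmetic. Hence the cast belongs on the variable. Separately, a day is 86400 seconds, so the 36000 multiplier (10 hours) may be worth verifying upstream. An illustration of the two parses:

    - name: Filter precedence with multiplication
      debug:
        msg: "{{ '2' | int * 36000 }}"  # -> 72000
        # whereas '2' * 36000 | int would repeat the string '2' 36000 times
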
diff --git a/tests/common/_docker_hub_registry_mirror.yml b/tests/common/_docker_hub_registry_mirror.yml
index 87570f71c3f4b04f4b900725372fce432b45befe..e6298b70ed8f05e337f2bfc2333a2c64c5859dcb 100644
--- a/tests/common/_docker_hub_registry_mirror.yml
+++ b/tests/common/_docker_hub_registry_mirror.yml
@@ -27,8 +27,8 @@ netcheck_server_image_repo: "{{ quay_image_repo }}/kubespray/k8s-netchecker-serv
 
 nginx_image_repo: "{{ quay_image_repo }}/kubespray/nginx"
 
-flannel_image_repo: "{{ quay_image_repo}}/kubespray/flannel"
-flannel_init_image_repo: "{{ quay_image_repo}}/kubespray/flannel-cni-plugin"
+flannel_image_repo: "{{ quay_image_repo }}/kubespray/flannel"
+flannel_init_image_repo: "{{ quay_image_repo }}/kubespray/flannel-cni-plugin"
 
 # Kubespray settings for tests
 deploy_netchecker: true
diff --git a/tests/templates/fake_hosts.yml.j2 b/tests/templates/fake_hosts.yml.j2
index 673109213ab1325a95e02cc29bfa343d4cf362b0..c172b78b070cda860c11853c7f367d3b0f09118f 100644
--- a/tests/templates/fake_hosts.yml.j2
+++ b/tests/templates/fake_hosts.yml.j2
@@ -1,3 +1,3 @@
 ansible_default_ipv4:
   address: 255.255.255.255
-ansible_hostname: "{{ '{{' }}inventory_hostname}}"
+ansible_hostname: "{{ '{{' }} inventory_hostname }}"
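
fake_hosts.yml.j2 is a template whose output must itself contain a live Jinja expression, so the opening braces are emitted as a string literal: `{{ '{{' }}` renders to a bare `{{` that the first templating pass leaves alone, and the written file ends up holding a normal `{{ inventory_hostname }}` reference for Ansible to evaluate later. A `{% raw %}` block achieves the same effect (sketch; the second key is hypothetical):

    {# two equivalent ways for a template to emit a literal expression #}
    ansible_hostname: "{{ '{{' }} inventory_hostname }}"
    ansible_fqdn: "{% raw %}{{ inventory_hostname }}{% endraw %}"
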
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index 78a1021d60b1ddbebbac80cd82f8b0c90edf44c7..b5f1c2b6e0b5d9000248005eddae12ef32522e6e 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -168,8 +168,8 @@
   - name: Set networking facts
     set_fact:
       kube_pods_subnet: 10.233.64.0/18
-      pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'metadata.name') | list }}"
-      pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}"
+      pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute='metadata.name') | list }}"
+      pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute='status.podIP') | list }}"
       pods_hostnet: |
         {% set list = hostnet_pods.stdout.split(" ") %}
         {{ list }}
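
Note the opposite convention inside call parentheses: jinja[spacing] follows PEP 8 there, so keyword arguments take no spaces around `=`, while the pipe still gets one on each side. map(attribute=...) also accepts dotted paths, which is what lets these one-liners reach into metadata.name and status.podIP. A self-contained illustration with made-up data:

    - name: Pluck nested attributes from a list of dicts
      debug:
        msg: "{{ pods | map(attribute='metadata.name') | list }}"  # -> ['a', 'b']
      vars:
        pods:
          - metadata: {name: a}
          - metadata: {name: b}
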
diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml
index 50a8136d29380663e020643654675398529e6391..0542e124506bf7f9e9aed4823507116b6ad6763f 100644
--- a/tests/testcases/040_check-network-adv.yml
+++ b/tests/testcases/040_check-network-adv.yml
@@ -7,7 +7,7 @@
         executable: /bin/bash
       when:
         - (calico_ipip_mode is defined and calico_ipip_mode != 'Never' or cloud_provider is defined)
-        - kube_network_plugin|default('calico') == 'calico'
+        - kube_network_plugin | default('calico') == 'calico'
 
 - hosts: k8s_cluster
   vars:
@@ -44,7 +44,7 @@
       args:
         executable: /bin/bash
       register: nca_pod
-      until: nca_pod.stdout_lines|length >= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2
+      until: nca_pod.stdout_lines | length >= groups['k8s_cluster'] | intersect(ansible_play_hosts) | length * 2
       retries: 3
       delay: 10
       failed_when: false
@@ -73,9 +73,9 @@
       register: agents
       retries: 18
       delay: "{{ agent_report_interval }}"
-      until: agents.content|length > 0 and
+      until: agents.content | length > 0 and
         agents.content[0] == '{' and
-        agents.content|from_json|length >= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2
+        agents.content | from_json | length >= groups['k8s_cluster'] | intersect(ansible_play_hosts) | length * 2
       failed_when: false
       no_log: false
 
@@ -89,7 +89,7 @@
       register: connectivity_check
       retries: 3
       delay: "{{ agent_report_interval }}"
-      until: connectivity_check.content|length > 0 and
+      until: connectivity_check.content | length > 0 and
         connectivity_check.content[0] == '{'
       no_log: false
       failed_when: false
@@ -200,7 +200,7 @@
         executable: /bin/bash
       when:
         - inventory_hostname == groups['kube_control_plane'][0]
-        - kube_network_plugin_multus|default(false)|bool
+        - kube_network_plugin_multus | default(false) | bool
 
     - name: Annotate pod with macvlan network
       # We cannot use only shell: below because Ansible will render the text
@@ -226,7 +226,7 @@
         executable: /bin/bash
       when:
         - inventory_hostname == groups['kube_control_plane'][0]
-        - kube_network_plugin_multus|default(false)|bool
+        - kube_network_plugin_multus | default(false) | bool
 
     - name: Check secondary macvlan interface
       command: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
@@ -236,4 +236,4 @@
       changed_when: false
       when:
         - inventory_hostname == groups['kube_control_plane'][0]
-        - kube_network_plugin_multus|default(false)|bool
+        - kube_network_plugin_multus | default(false) | bool
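
The retry loops in this file all follow one pattern: until: takes a bare Jinja expression (no surrounding braces) that is re-evaluated against the registered result up to `retries` times with `delay` seconds between attempts, and failed_when: false keeps a final miss from failing the play. A minimal sketch with a hypothetical endpoint:

    - name: Wait until an agent endpoint returns JSON
      uri:
        url: "http://127.0.0.1:8081/api/v1/agents/"
        return_content: true
      register: agents
      retries: 18
      delay: 10
      until: agents.content | length > 0 and agents.content[0] == '{'
      failed_when: false
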