Skip to content
Snippets Groups Projects
Unverified Commit f8f197e2 authored by Arthur Outhenin-Chalandre, committed by GitHub
Browse files

Fix outdated tag and experimental ansible-lint rules (#10254)


* project: fix outdated tag and experimental

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: remove no longer useful noqa 301

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: replace unnamed-task by name[missing]

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix daemon-reload -> daemon_reload

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

---------

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
parent 4f85b750
Branches
Tags
No related merge requests found
Showing
with 37 additions and 38 deletions
......@@ -43,7 +43,6 @@
- has_quorum
- name: Delete old certificates
# noqa 302 ignore-error - rm is ok here for now
shell: "rm {{ etcd_cert_dir }}/*{{ item }}*"
with_items: "{{ groups['broken_etcd'] }}"
register: delete_old_cerificates
......
......@@ -26,7 +26,7 @@
path: "{{ etcd_data_dir }}"
state: absent
- name: Restore etcd snapshot # noqa 301 305
- name: Restore etcd snapshot # noqa command-instead-of-shell
shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}"
environment:
ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
......
......@@ -9,7 +9,7 @@
changed_when: false
run_once: true
- name: remove-node | Drain node except daemonsets resource # noqa 301
- name: remove-node | Drain node except daemonsets resource
command: >-
{{ kubectl }} drain
--force
......
......@@ -38,7 +38,7 @@
tags:
- docker
- name: reset | systemctl daemon-reload # noqa 503
- name: reset | systemctl daemon-reload # noqa no-handler
systemd:
daemon_reload: true
when: services_removed.changed
......@@ -174,7 +174,7 @@
tags:
- services
- name: reset | gather mounted kubelet dirs # noqa 301
- name: reset | gather mounted kubelet dirs
shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
args:
executable: /bin/bash
......@@ -185,7 +185,7 @@
tags:
- mounts
- name: reset | unmount kubelet dirs # noqa 301
- name: reset | unmount kubelet dirs
command: umount -f {{ item }}
with_items: "{{ mounted_dirs.stdout_lines }}"
register: umount_dir
......
......@@ -29,11 +29,11 @@
register: patch_kube_proxy_state
when: current_kube_proxy_state.stdout | trim | lower != "linux"
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ patch_kube_proxy_state.stdout_lines }}"
when: patch_kube_proxy_state is not skipped
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ patch_kube_proxy_state.stderr_lines }}"
when: patch_kube_proxy_state is not skipped
tags: init
......
......@@ -32,7 +32,7 @@
when:
- item.value.converted|bool
- name: Resize images # noqa 301
- name: Resize images
command: qemu-img resize {{ images_dir }}/{{ item.key }}.qcow2 +8G
loop: "{{ images|dict2items }}"
......@@ -43,16 +43,16 @@
dest: "{{ images_dir }}/Dockerfile"
mode: 0644
- name: Create docker images for each OS # noqa 301
- name: Create docker images for each OS
command: docker build -t {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
loop: "{{ images|dict2items }}"
- name: docker login # noqa 301
- name: docker login
command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}"
- name: docker push image # noqa 301
- name: docker push image
command: docker push {{ registry }}/vm-{{ item.key }}:{{ item.value.tag }}
loop: "{{ images|dict2items }}"
- name: docker logout # noqa 301
- name: docker logout
command: docker logout -u="{{ docker_user }}" "{{ docker_host }}"
......@@ -20,6 +20,6 @@
- name: Template the inventory
template:
src: ../templates/inventory-aws.j2 # noqa 404 CI inventory templates are not in role_path
src: ../templates/inventory-aws.j2 # noqa no-relative-paths - CI inventory templates are not in role_path
dest: "{{ inventory_path }}"
mode: 0644
......@@ -86,7 +86,7 @@
- name: Template the inventory
template:
src: ../templates/inventory-do.j2 # noqa 404 CI templates are not in role_path
src: ../templates/inventory-do.j2 # noqa no-relative-paths - CI templates are not in role_path
dest: "{{ inventory_path }}"
mode: 0644
when: state == 'present'
......@@ -28,7 +28,7 @@
{%- endif -%}
- name: Create gce instances
google.cloud.gcp_compute_instance:
google.cloud.gcp_compute_instance: # noqa args[module] - Probably doesn't work
instance_names: "{{ instance_names }}"
machine_type: "{{ cloud_machine_type }}"
image: "{{ cloud_image | default(omit) }}"
......@@ -51,7 +51,7 @@
groupname: "waitfor_hosts"
with_items: '{{ gce.instance_data }}'
- name: Template the inventory # noqa 404 CI inventory templates are not in role_path
- name: Template the inventory # noqa no-relative-paths - CI inventory templates are not in role_path
template:
src: ../templates/inventory-gce.j2
dest: "{{ inventory_path }}"
......@@ -64,7 +64,7 @@
mode: 0755
when: mode in ['scale', 'separate-scale', 'ha-scale']
- name: Template fake hosts group vars # noqa 404 CI templates are not in role_path
- name: Template fake hosts group vars # noqa no-relative-paths - CI templates are not in role_path
template:
src: ../templates/fake_hosts.yml.j2
dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
......
......@@ -19,7 +19,7 @@
k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
{%- endif -%}
- name: stop gce instances
- name: stop gce instances # noqa args[module] - Probably doesn't work
google.cloud.gcp_compute_instance:
instance_names: "{{ instance_names }}"
image: "{{ cloud_image | default(omit) }}"
......@@ -33,7 +33,7 @@
poll: 3
register: gce
- name: delete gce instances
- name: delete gce instances # noqa args[module] - Probably doesn't work
google.cloud.gcp_compute_instance:
instance_names: "{{ instance_names }}"
image: "{{ cloud_image | default(omit) }}"
......
......@@ -56,7 +56,7 @@
no_log: True
failed_when: false
- name: Apply the lifecycle rules # noqa 301
- name: Apply the lifecycle rules
command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
changed_when: false
environment:
......@@ -77,5 +77,5 @@
failed_when: false
no_log: True
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"
......@@ -12,7 +12,7 @@
delay: 5
until: apiserver_response is success
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ apiserver_response.json }}"
- name: Check API servers version
......
......@@ -12,7 +12,7 @@
bin_dir: "/usr/local/bin"
when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- import_role: # noqa unnamed-task
- import_role: # noqa name[missing]
name: cluster-dump
- name: Check kubectl output
......@@ -21,7 +21,7 @@
register: get_nodes
no_log: true
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ get_nodes.stdout.split('\n') }}"
- name: Check that all nodes are running and ready
......
......@@ -12,7 +12,7 @@
bin_dir: "/usr/local/bin"
when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- import_role: # noqa unnamed-task
- import_role: # noqa name[missing]
name: cluster-dump
- name: Check kubectl output
......@@ -21,7 +21,7 @@
register: get_pods
no_log: true
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ get_pods.stdout.split('\n') }}"
- name: Check that all pods are running and ready
......@@ -44,6 +44,6 @@
register: get_pods
no_log: true
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ get_pods.stdout.split('\n') }}"
failed_when: not run_pods_log is success
......@@ -23,7 +23,7 @@
register: get_csr
changed_when: false
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ get_csr.stdout.split('\n') }}"
- name: Check there are csrs
......@@ -63,7 +63,7 @@
when: get_csr.stdout_lines | length > 0
changed_when: certificate_approve.stdout
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ certificate_approve.stdout.split('\n') }}"
when:
......@@ -114,7 +114,7 @@
- agnhost1
- agnhost2
- import_role: # noqa unnamed-task
- import_role: # noqa name[missing]
name: cluster-dump
- name: Check that all pods are running and ready
......@@ -137,7 +137,7 @@
register: pods
no_log: true
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ pods.stdout.split('\n') }}"
failed_when: not run_pods_log is success
......@@ -162,7 +162,7 @@
register: get_pods
no_log: true
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ get_pods.stdout.split('\n') }}"
- name: Set networking facts
......
......@@ -26,7 +26,7 @@
bin_dir: "/usr/local/bin"
when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- import_role: # noqa unnamed-task
- import_role: # noqa name[missing]
name: cluster-dump
- name: Wait for netchecker server
......@@ -60,7 +60,7 @@
- netchecker-agent-hostnet
when: not nca_pod is success
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: nca_pod.stdout_lines
when: inventory_hostname == groups['kube_control_plane'][0]
......@@ -96,7 +96,7 @@
when:
- agents.content != '{}'
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: ncs_pod
run_once: true
......@@ -130,7 +130,7 @@
- agents.content is defined
- agents.content[0] == '{'
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: agents_check_result
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
......@@ -147,7 +147,7 @@
- connectivity_check.content is defined
- connectivity_check.content[0] == '{'
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: connectivity_check_result
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment