Unverified commit e70f27dd, authored by Maxime Guyot and committed by GitHub

Add noqa and disable .ansible-lint global exclusions (#6410)

parent b680cdd0
Showing 38 additions and 45 deletions
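The change follows one pattern throughout: instead of skipping a lint rule globally in .ansible-lint, each offending task now carries an inline `# noqa <rule-id>` marker next to the code it excuses. A minimal sketch of the pattern with a hypothetical task (numeric rule IDs are the convention of ansible-lint releases of this era):

- name: Query cluster nodes  # noqa 301 - read-only query, changes nothing
  command: "{{ bin_dir }}/kubectl get nodes -o json"
  register: node_state

The suppression stays visible beside the task it covers, so reviewers can judge each one individually rather than the rule being silently dead repository-wide.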
@@ -2,15 +2,8 @@
 parseable: true
 skip_list:
   # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
-  # The following rules throw errors.
-  # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
-  - '301'
-  - '302'
-  - '303'
-  - '305'
-  - '306'
-  - '404'
-  - '503'
+  # DO NOT add any other rules to this skip_list, instead use local `# noqa` with a comment explaining WHY it is necessary
   # These rules are intentionally skipped:
   #
...
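For reference, the seven rules dropped from the global skip_list are ansible-lint's default command/shell hygiene checks: 301 (commands should not change things if nothing needs doing), 302 (using command where a dedicated module argument exists), 303 (using command rather than an equivalent module), 305 (use shell only when shell functionality is required), 306 (shells that use pipes should set the pipefail option), 404 (roles should not use relative paths), and 503 (tasks that run when something has changed should likely be handlers). The hunks below tag every remaining occurrence with the specific rule it trips.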
 ---
-- name: Query Azure VMs
+- name: Query Azure VMs # noqa 301
   command: azure vm list-ip-address --json {{ azure_resource_group }}
   register: vm_list_cmd
...
 ---
-- name: Query Azure VMs IPs
+- name: Query Azure VMs IPs # noqa 301
   command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
   register: vm_ip_list_cmd
-- name: Query Azure VMs Roles
+- name: Query Azure VMs Roles # noqa 301
   command: az vm list -o json --resource-group {{ azure_resource_group }}
   register: vm_list_cmd
-- name: Query Azure Load Balancer Public IP
+- name: Query Azure Load Balancer Public IP # noqa 301
   command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
   register: lb_pubip_cmd
...
@@ -69,7 +69,7 @@
 # Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
 # handle manually
-- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
+- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
   raw: |
     echo {{ item | hash('sha1') }} > /etc/machine-id.new
     mv -b /etc/machine-id.new /etc/machine-id
...
@@ -7,7 +7,7 @@
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
-- name: Ensure GlusterFS client will reinstall if the PPA was just added.
+- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
   apt:
     name: "{{ item }}"
     state: absent
...
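Rule 503 fires on this task because it is driven by the registered result of the PPA task above (presumably a glusterfs_ppa_added.changed condition, not shown in this hunk) instead of being written as a handler; the noqa records that running the reinstall inline, immediately after the PPA is added, is deliberate.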
@@ -7,7 +7,7 @@
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
-- name: Ensure GlusterFS will reinstall if the PPA was just added.
+- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
   apt:
     name: "{{ item }}"
     state: absent
...
@@ -6,7 +6,7 @@
 - name: "Delete bootstrap Heketi."
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
   when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
-- name: "Ensure there is nothing left over."
+- name: "Ensure there is nothing left over." # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
...
@@ -13,7 +13,7 @@
 - name: "Copy topology configuration into container."
   changed_when: false
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology."
+- name: "Load heketi topology." # noqa 503
   when: "render.changed"
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
   register: "load_heketi"
...
@@ -18,7 +18,7 @@
 - name: "Provision database volume."
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
   when: "heketi_database_volume_exists is undefined"
-- name: "Copy configuration from pod."
+- name: "Copy configuration from pod." # noqa 301
   become: true
   command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
 - name: "Get heketi volume ids."
...
@@ -10,10 +10,10 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
-- name: "Copy topology configuration into container."
+- name: "Copy topology configuration into container." # noqa 503
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology."
+- name: "Load heketi topology." # noqa 503
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
 - name: "Get heketi topology."
...
@@ -22,7 +22,7 @@
   ignore_errors: true
   changed_when: false
-- name: "Remove volume groups."
+- name: "Remove volume groups." # noqa 301
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
   become: true
@@ -30,7 +30,7 @@
   with_items: "{{ volume_groups.stdout_lines }}"
   loop_control: { loop_var: "volume_group" }
-- name: "Remove physical volume from cluster disks."
+- name: "Remove physical volume from cluster disks." # noqa 301
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
   become: true
...
 ---
-- name: "Remove storage class."
+- name: "Remove storage class." # noqa 301
   command: "{{ bin_dir }}/kubectl delete storageclass gluster"
   ignore_errors: true
-- name: "Tear down heketi."
+- name: "Tear down heketi." # noqa 301
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
   ignore_errors: true
-- name: "Tear down heketi."
+- name: "Tear down heketi." # noqa 301
   command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
   ignore_errors: true
 - name: "Tear down bootstrap."
   include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
-- name: "Ensure there is nothing left over."
+- name: "Ensure there is nothing left over." # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: "Ensure there is nothing left over."
+- name: "Ensure there is nothing left over." # noqa 301
   command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
   register: "heketi_result"
   until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
   retries: 60
   delay: 5
-- name: "Tear down glusterfs."
+- name: "Tear down glusterfs." # noqa 301
   command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
   ignore_errors: true
-- name: "Remove heketi storage service."
+- name: "Remove heketi storage service." # noqa 301
   command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
   ignore_errors: true
-- name: "Remove heketi gluster role binding"
+- name: "Remove heketi gluster role binding" # noqa 301
   command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
   ignore_errors: true
-- name: "Remove heketi config secret"
+- name: "Remove heketi config secret" # noqa 301
   command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
   ignore_errors: true
-- name: "Remove heketi db backup"
+- name: "Remove heketi db backup" # noqa 301
   command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
   ignore_errors: true
-- name: "Remove heketi service account"
+- name: "Remove heketi service account" # noqa 301
   command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
   ignore_errors: true
 - name: "Get secrets"
...
@@ -16,13 +16,13 @@
     src: get_cinder_pvs.sh
     dest: /tmp
     mode: u+rwx
-- name: Get PVs provisioned by in-tree cloud provider
+- name: Get PVs provisioned by in-tree cloud provider # noqa 301
   command: /tmp/get_cinder_pvs.sh
   register: pvs
 - name: Remove get_cinder_pvs.sh
   file:
     path: /tmp/get_cinder_pvs.sh
     state: absent
-- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
+- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation # noqa 301
   command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
   loop: "{{ pvs.stdout_lines | list }}"
...
@@ -4,7 +4,7 @@
   vars:
     download: "{{ download_defaults | combine(downloads.crictl) }}"
-- name: Install crictl config
+- name: Install crictl config # noqa 404
   template:
     src: ../templates/crictl.yaml.j2
     dest: /etc/crictl.yaml
...
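Rule 404 objects to the relative src: ../templates/crictl.yaml.j2 path; the suppression documents that reaching outside the role's own templates directory is intentional here, with the crictl template shared rather than duplicated per role.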
@@ -34,7 +34,7 @@
   tags:
     - facts
-- name: disable unified_cgroup_hierarchy in Fedora 31+
+- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
   shell:
     cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
...
@@ -4,7 +4,7 @@
   vars:
     download: "{{ download_defaults | combine(downloads.crictl) }}"
-- name: Install crictl config
+- name: Install crictl config # noqa 404
   template:
     src: ../templates/crictl.yaml.j2
     dest: /etc/crictl.yaml
@@ -21,7 +21,7 @@
     group: no
   delegate_to: "{{ inventory_hostname }}"
-- name: Get crictl completion
+- name: Get crictl completion # noqa 305
   shell: "{{ bin_dir }}/crictl completion"
   changed_when: False
   register: cri_completion
...
@@ -59,7 +59,7 @@
   - ansible_distribution == "CentOS"
   - ansible_distribution_major_version == "8"
-- name: Ensure latest version of libseccom installed
+- name: Ensure latest version of libseccom installed # noqa 303
   command: "yum update -y libseccomp"
   when:
   - ansible_distribution == "CentOS"
...
@@ -47,7 +47,7 @@
   tags:
     - facts
-- name: disable unified_cgroup_hierarchy in Fedora 31+
+- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
   shell:
     cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
...
@@ -28,13 +28,13 @@
   set_fact:
     docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}"
-- name: check system nameservers
+- name: check system nameservers # noqa 306
   shell: grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
   changed_when: False
   register: system_nameservers
   check_mode: no
-- name: check system search domains
+- name: check system search domains # noqa 306
   shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
   changed_when: False
   register: system_search_domains
...
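Rule 306 can usually be satisfied rather than suppressed by enabling pipefail; a sketch of what that alternative would look like for the nameserver check (it assumes bash on every target host, which may be why suppression was chosen instead):

- name: check system nameservers
  shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
  args:
    executable: /bin/bash
  changed_when: False
  register: system_nameservers
  check_mode: no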
@@ -11,7 +11,7 @@
   notify: restart docker
   when: http_proxy is defined or https_proxy is defined
-- name: get systemd version
+- name: get systemd version # noqa 306
   # noqa 303 - systemctl is called intentionally here
   shell: systemctl --version | head -n 1 | cut -d " " -f 2
   register: systemd_version
...
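With the global exclusions removed, any new 301/302/303/305/306/404/503 violation will surface the next time ansible-lint runs over the repository, and has to be either fixed or justified with its own inline `# noqa` comment.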