From 398a99579825df265f5563f50a0070d330e14930 Mon Sep 17 00:00:00 2001
From: Kenichi Omichi <ken1ohmichi@gmail.com>
Date: Wed, 30 Dec 2020 05:07:49 -0800
Subject: [PATCH] Fix markdownlint failures under ./roles/ (#7089)

This fixes markdownlint failures under roles/
---
 .gitlab-ci/lint.yml                           |  3 +-
 contrib/terraform/gcp/README.md               |  2 +-
 roles/bootstrap-os/README.md                  | 19 +++++-----
 .../cephfs_provisioner/README.md              | 22 ++++++------
 .../local_volume_provisioner/README.md        | 13 ++++---
 .../rbd_provisioner/README.md                 |  4 +--
 .../alb_ingress_controller/README.md          |  1 +
 .../ingress_controller/ambassador/README.md   |  6 ++--
 .../ingress_controller/cert_manager/README.md |  4 +--
 .../ingress_nginx/README.md                   | 35 +++++++++++--------
 roles/kubernetes-apps/metallb/README.md       |  2 +-
 roles/kubernetes-apps/registry/README.md      | 14 ++++----
 12 files changed, 65 insertions(+), 60 deletions(-)

diff --git a/.gitlab-ci/lint.yml b/.gitlab-ci/lint.yml
index 8ce37580f..51cb490a9 100644
--- a/.gitlab-ci/lint.yml
+++ b/.gitlab-ci/lint.yml
@@ -66,8 +66,7 @@ markdownlint:
   before_script:
     - npm install -g markdownlint-cli@0.22.0
   script:
-    # TODO: Remove "grep -v" part to enable markdownlint for all md files
-    - markdownlint $(find . -name "*.md" | grep -v .github | grep -v roles) --ignore docs/_sidebar.md --ignore contrib/dind/README.md
+    - markdownlint $(find . -name "*.md" | grep -v .github) --ignore docs/_sidebar.md --ignore contrib/dind/README.md
 
 ci-matrix:
   stage: unit-tests
diff --git a/contrib/terraform/gcp/README.md b/contrib/terraform/gcp/README.md
index b2d74d940..b036c5b71 100644
--- a/contrib/terraform/gcp/README.md
+++ b/contrib/terraform/gcp/README.md
@@ -6,7 +6,7 @@ Provision a Kubernetes cluster on GCP using Terraform and Kubespray
 
 The setup looks like the following
 
-```
+```text
                            Kubernetes cluster
                         +-----------------------+
 +---------------+       |   +--------------+    |
diff --git a/roles/bootstrap-os/README.md b/roles/bootstrap-os/README.md
index d202d5533..c791850a0 100644
--- a/roles/bootstrap-os/README.md
+++ b/roles/bootstrap-os/README.md
@@ -3,15 +3,16 @@
 Bootstrap an Ansible host to be able to run Ansible modules.
 
 This role will:
-  * configure the package manager (if applicable) to be able to fetch packages
-  * install Python
-  * install the necessary packages to use Ansible's package manager modules
-  * set the hostname of the host to `{{ inventory_hostname }}` when requested
+
+* configure the package manager (if applicable) to be able to fetch packages
+* install Python
+* install the necessary packages to use Ansible's package manager modules
+* set the hostname of the host to `{{ inventory_hostname }}` when requested
 
 ## Requirements
 
 A host running an operating system that is supported by Kubespray.
-See https://github.com/kubernetes-sigs/kubespray#supported-linux-distributions for a current list.
+See [Supported Linux Distributions](https://github.com/kubernetes-sigs/kubespray#supported-linux-distributions) for a current list.
 
 SSH access to the host.
 
@@ -21,11 +22,11 @@ Variables are listed with their default values, if applicable.
 
 ### General variables
 
-  * `http_proxy`/`https_proxy`
-    The role will configure the package manager (if applicable) to download packages via a proxy.
+* `http_proxy`/`https_proxy`
+  The role will configure the package manager (if applicable) to download packages via a proxy.
 
-  * `override_system_hostname: true`
-    The role will set the hostname of the machine to the name it has according to Ansible's inventory (the variable `{{ inventory_hostname }}`).
+* `override_system_hostname: true`
+  The role will set the hostname of the machine to the name it has according to Ansible's inventory (the variable `{{ inventory_hostname }}`).
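
For example, the variables above could be set through inventory group vars along these lines (the proxy URL is only an illustrative placeholder):

```yaml
http_proxy: "http://proxy.example.com:3128"
https_proxy: "http://proxy.example.com:3128"
override_system_hostname: true
```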
 
 ### Per distribution variables
 
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md
index 57454f705..8af1d0e6b 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md
@@ -23,11 +23,11 @@ make push
 Test instruction
 ----------------
 
--   Start Kubernetes local cluster
+- Start Kubernetes local cluster
 
-See <a href="https://kubernetes.io/" class="uri" class="uri">https://kubernetes.io/</a>.
+See [Kubernetes](https://kubernetes.io/).
 
--   Create a Ceph admin secret
+- Create a Ceph admin secret
 
 ``` bash
 ceph auth get client.admin 2>&1 |grep "key = " |awk '{print  $3'} |xargs echo -n > /tmp/secret
@@ -35,7 +35,7 @@ kubectl create ns cephfs
 kubectl create secret generic ceph-secret-admin --from-file=/tmp/secret --namespace=cephfs
 ```
 
--   Start CephFS provisioner
+- Start CephFS provisioner
 
 The following example uses `cephfs-provisioner-1` as the identity for the instance and assumes kubeconfig is at `/root/.kube`. The identity should remain the same if the provisioner restarts. If there are multiple provisioners, each should have a different identity.
 
@@ -45,21 +45,21 @@ docker run -ti -v /root/.kube:/kube -v /var/run/kubernetes:/var/run/kubernetes -
 
 Alternatively, deploy it in kubernetes, see [deployment](deploy/README.md).
 
--   Create a CephFS Storage Class
+- Create a CephFS Storage Class
 
-Replace Ceph monitor's IP in <a href="example/class.yaml" class="uri" class="uri">example/class.yaml</a> with your own and create storage class:
+Replace Ceph monitor's IP in [example class](example/class.yaml) with your own and create the storage class:
 
 ``` bash
 kubectl create -f example/class.yaml
 ```
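
A CephFS storage class of this kind usually looks roughly like the sketch below; the monitor address and secret names are placeholders, and `example/class.yaml` remains the authoritative reference:

``` yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: 172.24.0.6:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: cephfs
```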
 
--   Create a claim
+- Create a claim
 
 ``` bash
 kubectl create -f example/claim.yaml
 ```
 
--   Create a Pod using the claim
+- Create a Pod using the claim
 
 ``` bash
 kubectl create -f example/test-pod.yaml
@@ -68,9 +68,9 @@ kubectl create -f example/test-pod.yaml
 Known limitations
 -----------------
 
--   Kernel CephFS doesn't work with SELinux, setting SELinux label in Pod's securityContext will not work.
--   Kernel CephFS doesn't support quota or capacity, capacity requested by PVC is not enforced or validated.
--   Currently each Ceph user created by the provisioner has `allow r` MDS cap to permit CephFS mount.
+- Kernel CephFS doesn't work with SELinux, setting SELinux label in Pod's securityContext will not work.
+- Kernel CephFS doesn't support quota or capacity, capacity requested by PVC is not enforced or validated.
+- Currently each Ceph user created by the provisioner has `allow r` MDS cap to permit CephFS mount.
 
 Acknowledgement
 ---------------
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
index c8b935b06..d6440deb0 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md
@@ -50,7 +50,7 @@ the rest of this doc will use that path as an example.
 Examples to create local storage volumes
 ----------------------------------------
 
-### tmpfs method:
+1. tmpfs method:
 
 ``` bash
 for vol in vol1 vol2 vol3; do
@@ -62,7 +62,7 @@ done
 The tmpfs method is not recommended for production because the mount is not
 persistent and data will be deleted on reboot.
 
-### Mount physical disks
+1. Mount physical disks
 
 ``` bash
 mkdir /mnt/disks/ssd1
@@ -72,8 +72,7 @@ mount /dev/vdb1 /mnt/disks/ssd1
 Physical disks are recommended for production environments because they offer
 complete isolation in terms of I/O and capacity.
 
-### Mount unpartitioned physical devices
-
+1. Mount unpartitioned physical devices
 
 ``` bash
 for disk in /dev/sdc /dev/sdd /dev/sde; do
@@ -85,7 +84,7 @@ This saves time of precreating filesystems. Note that your storageclass must hav
 volume_mode set to "Filesystem" and fs_type defined. If either is not set, the
 disk will be added as a raw block device.
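
As a sketch, a storage class entry with both of those fields set might look like the following in group vars (the class name and filesystem type are only examples; the role defaults are the authoritative reference):

``` yaml
local_volume_provisioner_storage_classes:
  local-storage:
    host_dir: /mnt/disks
    mount_dir: /mnt/disks
    volume_mode: Filesystem
    fs_type: ext4
```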
 
-### File-backed sparsefile method
+1. File-backed sparsefile method
 
 ``` bash
 truncate /mnt/disks/disk5 --size 2G
@@ -97,12 +96,12 @@ mount /mnt/disks/disk5 /mnt/disks/vol5
 If you have a development environment and only one disk, this is the best way
 to limit the quota of persistent volumes.
 
-### Simple directories
+1. Simple directories
 
 In a development environment using `mount --bind` works also, but there is no capacity
 management.
 
-### Block volumeMode PVs
+1. Block volumeMode PVs
 
 Create a symbolic link under discovery directory to the block device on the node. To use
 raw block devices in pods, volume_type should be set to "Block".
diff --git a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/README.md b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/README.md
index ef844380e..dcb883dc8 100644
--- a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/README.md
+++ b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/README.md
@@ -26,7 +26,7 @@ make push
 
 * Start Kubernetes local cluster
 
-See https://kubernetes.io/.
+See [Kubernetes](https://kubernetes.io/).
 
 * Create a Ceph admin secret
 
@@ -76,4 +76,4 @@ kubectl create -f examples/test-pod.yaml
 
 ## Acknowledgements
 
-- This provisioner is extracted from [Kubernetes core](https://github.com/kubernetes/kubernetes) with some modifications for this project.
+* This provisioner is extracted from [Kubernetes core](https://github.com/kubernetes/kubernetes) with some modifications for this project.
diff --git a/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/README.md b/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/README.md
index 84f036b36..05edbee6e 100644
--- a/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/README.md
+++ b/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/README.md
@@ -17,6 +17,7 @@ Checkout our [Live Docs](https://kubernetes-sigs.github.io/aws-alb-ingress-contr
 To get started with the controller, see our [walkthrough](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/walkthrough/echoserver/).
 
 ## Setup
+
 - See [controller setup](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/controller/setup/) on how to install ALB ingress controller
 - See [external-dns setup](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/external-dns/setup/) for how to setup the external-dns to manage route 53 records.
 
diff --git a/roles/kubernetes-apps/ingress_controller/ambassador/README.md b/roles/kubernetes-apps/ingress_controller/ambassador/README.md
index 7149c498e..3602aaa34 100644
--- a/roles/kubernetes-apps/ingress_controller/ambassador/README.md
+++ b/roles/kubernetes-apps/ingress_controller/ambassador/README.md
@@ -24,10 +24,10 @@ versions of Ambassador as they become available.
 
 ## Configuration
 
-* `ingress_ambassador_namespace` (default `ambassador`): namespace for installing Ambassador.
-* `ingress_ambassador_update_window` (default `0 0 * * SUN`): _crontab_-like expression
+- `ingress_ambassador_namespace` (default `ambassador`): namespace for installing Ambassador.
+- `ingress_ambassador_update_window` (default `0 0 * * SUN`): _crontab_-like expression
   for specifying when the Operator should try to update the Ambassador API Gateway.
-* `ingress_ambassador_version` (defaulkt: `*`): SemVer rule for versions allowed for
+- `ingress_ambassador_version` (default: `*`): SemVer rule for versions allowed for
   installation/updates.
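
Taken together, the defaults above amount to roughly the following group vars (the values shown are simply the defaults listed in this section):

```yaml
ingress_ambassador_namespace: "ambassador"
ingress_ambassador_update_window: "0 0 * * SUN"
ingress_ambassador_version: "*"
```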
 
 ## Ingress annotations
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/README.md b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md
index 99501f292..47969d5f4 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/README.md
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md
@@ -87,12 +87,12 @@ For further information, read the official [Cert-Manager Ingress](https://cert-m
 
 ### Create New TLS Root CA Certificate and Key
 
-#### Install Cloudflare PKI/TLS `cfssl` Toolkit.
+#### Install Cloudflare PKI/TLS `cfssl` Toolkit
 
 e.g. For Ubuntu/Debian distributions, the toolkit is part of the `golang-cfssl` package.
 
 ```shell
-$ sudo apt-get install -y golang-cfssl
+sudo apt-get install -y golang-cfssl
 ```
 
 #### Create Root Certificate Authority (CA) Configuration File
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/README.md b/roles/kubernetes-apps/ingress_controller/ingress_nginx/README.md
index 3d59dabd2..a3c972516 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/README.md
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/README.md
@@ -25,11 +25,12 @@
 
 !!! attention
     If you're using GKE you need to initialize your user as a cluster-admin with the following command:
-    ```console
-    kubectl create clusterrolebinding cluster-admin-binding \
-      --clusterrole cluster-admin \
-      --user $(gcloud config get-value account)
-    ```
+
+```console
+kubectl create clusterrolebinding cluster-admin-binding \
+  --clusterrole cluster-admin \
+  --user $(gcloud config get-value account)
+```
 
 The following **Mandatory Command** is required for all deployments except for AWS. See below for the AWS version.
 
@@ -60,6 +61,7 @@ For standard usage:
 ```console
 minikube addons enable ingress
 ```
+
 For development:
 
 1. Disable the ingress addon:
@@ -68,8 +70,8 @@ For development:
 minikube addons disable ingress
 ```
 
-2. Execute `make dev-env`
-3. Confirm the `nginx-ingress-controller` deployment exists:
+1. Execute `make dev-env`
+1. Confirm the `nginx-ingress-controller` deployment exists:
 
 ```console
 $ kubectl get pods -n ingress-nginx
@@ -115,20 +117,23 @@ This example creates an ELB with just two listeners, one in port 80 and another
 
 ##### ELB Idle Timeouts
 
-In some scenarios users will need to modify the value of the ELB idle timeout. Users need to ensure the idle timeout is less than the [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) that is configured for NGINX. By default NGINX `keepalive_timeout` is set to `75s`.	
+In some scenarios users will need to modify the value of the ELB idle timeout.
+Users need to ensure the idle timeout is less than the [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) that is configured for NGINX.
+By default NGINX `keepalive_timeout` is set to `75s`.
 
-The default ELB idle timeout will work for most scenarios, unless the NGINX [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) has been modified, in which case `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` will need to be modified to ensure it is less than the `keepalive_timeout` the user has configured.	
+The default ELB idle timeout will work for most scenarios, unless the NGINX [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) has been modified,
+in which case `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` will need to be modified to ensure it is less than the `keepalive_timeout` the user has configured.
 
-_Please Note: An idle timeout of `3600s` is recommended when using WebSockets._	
+_Please Note: An idle timeout of `3600s` is recommended when using WebSockets._
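
As a sketch, the annotation goes on the controller's `LoadBalancer` Service, for example (the Service name and namespace are illustrative and most of the spec is omitted):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "3600"
spec:
  type: LoadBalancer
```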
 
-More information with regards to idle timeouts for your Load Balancer can be found in the [official AWS documentation](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html).	
+More information with regards to idle timeouts for your Load Balancer can be found in the [official AWS documentation](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html).
 
-##### Network Load Balancer (NLB)	
+##### Network Load Balancer (NLB)
 
-This type of load balancer is supported since v1.10.0 as an ALPHA feature.	
+This type of load balancer is supported since v1.10.0 as an ALPHA feature.
 
-```console	
-kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/service-nlb.yaml	
+```console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/service-nlb.yaml
 ```
 
 #### GCE-GKE
diff --git a/roles/kubernetes-apps/metallb/README.md b/roles/kubernetes-apps/metallb/README.md
index 71f1b5bd9..a898d096c 100644
--- a/roles/kubernetes-apps/metallb/README.md
+++ b/roles/kubernetes-apps/metallb/README.md
@@ -4,7 +4,7 @@ MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer
 In short, it allows you to create Kubernetes services of type "LoadBalancer" in clusters that
 don't run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
 This addon aims to automate [MetalLB in layer 2 mode](https://metallb.universe.tf/concepts/layer2/)
-or [MetalLB in BGP mode][https://metallb.universe.tf/concepts/bgp/].
+or [MetalLB in BGP mode](https://metallb.universe.tf/concepts/bgp/).
 It deploys MetalLB into Kubernetes and sets up a layer 2 or BGP load-balancer.
 
 ## Install
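
A minimal sketch of enabling the addon through group vars (variable names assumed from the role defaults of this era; the role's defaults file is the authoritative reference):

```yaml
metallb_enabled: true
metallb_ip_range:
  - "10.5.0.50-10.5.0.99"
metallb_protocol: "layer2"
```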
diff --git a/roles/kubernetes-apps/registry/README.md b/roles/kubernetes-apps/registry/README.md
index 0a943eddd..27395afec 100644
--- a/roles/kubernetes-apps/registry/README.md
+++ b/roles/kubernetes-apps/registry/README.md
@@ -24,7 +24,7 @@ whether the registry is run or not. To set this flag, you can specify
 does not include this flag, the following steps should work. Note that some of
 this is cloud-provider specific, so you may have to customize it a bit.
 
-### Make some storage
+- Make some storage
 
 The primary job of the registry is to store data. To do that we have to decide
 where to store it. For cloud environments that have networked storage, we can
@@ -58,7 +58,7 @@ If, for example, you wanted to use NFS you would just need to change the
 Note that in any case, the storage (in this case the GCE PersistentDisk) must be
 created independently - this is not something Kubernetes manages for you (yet).
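
For GCE that typically means creating the disk up front with `gcloud`, for instance (the disk name, size and zone are placeholders):

```shell
gcloud compute disks create kube-registry-pd --size=200GB --zone=us-central1-a
```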
 
-### I don't want or don't have persistent storage
+- I don't want or don't have persistent storage
 
 If you are running in a place that doesn't have networked storage, or if you
 just want to kick the tires on this without committing to it, you can easily
@@ -260,13 +260,13 @@ Now you can build and push images on your local computer as
 your kubernetes cluster with the same name.
 
 More Extensions
-===============
+---------------
 
--   [Use GCS as storage backend](gcs/README.md)
--   [Enable TLS/SSL](tls/README.md)
--   [Enable Authentication](auth/README.md)
+- [Use GCS as storage backend](gcs/README.md)
+- [Enable TLS/SSL](tls/README.md)
+- [Enable Authentication](auth/README.md)
 
 Future improvements
 -------------------
 
--   Allow port-forwarding to a Service rather than a pod (\#15180)
+- Allow port-forwarding to a Service rather than a pod (\#15180)
-- 
GitLab