From a086686e9f9695b55871eecffc99dfd9c16e1e15 Mon Sep 17 00:00:00 2001
From: Wong Hoi Sing Edison <hswong3i@gmail.com>
Date: Fri, 16 Feb 2018 20:53:35 +0800
Subject: [PATCH] Support multiple artifacts under individual inventory
 directory

---
 .gitignore                                  |  2 +-
 docs/getting-started.md                     | 70 ++++++++++-----------
 inventory/sample/group_vars/k8s-cluster.yml |  4 +-
 roles/kubernetes/client/defaults/main.yml   |  2 +-
 roles/kubernetes/client/tasks/main.yml      | 16 ++++-
 roles/kubespray-defaults/defaults/main.yaml |  4 +-
 6 files changed, 56 insertions(+), 42 deletions(-)

diff --git a/.gitignore b/.gitignore
index 66c9b4867..fcbcd1da1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,7 +23,7 @@ __pycache__/
 
 # Distribution / packaging
 .Python
-artifacts/
+inventory/*/artifacts/
 env/
 build/
 credentials/
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 961d1a9cf..78d3f49d1 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -18,11 +18,9 @@ certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` hel
 
 Example inventory generator usage:
 
-```
-cp -r inventory/sample inventory/mycluster
-declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
-CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
-```
+    cp -r inventory/sample inventory/mycluster
+    declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
+    CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
 
 Starting custom deployment
 --------------------------
@@ -30,12 +28,10 @@ Starting custom deployment
 Once you have an inventory, you may want to customize deployment data vars
 and start the deployment:
 
-**IMPORTANT: Edit my_inventory/groups_vars/*.yaml to override data vars**
+**IMPORTANT**: Edit my\_inventory/group\_vars/\*.yaml to override data vars:
 
-```
-ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
-  --private-key=~/.ssh/private_key
-```
+    ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
+      --private-key=~/.ssh/private_key
 
 See more details in the [ansible guide](ansible.md).
 
@@ -44,31 +40,31 @@ Adding nodes
 
 You may want to add **worker** nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
 
-- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
-- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
-```
-ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
-  --private-key=~/.ssh/private_key
-```
+-   Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
+-   Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
+
+        ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
+          --private-key=~/.ssh/private_key
 
 Connecting to Kubernetes
 ------------------------
+
 By default, Kubespray configures kube-master hosts with insecure access to
 kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
-because kubectl will use http://localhost:8080 to connect. The kubeconfig files
+because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
 generated will point to localhost (on kube-masters) and kube-node hosts will
 connect either to a localhost nginx proxy or to a loadbalancer if configured.
 More details on this process are in the [HA guide](ha-mode.md).
 
-Kubespray permits connecting to the cluster remotely on any IP of any 
-kube-master host on port 6443 by default. However, this requires 
-authentication. One could generate a kubeconfig based on one installed 
+Kubespray permits connecting to the cluster remotely on any IP of any
+kube-master host on port 6443 by default. However, this requires
+authentication. One could generate a kubeconfig based on one installed
 kube-master hosts (needs improvement) or connect with a username and password.
 By default, a user with admin rights is created, named `kube`.
-The password can be viewed after deployment by looking at the file 
+The password can be viewed after deployment by looking at the file
 `PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
 password. If you wish to set your own password, just precreate/modify this
-file yourself. 
+file yourself.
 
 For more information on kubeconfig and accessing a Kubernetes cluster, refer to
 the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
@@ -77,29 +73,33 @@ Accessing Kubernetes Dashboard
 ------------------------------
 
 As of kubernetes-dashboard v1.7.x:
-* New login options that use apiserver auth proxying of token/basic/kubeconfig by default
-* Requires RBAC in authorization_modes
-* Only serves over https
-* No longer available at https://first_master:6443/ui until apiserver is updated with the https proxy URL
+
+-   New login options that use apiserver auth proxying of token/basic/kubeconfig by default
+-   Requires RBAC in authorization\_modes
+-   Only serves over https
+-   No longer available at <https://first_master:6443/ui> until apiserver is updated with the https proxy URL
 
 If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL, You will be prompted for credentials:
-https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login
+<https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
 
 Or you can run 'kubectl proxy' from your local machine to access dashboard in your browser from:
-http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login
+<http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
 
-It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above
+It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: <https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above>
 
 Accessing Kubernetes API
 ------------------------
 
 The main client of Kubernetes is `kubectl`. It is installed on each kube-master
 host and can optionally be configured on your ansible host by setting
-`kubeconfig_localhost: true` in the configuration. If enabled, kubectl and
-admin.conf will appear in the artifacts/ directory after deployment. You can
-see a list of nodes by running the following commands:
+`kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration:
+
+-   If `kubectl_localhost` enabled, `kubectl` will be downloaded onto `/usr/local/bin/` and set up with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` is also created for use with the below `admin.conf`.
+-   If `kubeconfig_localhost` enabled, `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment.
+
+You can see a list of nodes by running the following commands:
 
-    cd artifacts/
-    ./kubectl --kubeconfig admin.conf get nodes
+    cd inventory/mycluster/artifacts
+    ./kubectl.sh get nodes
 
-If desired, copy kubectl to your bin dir and admin.conf to ~/.kube/config.
+If desired, copy admin.conf to ~/.kube/config.
diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml
index a31963f16..d642646fe 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster.yml
@@ -203,9 +203,9 @@ ingress_nginx_enabled: false
 # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
 persistent_volumes_enabled: false
 
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
 # kubeconfig_localhost: false
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
 # kubectl_localhost: false
 
 # dnsmasq
diff --git a/roles/kubernetes/client/defaults/main.yml b/roles/kubernetes/client/defaults/main.yml
index 5864e991f..32870df01 100644
--- a/roles/kubernetes/client/defaults/main.yml
+++ b/roles/kubernetes/client/defaults/main.yml
@@ -1,7 +1,7 @@
 ---
 kubeconfig_localhost: false
 kubectl_localhost: false
-artifacts_dir: "./artifacts"
+artifacts_dir: "{{ inventory_dir }}/artifacts"
 
 kube_config_dir: "/etc/kubernetes"
 kube_apiserver_port: "6443"
diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index 3b66c5e1c..cf70b4995 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -55,9 +55,23 @@
 - name: Copy kubectl binary to ansible host
   fetch:
     src: "{{ bin_dir }}/kubectl"
-    dest: "{{ artifacts_dir }}/kubectl"
+    dest: "{{ bin_dir }}/kubectl"
     flat: yes
     validate_checksum: no
   become: no
   run_once: yes
   when: kubectl_localhost|default(false)
+
+- name: Create helper script kubectl.sh on ansible host
+  copy:
+    content: |
+      #!/bin/bash
+      kubectl --kubeconfig=admin.conf "$@"
+    dest: "{{ artifacts_dir }}/kubectl.sh"
+    owner: root
+    group: root
+    mode: 0755
+  become: no
+  run_once: yes
+  delegate_to: localhost
+  when: kubectl_localhost|default(false) and kubeconfig_localhost|default(false)
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 61f11e97f..21fb044ec 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -152,9 +152,9 @@ helm_deployment_type: host
 # Enable kubeadm deployment (experimental)
 kubeadm_enabled: false
 
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
 kubeconfig_localhost: false
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
 kubectl_localhost: false
 
 # K8s image pull policy (imagePullPolicy)
-- 
GitLab