diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
index 538fc22fcf1195b27669d23f68358662a4decaa9..382fb6602191fef1f7555081809f7b26df1b632a 100644
--- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -1,6 +1,6 @@
 ---
 - name: Kubernetes Apps | Register coredns deployment annotation `createdby`
-  shell: "{{ bin_dir }}/kubectl get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
+  command: "{{ kubectl }} get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
   register: createdby_annotation
   changed_when: false
   ignore_errors: true  # noqa ignore-errors
diff --git a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
index 58688ae4a4d9cc50a07ceb7067fd6f84ba47bae5..269d2ec18ffbf1789fc9298e82490e186d14b338 100644
--- a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
+++ b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
@@ -29,7 +29,7 @@
   tags: vsphere-csi-driver
 
 - name: vSphere CSI Driver | Generate a CSI secret manifest
-  command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
+  command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
   register: vsphere_csi_secret_manifest
   when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: true
@@ -37,7 +37,7 @@
 
 - name: vSphere CSI Driver | Apply a CSI secret manifest
   command:
-    cmd: "{{ bin_dir }}/kubectl apply -f -"
+    cmd: "{{ kubectl }} apply -f -"
     stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
   when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: true
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
index 1c1534698dd3e129670b5ae5ef03627b1c3b7d37..0f03dbb313dc5523e4e3a03efd103adfb443f8a0 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml
@@ -10,8 +10,8 @@
     - upgrade
 
 - name: CephFS Provisioner | Remove legacy namespace
-  shell: |
-    {{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
+  command: >
+    {{ kubectl }} delete namespace {{ cephfs_provisioner_namespace }}
   ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -19,8 +19,8 @@
     - upgrade
 
 - name: CephFS Provisioner | Remove legacy storageclass
-  shell: |
-    {{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
+  command: >
+    {{ kubectl }} delete storageclass {{ cephfs_provisioner_storage_class }}
   ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
index 06bc1884900f0605059dcf3c97cbb6a501bc880c..e1c1241a6e23338ef713852842e1fdd3fdd5f4c6 100644
--- a/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/rbd_provisioner/tasks/main.yml
@@ -10,8 +10,8 @@
     - upgrade
 
 - name: RBD Provisioner | Remove legacy namespace
-  shell: |
-    {{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
+  command: >
+    {{ kubectl }} delete namespace {{ rbd_provisioner_namespace }}
   ignore_errors: true  # noqa ignore-errrors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -19,8 +19,8 @@
     - upgrade
 
 - name: RBD Provisioner | Remove legacy storageclass
-  shell: |
-    {{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
+  command: >
+    {{ kubectl }} delete storageclass {{ rbd_provisioner_storage_class }}
   ignore_errors: true  # noqa ignore-errrors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
index ce46aada5128bcedac7d7d50fbf45fc8788fe40d..33f2dbcf84499fee926236cf9549ba8ced9c1794 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml
@@ -10,8 +10,8 @@
     - upgrade
 
 - name: Cert Manager | Remove legacy namespace
-  shell: |
-    {{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
+  command: >
+    {{ kubectl }} delete namespace {{ cert_manager_namespace }}
   ignore_errors: true  # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
index 45a64d2b2a96186f81d9adad2781257ab113653f..25f9a7132652813a5b39580998664eca66bfe908 100644
--- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml
@@ -12,7 +12,7 @@
   run_once: true
 
 - name: kube-router | Wait for kube-router pods to be ready
-  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601 ignore-errors
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601 ignore-errors
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
   retries: 30
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index ef6a7ac737a827a4c85b36bb9c6db2ec782f4d9b..e99f2f8405e8763683652bfb2aa0ed407be9fa1c 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -190,7 +190,7 @@
 
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
 - name: kubeadm | Remove taint for master with node role
-  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} {{ item }}"
+  command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
   delegate_to: "{{ first_kube_control_plane }}"
   with_items:
     - "node-role.kubernetes.io/master:NoSchedule-"
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
index fe690fc3f61ffac9c2752da790d6efd7e3c962d6..769ff310774a2d06c4683f238fc8f3eff249820f 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
@@ -61,8 +61,7 @@
 # FIXME: https://github.com/kubernetes/kubeadm/issues/1318
 - name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
   command: >-
-    {{ bin_dir }}/kubectl
-    --kubeconfig {{ kube_config_dir }}/admin.conf
+    {{ kubectl }}
     -n kube-system
     scale deployment/coredns --replicas 0
   register: scale_down_coredns
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 8db58d34f6b323740b488ffe4c9f4e41cd66123c..840a9cd68a53ae3fcc599949b686c07fc9703cd6 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -115,9 +115,9 @@
 # incorrectly to first master, creating SPoF.
 - name: Update server field in kube-proxy kubeconfig
   shell: >-
-    set -o pipefail && {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml
+    set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml
     | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'
-    | {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf replace -f -
+    | {{ kubectl }} replace -f -
   args:
     executable: /bin/bash
   run_once: true
@@ -139,7 +139,7 @@
     mode: "0644"
 
 - name: Restart all kube-proxy pods to ensure that they load the new configmap
-  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
+  command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
   delegate_to: "{{ groups['kube_control_plane']|first }}"
   delegate_facts: false
diff --git a/roles/kubernetes/node-label/tasks/main.yml b/roles/kubernetes/node-label/tasks/main.yml
index b7f8138a66e9479be78a88510773826258aca26e..f91e7f459fbb14a2870279e607b1da05890f8375 100644
--- a/roles/kubernetes/node-label/tasks/main.yml
+++ b/roles/kubernetes/node-label/tasks/main.yml
@@ -42,7 +42,7 @@
 
 - name: Set label to node
   command: >-
-    {{ bin_dir }}/kubectl label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true
+    {{ kubectl }} label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true
   loop: "{{ role_node_labels + inventory_node_labels }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   changed_when: false
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 106deb21f3764e84875703478dc37f5e083fc422..556766a1648f04f4832dfd742f9638358f72090a 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -138,6 +138,10 @@ kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
 
+# Kubectl command
+# This is for consistency when using the kubectl command in roles, and to ensure it is always run with the admin kubeconfig
+kubectl: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf"
+
 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"
 
diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml
index 74d3f7915db296f30635828aedb26d38e1336f0d..162aca150dd21b6ab1f305bf87765913a109e991 100644
--- a/roles/network_plugin/calico/tasks/pre.yml
+++ b/roles/network_plugin/calico/tasks/pre.yml
@@ -19,7 +19,7 @@
 
 - name: Calico | Get kubelet hostname
   shell: >-
-    set -o pipefail && {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
+    set -o pipefail && {{ kubectl }} get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
     | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
   args:
     executable: /bin/bash
diff --git a/roles/network_plugin/calico/tasks/typha_certs.yml b/roles/network_plugin/calico/tasks/typha_certs.yml
index c2647a1cb988f9b9965d759b66a136939cb08cad..9f94067bcb854c39ee75f8cd5acc82f07a2ca508 100644
--- a/roles/network_plugin/calico/tasks/typha_certs.yml
+++ b/roles/network_plugin/calico/tasks/typha_certs.yml
@@ -1,6 +1,6 @@
 ---
 - name: Calico | Check if typha-server exists
-  command: "{{ bin_dir }}/kubectl -n kube-system get secret typha-server"
+  command: "{{ kubectl }} -n kube-system get secret typha-server"
   register: typha_server_secret
   changed_when: false
   failed_when: false
@@ -35,7 +35,7 @@
 
 - name: Calico | Create typha tls secrets
   command: >-
-    {{ bin_dir }}/kubectl -n kube-system
+    {{ kubectl }} -n kube-system
     create secret tls {{ item.name }}
     --cert {{ item.cert }}
     --key {{ item.key }}
diff --git a/roles/network_plugin/cilium/tasks/apply.yml b/roles/network_plugin/cilium/tasks/apply.yml
index 2a967adbc23189f2603b6438c56883864e254b5e..89ccb1e563830f4180af2e8a4b45751a0819cf89 100644
--- a/roles/network_plugin/cilium/tasks/apply.yml
+++ b/roles/network_plugin/cilium/tasks/apply.yml
@@ -11,7 +11,7 @@
   when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
 
 - name: Cilium | Wait for pods to run
-  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa 601
   register: pods_not_ready
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30
diff --git a/roles/network_plugin/kube-ovn/tasks/main.yml b/roles/network_plugin/kube-ovn/tasks/main.yml
index 2efafa4cd6341dffcfd0185667040464c3d88aaf..3278642b112b336efc2332edfd718023bbda6c50 100644
--- a/roles/network_plugin/kube-ovn/tasks/main.yml
+++ b/roles/network_plugin/kube-ovn/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: Kube-OVN | Label ovn-db node
   command: >-
-    {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master
+    {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
 
diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml
index 30190124d13e0fc240a2a5811c5401ec1b570f72..e91249f7d45a7bc07d9e3f24141096b96660879c 100644
--- a/roles/network_plugin/kube-router/tasks/annotate.yml
+++ b/roles/network_plugin/kube-router/tasks/annotate.yml
@@ -1,20 +1,20 @@
 ---
 - name: kube-router | Add annotations on kube_control_plane
-  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
     - "{{ kube_router_annotations_master }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane']
 
 - name: kube-router | Add annotations on kube_node
-  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
     - "{{ kube_router_annotations_node }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node']
 
 - name: kube-router | Add common annotations on all servers
-  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
     - "{{ kube_router_annotations_all }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml
index a014fd436577fcdaa9184821cc21d2d704a593aa..0c381c79e8abe1d3a3ad272e2e6c2dd5f0ee765c 100644
--- a/roles/network_plugin/macvlan/tasks/main.yml
+++ b/roles/network_plugin/macvlan/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Macvlan | Retrieve Pod Cidr
-  command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
+  command: "{{ kubectl }} get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
   changed_when: false
   register: node_pod_cidr_cmd
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
diff --git a/roles/network_plugin/ovn4nfv/tasks/main.yml b/roles/network_plugin/ovn4nfv/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..51f9eaa293be0dbdc02c33a43236f9005860ff85
--- /dev/null
+++ b/roles/network_plugin/ovn4nfv/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+- name: ovn4nfv | Label control-plane node
+  command: >-
+    {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane
+  when:
+    - inventory_hostname == groups['kube_control_plane'][0]
+
+- name: ovn4nfv | Create ovn4nfv-k8s manifests
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
+  with_items:
+    - {name: ovn-daemonset, file: ovn-daemonset.yml}
+    - {name: ovn4nfv-k8s-plugin, file: ovn4nfv-k8s-plugin.yml}
+  register: ovn4nfv_node_manifests
diff --git a/roles/recover_control_plane/control-plane/tasks/main.yml b/roles/recover_control_plane/control-plane/tasks/main.yml
index 450e6f36d946888b81c462eeedeb7043ae26cba7..4a4e3eb7ec8dc34ff676cc858dba81e797efc5d4 100644
--- a/roles/recover_control_plane/control-plane/tasks/main.yml
+++ b/roles/recover_control_plane/control-plane/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Wait for apiserver
-  command: "{{ bin_dir }}/kubectl get nodes"
+  command: "{{ kubectl }} get nodes"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   register: apiserver_is_ready
@@ -11,7 +11,7 @@
   when: groups['broken_kube_control_plane']
 
 - name: Delete broken kube_control_plane nodes from cluster
-  command: "{{ bin_dir }}/kubectl delete node {{ item }}"
+  command: "{{ kubectl }} delete node {{ item }}"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube_control_plane'] }}"
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index 6ca8c2a687ee714b6606d47ee4c834477b6c56f2..31dd462f49199a2c05f5ca04d7b6ffa1b5f7628b 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Delete node
-  command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
+  command: "{{ kubectl }} delete node {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane']|first }}"
   when: inventory_hostname in groups['k8s_cluster']
   retries: 10
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index d920048094cb5428b0da54bce7e1a713b322cfd3..32d4f9831aa6d529972da232b0df2093e54e2af3 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: remove-node | List nodes
   command: >-
-    {{ bin_dir }}/kubectl get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %}
+    {{ kubectl }} get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %}
   register: nodes
   delegate_to: "{{ groups['kube_control_plane']|first }}"
   changed_when: false
@@ -9,7 +9,7 @@
 
 - name: remove-node | Drain node except daemonsets resource  # noqa 301
   command: >-
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf drain
+    {{ kubectl }} drain
     --force
     --ignore-daemonsets
     --grace-period {{ drain_grace_period }}
diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml
index c69dd906957917ca22b31af1811c95b247552e0a..3d01f332b67abf9d34fa5c24a42ae99b4df740a7 100644
--- a/roles/remove-node/remove-etcd-node/tasks/main.yml
+++ b/roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -1,8 +1,8 @@
 ---
 - name: Lookup node IP in kubernetes
-  shell: >-
-    {{ bin_dir }}/kubectl get nodes {{ node }}
-    -o jsonpath='{range.status.addresses[?(@.type=="InternalIP")]}{.address}{"\n"}{end}'
+  command: >
+    {{ kubectl }} get nodes {{ node }}
+    -o jsonpath={range.status.addresses[?(@.type=="InternalIP")]}{.address}{"\n"}{end}
   register: remove_node_ip
   when:
     - inventory_hostname in groups['etcd']
diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml
index f19ecafb1d4b86632eee53bdc17b3c21aad053d0..f460d086378ce170924d4828dcc125570b4ae3ed 100644
--- a/roles/upgrade/post-upgrade/tasks/main.yml
+++ b/roles/upgrade/post-upgrade/tasks/main.yml
@@ -4,7 +4,7 @@
     - needs_cordoning|default(false)
     - kube_network_plugin == 'cilium'
   command: >
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf
+    {{ kubectl }}
     wait pod -n kube-system -l k8s-app=cilium
     --field-selector 'spec.nodeName=={{ kube_override_hostname|default(inventory_hostname) }}'
     --for=condition=Ready
@@ -12,7 +12,7 @@
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
 - name: Uncordon node
-  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
+  command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
     - needs_cordoning|default(false)
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index 36d06224eec7977918566a40f9f4636ae1144422..9aad57e0e27357d13bbdf5e957b83a023dea6bbc 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -17,9 +17,9 @@
 # Node Ready: type = ready, status = True
 # Node NotReady: type = ready, status = Unknown
 - name: See if node is in ready state
-  shell: >-
-    {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
-    -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
+  command: >
+    {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
+    -o jsonpath={ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }
   register: kubectl_node_ready
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   failed_when: false
@@ -28,9 +28,9 @@
 # SchedulingDisabled: unschedulable = true
 # else unschedulable key doesn't exist
 - name: See if node is schedulable
-  shell: >-
-    {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
-    -o jsonpath='{ .spec.unschedulable }'
+  command: >
+    {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
+    -o jsonpath={ .spec.unschedulable }
   register: kubectl_node_schedulable
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   failed_when: false
@@ -48,11 +48,11 @@
 - name: Node draining
   block:
     - name: Cordon node
-      command: "{{ bin_dir }}/kubectl cordon {{ kube_override_hostname|default(inventory_hostname) }}"
+      command: "{{ kubectl }} cordon {{ kube_override_hostname|default(inventory_hostname) }}"
       delegate_to: "{{ groups['kube_control_plane'][0] }}"
 
     - name: Check kubectl version
-      command: "{{ bin_dir }}/kubectl version --client --short"
+      command: "{{ kubectl }} version --client --short"
       register: kubectl_version
       delegate_to: "{{ groups['kube_control_plane'][0] }}"
       run_once: yes
@@ -70,7 +70,7 @@
 
     - name: Drain node
       command: >-
-        {{ bin_dir }}/kubectl drain
+        {{ kubectl }} drain
         --force
         --ignore-daemonsets
         --grace-period {{ hostvars['localhost']['drain_grace_period_after_failure'] | default(drain_grace_period) }}
@@ -98,7 +98,7 @@
 
     - name: Drain node - fallback with disabled eviction
       command: >-
-        {{ bin_dir }}/kubectl drain
+        {{ kubectl }} drain
         --force
         --ignore-daemonsets
         --grace-period {{ drain_fallback_grace_period }}
@@ -117,7 +117,7 @@
 
   rescue:
     - name: Set node back to schedulable
-      command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ inventory_hostname }}"
+      command: "{{ kubectl }} uncordon {{ inventory_hostname }}"
      when: upgrade_node_uncordon_after_drain_failure
    - name: Fail after rescue
      fail:
diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml
index 77da68352fb472d92873fc0a086248a5473747e6..a6c70edbda4a875cc5776c40631e4c282a9cb29f 100644
--- a/roles/win_nodes/kubernetes_patch/tasks/main.yml
+++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml
@@ -12,9 +12,9 @@
 # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
 - name: Check current nodeselector for kube-proxy daemonset
   command: >-
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf
+    {{ kubectl }}
     get ds kube-proxy --namespace=kube-system
-    -o jsonpath='{.spec.template.spec.nodeSelector.{{ kube_proxy_nodeselector | regex_replace('\.', '\\.') }}}'
+    -o jsonpath={.spec.template.spec.nodeSelector.{{ kube_proxy_nodeselector | regex_replace('\.', '\\.') }}}
   register: current_kube_proxy_state
   retries: 60
   delay: 5
@@ -22,8 +22,8 @@
   changed_when: false
 
 - name: Apply nodeselector patch for kube-proxy daemonset
-  shell: >-
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf
+  command: >
+    {{ kubectl }}
     patch ds kube-proxy --namespace=kube-system --type=strategic -p
     '{"spec":{"template":{"spec":{"nodeSelector":{"{{ kube_proxy_nodeselector }}":"linux"} }}}}'
   register: patch_kube_proxy_state
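
For reference, a role task written against the new `kubectl` variable only needs the sub-command and arguments; the binary path and the admin kubeconfig come from the variable defined in kubespray-defaults. The sketch below is illustrative and not part of the patch: the task name and label are placeholders, and it assumes kubespray-defaults is in the play so the variable is defined.

# Illustrative example only (not part of the patch).
# Assumes the `kubectl` variable from kubespray-defaults, i.e.
# "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf".
- name: Example | Label a node via the shared kubectl variable
  command: "{{ kubectl }} label node {{ inventory_hostname }} example/role=worker --overwrite=true"
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  changed_when: false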