Commit ddffdb63 authored by Andreas Krüger, committed by Kubernetes Prow Robot

Remove non-kubeadm deployment (#3811)

* Remove non-kubeadm deployment

* More cleanup

* More cleanup

* More cleanup

* More cleanup

* Fix gitlab

* Try stopping gce first before absent to make the delete process work

* More cleanup

* Fix bug with checking if kubeadm has already run

* Fix bug with checking if kubeadm has already run

* More fixes

* Fix test

* fix

* Fix gitlab checkout until kubespray 2.8 is on quay

* Fixed

* Add upgrade path from non-kubeadm to kubeadm. Revert ssl path

* Re-add secret checking

* Do gitlab checks from v2.7.0 to test the upgrade path to 2.8.0

* fix typo

* Fix CI jobs to use kubeadm again. Fix broken hyperkube path

* Fix gitlab

* Fix rotate tokens

* More fixes

* More fixes

* Fix tokens
parent 0d1be39a
Showing 45 additions and 193 deletions
@@ -24,7 +24,6 @@ variables:
IDEMPOT_CHECK: "false"
RESET_CHECK: "false"
UPGRADE_TEST: "false"
-KUBEADM_ENABLED: "false"
LOG_LEVEL: "-vv"
# asia-east1-a
@@ -89,11 +88,11 @@ before_script:
- echo ${PWD}
- echo "${STARTUP_SCRIPT}"
- cd tests && make create-${CI_PLATFORM} -s ; cd -
+#- git fetch --all && git checkout v2.7.0
# Check out latest tag if testing upgrade
# Uncomment when gitlab kubespray repo has tags
-#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
-- test "${UPGRADE_TEST}" != "false" && git checkout 53d87e53c5899d4ea2904ab7e3883708dd6363d3
+- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
# Checkout the CI vars file so it is available
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
# Workaround https://github.com/kubernetes-sigs/kubespray/issues/2021
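The tag checkout added above works in two steps: `git rev-list --tags --max-count=1` returns the most recently tagged commit, and `git describe --tags` resolves it to a tag name. A minimal sketch of the same idea as a standalone CI job (the job name is hypothetical):
```
resolve_latest_tag:   # hypothetical job
  script:
    # newest tagged commit -> its tag name -> check it out
    - LATEST_TAG=$(git describe --tags $(git rev-list --tags --max-count=1))
    - git checkout "${LATEST_TAG}"
```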
@@ -137,9 +136,7 @@ before_script:
# Tests Cases
## Test Master API
-- >
-ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
--e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
+- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
## Ping the between 2 pod
- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
......
@@ -13,20 +13,6 @@
vars:
ansible_connection: local
-- hosts: localhost
-  gather_facts: false
-  tasks:
-    - name: deploy warning for non kubeadm
-      debug:
-        msg: "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
-      when: not kubeadm_enabled and not skip_non_kubeadm_warning
-    - name: deploy cluster for non kubeadm
-      pause:
-        prompt: "Are you sure you want to deploy cluster using the deprecated non-kubeadm mode."
-        echo: no
-      when: not kubeadm_enabled and not skip_non_kubeadm_warning
- hosts: bastion[0]
gather_facts: False
roles:
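The deleted play above gated non-kubeadm deploys behind an interactive prompt. The general pattern, a `debug` warning followed by a `pause` confirmation, is sketched below with an illustrative variable name:
```
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Warn about a deprecated option
      debug:
        msg: "DEPRECATION: this mode will be removed in the next release."
      when: deprecated_mode | default(false)   # illustrative variable

    - name: Confirm before continuing
      pause:
        prompt: "Are you sure you want to continue?"
        echo: no
      when: deprecated_mode | default(false)
```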
@@ -96,7 +82,7 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
-- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
+- { role: kubernetes/kubeadm, tags: kubeadm}
- { role: network_plugin, tags: network }
- hosts: kube-master[0]
@@ -104,7 +90,7 @@
roles:
- { role: kubespray-defaults}
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
-- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"], when: "kubeadm_enabled" }
+- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"]}
- hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
......
# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
# See contrib/dind/README.md
kube_api_anonymous_auth: true
-kubeadm_enabled: true
kubelet_fail_swap_on: false
......
DISTROS=(debian centos)
NETCHECKER_HOST=${NODES[0]}
EXTRAS=(
-'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":false}'
-'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":true}'
-'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":false}'
-'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":true}'
+'kube_network_plugin=kube-router {"kube_router_run_service_proxy":false}'
+'kube_network_plugin=kube-router {"kube_router_run_service_proxy":true}'
)
DISTROS=(debian centos)
EXTRAS=(
-'kube_network_plugin=calico {"kubeadm_enabled":true}'
-'kube_network_plugin=canal {"kubeadm_enabled":true}'
-'kube_network_plugin=cilium {"kubeadm_enabled":true}'
-'kube_network_plugin=flannel {"kubeadm_enabled":true}'
-'kube_network_plugin=weave {"kubeadm_enabled":true}'
+'kube_network_plugin=calico {}'
+'kube_network_plugin=canal {}'
+'kube_network_plugin=cilium {}'
+'kube_network_plugin=flannel {}'
+'kube_network_plugin=weave {}'
)
@@ -15,8 +15,6 @@ Use cri-o instead of docker, set following variable:
#### all.yml
```
-kubeadm_enabled: true
-...
download_container: false
skip_downloads: false
```
@@ -28,4 +26,3 @@ etcd_deployment_type: host
kubelet_deployment_type: host
container_manager: crio
```
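With `kubeadm_enabled` dropped, the cri-o instructions reduce to the remaining variables from the two snippets above, consolidated here for convenience:
```
# all.yml
download_container: false
skip_downloads: false

# k8s-cluster.yml
etcd_deployment_type: host
kubelet_deployment_type: host
container_manager: crio
```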
@@ -62,34 +62,6 @@ You can change the default configuration by overriding `kube_router_...` variables,
these are named to follow `kube-router` command-line options as per
<https://www.kube-router.io/docs/user-guide/#try-kube-router-with-cluster-installers>.
-## Caveats
-### kubeadm_enabled: true
-If you want to set `kube-router` to replace `kube-proxy`
-(`--run-service-proxy=true`) while using `kubeadm_enabled`,
-then the `kube-proxy` DaemonSet will be removed *after* kubeadm finishes
-running, as it's not possible to skip the kube-proxy install in kubeadm flags
-and/or config, see https://github.com/kubernetes/kubeadm/issues/776.
-Given the above, if `--run-service-proxy=true` is needed, it would be
-better to avoid `kubeadm_enabled`, i.e. set:
-```
-kubeadm_enabled: false
-kube_router_run_service_proxy: true
-```
-If for some reason you do want/need to set `kubeadm_enabled`, removing
-it afterwards behaves better if kube-proxy is set to ipvs mode, i.e. set:
-```
-kubeadm_enabled: true
-kube_router_run_service_proxy: true
-kube_proxy_mode: ipvs
-```
## Advanced BGP Capabilities
https://github.com/cloudnativelabs/kube-router#advanced-bgp-capabilities
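After this removal only the non-kubeadm-specific advice disappears; running kube-router as the service proxy is still configured the same way. A sketch of the surviving configuration, using the values the removed caveat recommended:
```
kube_network_plugin: kube-router
kube_router_run_service_proxy: true
kube_proxy_mode: ipvs   # the removed text notes kube-proxy removal behaves better in ipvs mode
```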
......
@@ -20,9 +20,6 @@ Some variables of note include:
string)
* *etcd_version* - Specify version of ETCD to use
* *ipip* - Enables Calico ipip encapsulation by default
-* *hyperkube_image_repo* - Specify the Docker repository where Hyperkube
-  resides
-* *hyperkube_image_tag* - Specify the Docker tag where Hyperkube resides
* *kube_network_plugin* - Sets k8s network plugin (default Calico)
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
* *kube_version* - Specify a given Kubernetes hyperkube version
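A short sketch of overriding a few of these documented variables in inventory group_vars (the values are purely illustrative):
```
# group_vars/k8s-cluster.yml (illustrative values)
kube_network_plugin: calico   # the default plugin
kube_proxy_mode: iptables
ipip: true                    # Calico ipip encapsulation
```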
......
@@ -46,12 +46,6 @@ bin_dir: /usr/local/bin
## TODO(riverzhang): https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
#cloud_provider:
-## kubeadm deployment mode
-kubeadm_enabled: true
-# Skip alert information
-skip_non_kubeadm_warning: false
## Set these proxy values in order to update package manager and docker daemon to use proxies
#http_proxy: ""
#https_proxy: ""
......
@@ -136,8 +136,6 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "quay.io/calico/routereflector"
calico_rr_image_tag: "{{ calico_rr_version }}"
-hyperkube_image_repo: "{{ kube_image_repo }}/hyperkube-{{ image_arch }}"
-hyperkube_image_tag: "{{ kube_version }}"
pod_infra_image_repo: "gcr.io/google_containers/pause-{{ image_arch }}"
pod_infra_image_tag: "{{ pod_infra_version }}"
install_socat_image_repo: "xueshanf/install-socat"
@@ -272,7 +270,7 @@ downloads:
- k8s-cluster
kubeadm:
enabled: "{{ kubeadm_enabled }}"
enabled: true
file: true
version: "{{ kubeadm_version }}"
dest: "{{local_release_dir}}/kubeadm"
@@ -284,15 +282,6 @@
groups:
- k8s-cluster
-hyperkube:
-  enabled: "{{ kubeadm_enabled == false }}"
-  container: true
-  repo: "{{ hyperkube_image_repo }}"
-  tag: "{{ hyperkube_image_tag }}"
-  sha256: "{{ hyperkube_digest_checksum|default(None) }}"
-  groups:
-    - k8s-cluster
hyperkube_file:
enabled: true
file: true
......
@@ -21,7 +21,6 @@
resource: "deploy"
state: absent
when:
-- kubeadm_enabled|default(false)
- kubeadm_init is defined
- kubeadm_init.changed|default(false)
- inventory_hostname == groups['kube-master'][0]
@@ -50,7 +49,6 @@
- 'deploy'
- 'svc'
when:
-- kubeadm_enabled|default(false)
- kubeadm_init is defined
- kubeadm_init.changed|default(false)
- inventory_hostname == groups['kube-master'][0]
......
---
- name: Rotate Tokens | Get default token name
shell: "{{ bin_dir }}/kubectl get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
register: default_token
changed_when: false
until: default_token.rc == 0
@@ -8,7 +8,7 @@
retries: 5
- name: Rotate Tokens | Get default token data
command: "{{ bin_dir }}/kubectl get secrets {{ default_token.stdout }} -ojson"
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets {{ default_token.stdout }} -ojson"
register: default_token_data
changed_when: false
@@ -31,7 +31,7 @@
# instead of filtering manually
- name: Rotate Tokens | Get all serviceaccount tokens to expire
shell: >-
-{{ bin_dir }}/kubectl get secrets --all-namespaces
+{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets --all-namespaces
-o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
| grep kubernetes.io/service-account-token
| egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|tiller|local-volume-provisioner'
@@ -39,10 +39,10 @@
when: needs_rotation
- name: Rotate Tokens | Delete expired tokens
command: "{{ bin_dir }}/kubectl delete secrets -n {{ item.split(' ')[0] }} {{ item.split(' ')[1] }}"
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete secrets -n {{ item.split(' ')[0] }} {{ item.split(' ')[1] }}"
with_items: "{{ tokens_to_delete.stdout_lines }}"
when: needs_rotation
- name: Rotate Tokens | Delete pods in system namespace
command: "{{ bin_dir }}/kubectl delete pods -n kube-system --all"
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete pods -n kube-system --all"
when: needs_rotation
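The common thread in these hunks is passing `--kubeconfig /etc/kubernetes/admin.conf` explicitly rather than relying on root's default kubeconfig. The pattern in isolation, as a minimal sketch (task name illustrative):
```
- name: Example | Run kubectl with an explicit kubeconfig
  command: >-
    {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf
    get secrets --all-namespaces -o name
  register: secret_names
  changed_when: false
  run_once: true
```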
@@ -10,25 +10,6 @@
tags:
- facts
-- name: Gather certs for admin kubeconfig
-  slurp:
-    src: "{{ item }}"
-  register: admin_certs
-  with_items:
-    - "{{ kube_cert_dir }}/ca.pem"
-    - "{{ kube_cert_dir }}/admin-{{ inventory_hostname }}.pem"
-    - "{{ kube_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
-  when: not kubeadm_enabled|d(false)|bool
-
-- name: Write admin kubeconfig
-  template:
-    src: admin.conf.j2
-    dest: "{{ kube_config_dir }}/admin.conf"
-    owner: root
-    group: "{{ kube_cert_group }}"
-    mode: 0640
-  when: not kubeadm_enabled|d(false)|bool
- name: Create kube config dir
file:
path: "/root/.kube"
......
@@ -124,7 +124,7 @@
# FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776
# is fixed
- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
shell: "{{ bin_dir }}/kubectl delete daemonset -n kube-system kube-proxy"
shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
delegate_to: "{{groups['kube-master']|first}}"
run_once: true
when:
......
@@ -91,13 +91,16 @@
command: /bin/true
notify:
- Master | set secret_changed to true
- Master | clear kubeconfig for root user
- Master | Copy new kubeconfig for root user
- name: Master | set secret_changed to true
set_fact:
secret_changed: true
- name: Master | clear kubeconfig for root user
file:
path: /root/.kube/config
state: absent
- name: Master | Copy new kubeconfig for root user
copy:
src: "{{ kube_config_dir }}/admin.conf"
dest: "/root/.kube/config"
remote_src: yes
mode: "0600"
backup: yes
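This hunk leans on Ansible's handler chaining: a task notifies named handlers, which run after the play's tasks complete (or earlier at an explicit `meta: flush_handlers`). A self-contained sketch of the mechanism (names illustrative):
```
- hosts: kube-master
  gather_facts: false
  tasks:
    - name: Example | Trigger follow-up work
      command: /bin/true
      notify:
        - Example | follow-up
  handlers:
    - name: Example | follow-up
      debug:
        msg: "Runs once, after the tasks (or at an explicit flush_handlers)."
```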
@@ -26,19 +26,22 @@
file:
path: "{{ kube_config_dir }}/admin.conf"
state: absent
-when: not kubeadm_already_run.stat.exists
+when:
+  - not kubeadm_already_run.stat.exists
- name: kubeadm | Delete old static pods
file:
path: "{{ kube_config_dir }}/manifests/{{item}}.manifest"
state: absent
with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler", "kube-proxy"]
-when: old_apiserver_cert.stat.exists
+when:
+  - old_apiserver_cert.stat.exists
- name: kubeadm | Forcefully delete old static pods
shell: "docker ps -f name=k8s_{{item}} -q | xargs --no-run-if-empty docker rm -f"
with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
-when: old_apiserver_cert.stat.exists
+when:
+  - old_apiserver_cert.stat.exists
- name: kubeadm | aggregate all SANs
set_fact:
@@ -220,7 +223,8 @@
- name: kubeadm | cleanup old certs if necessary
import_tasks: kubeadm-cleanup-old-certs.yml
-when: old_apiserver_cert.stat.exists
+when:
+  - old_apiserver_cert.stat.exists
- name: kubeadm | Remove taint for master with node role
command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
......
@@ -4,12 +4,14 @@
- k8s-pre-upgrade
- import_tasks: users-file.yml
-when: kube_basic_auth|default(true)
+when:
+  - kube_basic_auth|default(true)
- import_tasks: encrypt-at-rest.yml
-when: kube_encrypt_secret_data
+when:
+  - kube_encrypt_secret_data
-- name: install | Copy kubectl binary from download dir
+- name: Install | Copy kubectl binary from download dir
synchronize:
src: "{{ local_release_dir }}/hyperkube"
dest: "{{ bin_dir }}/kubectl"
@@ -57,10 +59,5 @@
kube_apiserver_enable_admission_plugins: "{{ kube_apiserver_enable_admission_plugins | difference(['SecurityContextDeny']) | union(['PodSecurityPolicy']) | unique }}"
when: podsecuritypolicy_enabled
-- name: Include kubeadm setup if enabled
+- name: Include kubeadm setup
import_tasks: kubeadm-setup.yml
-  when: kubeadm_enabled|bool|default(false)
-
-- name: Include static pod setup if not using kubeadm
-  import_tasks: static-pod-setup.yml
-  when: not kubeadm_enabled|bool|default(false)
----
-- name: Create audit-policy directory
-  file:
-    path: "{{ audit_policy_file | dirname }}"
-    state: directory
-  tags:
-    - kube-apiserver
-  when: kubernetes_audit|default(false)
-
-- name: Write api audit policy yaml
-  template:
-    src: apiserver-audit-policy.yaml.j2
-    dest: "{{ audit_policy_file }}"
-  notify: Master | Restart apiserver
-  tags:
-    - kube-apiserver
-  when: kubernetes_audit|default(false)
-
-- name: Write kube-apiserver manifest
-  template:
-    src: manifests/kube-apiserver.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
-  notify: Master | Restart apiserver
-  tags:
-    - kube-apiserver
-
-- meta: flush_handlers
-
-- name: Write kube-scheduler kubeconfig
-  template:
-    src: kube-scheduler-kubeconfig.yaml.j2
-    dest: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
-  tags:
-    - kube-scheduler
-
-- name: Write kube-scheduler manifest
-  template:
-    src: manifests/kube-scheduler.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
-  notify: Master | Restart kube-scheduler
-  tags:
-    - kube-scheduler
-
-- name: Write kube-controller-manager kubeconfig
-  template:
-    src: kube-controller-manager-kubeconfig.yaml.j2
-    dest: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
-  tags:
-    - kube-controller-manager
-
-- name: Write kube-controller-manager manifest
-  template:
-    src: manifests/kube-controller-manager.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
-  notify: Master | Restart kube-controller-manager
-  tags:
-    - kube-controller-manager
-
-- meta: flush_handlers
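The deleted file above is the heart of the non-kubeadm control plane: each component is rendered as a static pod manifest into `kube_manifest_dir`, where the kubelet picks it up automatically, and `meta: flush_handlers` forces the notified restart handlers to run immediately rather than at the end of the play. One task from the file, shown in isolation with explanatory comments:
```
- name: Write kube-apiserver manifest          # from the removed file
  template:
    src: manifests/kube-apiserver.manifest.j2  # rendered with cluster variables
    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"  # kubelet watches this dir
  notify: Master | Restart apiserver           # handler runs at the next flush

- meta: flush_handlers                         # apply the restart now
```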
@@ -12,4 +12,3 @@
dest: "{{ kube_users_dir }}/known_users.csv"
mode: 0640
backup: yes
-notify: Master | set secret_changed