Skip to content
Snippets Groups Projects
Commit fc072300 authored by Matthew Mosesohn's avatar Matthew Mosesohn Committed by Kubernetes Prow Robot
Browse files

Purge legacy cleanup tasks from older than 1 year (#4450)

We don't need to support upgrades from 2-year-old installs,
just from the last major version.

Also changed most retried tasks to 1s delay instead of longer.
parent d25ecfe1
No related branches found
No related tags found
No related merge requests found
Showing with 23 additions and 170 deletions
......@@ -5,7 +5,6 @@
- Docker | reload systemd
- Docker | reload docker.socket
- Docker | reload docker
- Docker | pause while Docker restarts
- Docker | wait for docker
- name: Docker | reload systemd
......@@ -23,14 +22,9 @@
name: docker
state: restarted
- name: Docker | pause while Docker restarts
pause:
seconds: 10
prompt: "Waiting for docker restart"
- name: Docker | wait for docker
command: "{{ docker_bin_dir }}/docker images"
register: docker_ready
retries: 10
delay: 5
retries: 20
delay: 1
until: docker_ready.rc == 0
......@@ -40,8 +40,8 @@
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
register: result
until: result.status is defined and result.status == 200
retries: 10
delay: 5
retries: 60
delay: 1
- name: wait for etcd-events up
uri:
......@@ -51,8 +51,8 @@
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
register: result
until: result.status is defined and result.status == 200
retries: 10
delay: 5
retries: 60
delay: 1
- name: set etcd_secret_changed
set_fact:
......
---
# Clean up CoreDNS objects left behind by earlier releases before the
# current manifests are applied; tagged so it also runs on upgrades.
- name: Kubernetes Apps | Delete old CoreDNS resources
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    name: coredns
    namespace: kube-system
    resource: "{{ item }}"
    state: absent
  with_items:
    - deploy
    - configmap
    - svc
  tags:
    - upgrade
# Remove nodelocaldns objects deployed by older releases; current
# templates recreate them on upgrade.
- name: Kubernetes Apps | Delete old nodelocalDNS resources
  kube:
    name: "nodelocaldns"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item }}"
    state: absent
  with_items:
    # was misspelled 'deamonset', so the daemonset kind never matched
    # and the old daemonset was left running
    - 'daemonset'
    - 'configmap'
  tags:
    - upgrade
- name: Kubernetes Apps | Delete kubeadm CoreDNS
kube:
name: "coredns"
......@@ -37,41 +10,3 @@
- kubeadm_init is defined
- kubeadm_init.changed|default(false)
- inventory_hostname == groups['kube-master'][0]
# Remove the kube-dns Deployment and Service so the old DNS stack does
# not keep running alongside its replacement after an upgrade.
- name: Kubernetes Apps | Delete old KubeDNS resources
  kube:
    name: "kube-dns"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item }}"
    state: absent
  with_items:
    - 'deploy'
    - 'svc'
  tags:
    - upgrade
# Same kube-dns cleanup, but gated on kubeadm_init having just reported
# a change; runs only on the first master to avoid concurrent deletes.
- name: Kubernetes Apps | Delete kubeadm KubeDNS
  kube:
    name: "kube-dns"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item }}"
    state: absent
  with_items:
    - 'deploy'
    - 'svc'
  when:
    - kubeadm_init is defined
    - kubeadm_init.changed|default(false)
    - inventory_hostname == groups['kube-master'][0]
# The autoscaler deployment belonged to the retired kube-dns stack;
# delete it during upgrades.
- name: Kubernetes Apps | Delete old KubeDNS Autoscaler deployment
  kube:
    name: "kubedns-autoscaler"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "deploy"
    state: absent
  tags:
    - upgrade
---
# Remove the cluster-wide binding left behind by older dashboard
# deployments; the template task below lays down the current objects.
- name: Kubernetes Apps | Delete old kubernetes-dashboard resources
  kube:
    name: "kubernetes-dashboard"
    kubectl: "{{ bin_dir }}/kubectl"
    # Only one resource kind is removed, so a with_items loop over a
    # single-element list is unnecessary.
    resource: "ClusterRoleBinding"
    state: absent
  tags:
    - upgrade
- name: Kubernetes Apps | Lay down dashboard template
template:
src: "{{ item.file }}.j2"
......
......@@ -7,8 +7,8 @@
client_key: "{{ kube_apiserver_client_key }}"
register: result
until: result.status == 200
retries: 10
delay: 2
retries: 20
delay: 1
when: inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Apps | Cleanup DNS
......
......@@ -51,15 +51,6 @@
when:
- inventory_hostname == groups['kube-master'][0]
# Delete the existing netchecker-server pod so the resources started by
# the next task replace it instead of coexisting with it.
- name: Kubernetes Apps | Purge old Netchecker server
  kube:
    name: "netchecker-server"
    namespace: "{{ netcheck_namespace }}"
    # spacing inside the Jinja expression normalized to match the rest
    # of the file ("{{ var }}", not "{{var}}")
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "po"
    state: absent
  when: inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Apps | Start Netchecker Resources
kube:
name: "{{item.item.name}}"
......
---
# Old installs shipped ingress-nginx as a file-based addon; remove the
# whole directory (and its manifests) from the first master on upgrade.
- name: NGINX Ingress Controller | Remove legacy addon dir and manifests
  file:
    path: "{{ kube_config_dir }}/addons/ingress_nginx"
    state: absent
  when:
    - inventory_hostname == groups['kube-master'][0]
  tags:
    - upgrade
# Drop the namespace used by the legacy addon deployment. `command` is
# sufficient here (no pipes, redirects, or globbing), and the delete is
# best-effort: the namespace may already be absent, so errors are ignored.
- name: NGINX Ingress Controller | Remove legacy namespace
  command: "{{ bin_dir }}/kubectl delete namespace {{ ingress_nginx_namespace }}"
  ignore_errors: true
  when:
    - inventory_hostname == groups['kube-master'][0]
  tags:
    - upgrade
- name: NGINX Ingress Controller | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/ingress_nginx"
......
......@@ -43,23 +43,23 @@
- name: Master | Remove apiserver container
shell: "docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f"
register: remove_apiserver_container
retries: 4
retries: 10
until: remove_apiserver_container.rc == 0
delay: 5
delay: 1
- name: Master | Remove scheduler container
shell: "docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty docker rm -f"
register: remove_scheduler_container
retries: 4
retries: 10
until: remove_scheduler_container.rc == 0
delay: 5
delay: 1
- name: Master | Remove controller manager container
shell: "docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty docker rm -f"
register: remove_cm_container
retries: 4
retries: 10
until: remove_cm_container.rc == 0
delay: 5
delay: 1
- name: Master | wait for kube-scheduler
uri:
......@@ -67,15 +67,15 @@
register: scheduler_result
until: scheduler_result.status == 200
retries: 60
delay: 5
delay: 1
- name: Master | wait for kube-controller-manager
uri:
url: http://localhost:10252/healthz
register: controller_manager_result
until: controller_manager_result.status == 200
retries: 15
delay: 5
retries: 60
delay: 1
- name: Master | wait for the apiserver to be running
uri:
......@@ -85,8 +85,8 @@
client_key: "{{ kube_apiserver_client_key }}"
register: result
until: result.status == 200
retries: 30
delay: 10
retries: 60
delay: 1
- name: Master | set secret_changed
command: /bin/true
......
---
# Probe etcd with the v2 API for legacy /registry data. Best-effort:
# failed_when is disabled because a non-zero rc simply means "no v2
# data", which the next task interprets.
- name: "Pre-upgrade | etcd3 upgrade | see if old config exists"
  command: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} ls /registry/minions"
  environment:
    # Environment variable values are strings; quote to prevent YAML
    # typing this as an integer.
    ETCDCTL_API: "2"
    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
  register: old_data_exists
  delegate_to: "{{ groups['etcd'][0] }}"
  changed_when: false
  when: kube_apiserver_storage_backend == "etcd3"
  failed_when: false
# Keep etcd2 while v2-format data is still present, unless the operator
# explicitly forces etcd3. Guard on `rc is defined`: when the probe task
# above is skipped (backend != etcd3), the registered result carries no
# rc and the bare comparison would raise an undefined-attribute error.
- name: "Pre-upgrade | etcd3 upgrade | use etcd2 unless forced to etcd3"
  set_fact:
    kube_apiserver_storage_backend: "etcd2"
  when: old_data_exists.rc is defined and old_data_exists.rc == 0 and not force_etcd3|bool
- name: "Pre-upgrade | Delete master manifests"
- name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
file:
path: "/etc/kubernetes/manifests/{{item}}.manifest"
state: absent
with_items:
- ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
register: kube_apiserver_manifest_replaced
when: (secret_changed|default(false) or etcd_secret_changed|default(false))
when: etcd_secret_changed|default(false)
- name: "Pre-upgrade | Delete master containers forcefully"
shell: "docker ps -af name=k8s_{{item}}* -q | xargs --no-run-if-empty docker rm -f"
......@@ -31,6 +14,6 @@
- ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
when: kube_apiserver_manifest_replaced.changed
register: remove_master_container
retries: 4
retries: 10
until: remove_master_container.rc == 0
delay: 5
delay: 1
---
# Run legacy-cleanup tasks before laying down the current Flannel manifests.
- import_tasks: pre-upgrade.yml
- name: Flannel | Create Flannel manifests
template:
src: "{{item.file}}.j2"
......
---
# Older installs configured Flannel through a docker systemd drop-in;
# remove it and notify the handler that deletes the default docker
# bridge so networking is reconfigured cleanly.
- name: Flannel pre-upgrade | Purge legacy flannel systemd unit file
  file:
    path: "/etc/systemd/system/docker.service.d/flannel-options.conf"
    state: absent
  notify:
    - Flannel | delete default docker bridge
# Flannel used to run as a static pod; remove the old manifest and
# notify the handler that deletes the flannel interface.
- name: Flannel pre-upgrade | Purge legacy Flannel static pod manifest
  file:
    path: "{{ kube_manifest_dir }}/flannel-pod.manifest"
    state: absent
  notify:
    - Flannel | delete flannel interface
# CNI-based Flannel no longer needs its own certificate directory.
# Use `path` — the file module's canonical parameter, as in the tasks
# above — rather than the `dest` alias.
- name: Flannel pre-upgrade | Remove Flannel's certificate directory not required by CNI
  file:
    path: "{{ flannel_cert_dir }}"
    state: absent
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment