diff --git a/cluster.yml b/cluster.yml
index e363c3b01e120eddaffec3611a98d4c089f02758..cf6942a6ed5dcc98b462511450151cfcaea3983b 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -100,7 +100,6 @@
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
-    - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
 - hosts: kube-master
diff --git a/docs/upgrades.md b/docs/upgrades.md
index 38548057614b138219f2607557b4b1ffca13147b..15aac57fef6e649f78e27cc2a006eb28f6c6801b 100644
--- a/docs/upgrades.md
+++ b/docs/upgrades.md
@@ -289,20 +289,6 @@ follows:
 * kube-apiserver, kube-scheduler, and kube-controller-manager
 * Add-ons (such as KubeDNS)
 
-## Upgrade considerations
-
-Kubespray supports rotating certificates used for etcd and Kubernetes
-components, but some manual steps may be required. If you have a pod that
-requires use of a service token and is deployed in a namespace other than
-`kube-system`, you will need to manually delete the affected pods after
-rotating certificates. This is because all service account tokens are dependent
-on the apiserver token that is used to generate them. When the certificate
-rotates, all service account tokens must be rotated as well. During the
-kubernetes-apps/rotate_tokens role, only pods in kube-system are destroyed and
-recreated. All other invalidated service account tokens are cleaned up
-automatically, but other pods are not deleted out of an abundance of caution
-for impact to user deployed pods.
-
 ### Component-based upgrades
 
 A deployer may want to upgrade specific components in order to minimize risk
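
Note: the "Upgrade considerations" text removed above described a manual step
that still applies if the service account signing key is ever regenerated:
tokens mounted by pods outside kube-system are not refreshed automatically.
A minimal sketch of that manual step, assuming an affected workload in a
hypothetical namespace my-namespace:

    # Stale token secrets are cleaned up server-side, but pods keep their old
    # mounts; recreating the pods forces them to pick up freshly signed tokens.
    kubectl --kubeconfig /etc/kubernetes/admin.conf \
      delete pods -n my-namespace --all
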
diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
deleted file mode 100644
index e9de24b52771cbe9d693802cc481ffb48649a6a5..0000000000000000000000000000000000000000
--- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- name: Rotate Tokens | Get default token name  # noqa 306
-  shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
-  register: default_token
-  changed_when: false
-  until: default_token.rc == 0
-  delay: 4
-  retries: 10
-
-- name: Rotate Tokens | Get default token data
-  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets {{ default_token.stdout }} -ojson"
-  register: default_token_data
-  changed_when: false
-
-- name: Rotate Tokens | Test if default certificate is expired
-  uri:
-    url: https://{{ kube_apiserver_ip }}/api/v1/nodes
-    method: GET
-    return_content: no
-    validate_certs: no
-    headers:
-      Authorization: "Bearer {{ (default_token_data.stdout|from_json)['data']['token']|b64decode }}"
-  register: check_secret
-  failed_when: false
-
-- name: Rotate Tokens | Determine if certificate is expired
-  set_fact:
-    needs_rotation: '{{ check_secret.status not in [200, 403] }}'
-
-# FIXME(mattymo): Exclude built in secrets that were automatically rotated,
-# instead of filtering manually
-- name: Rotate Tokens | Get all serviceaccount tokens to expire  # noqa 306
-  shell: >-
-    {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets --all-namespaces
-    -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
-    | grep kubernetes.io/service-account-token
-    | egrep 'default-token|kube-proxy|coredns|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|tiller|local-volume-provisioner'
-  register: tokens_to_delete
-  when: needs_rotation
-
-- name: Rotate Tokens | Delete expired tokens
-  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete secrets -n {{ item.split(' ')[0] }} {{ item.split(' ')[1] }}"
-  with_items: "{{ tokens_to_delete.stdout_lines }}"
-  when: needs_rotation
-
-- name: Rotate Tokens | Delete pods in system namespace
-  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete pods -n kube-system --all --grace-period=0 --force"
-  when: needs_rotation
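
Note: the deleted role above probed token validity by calling the API with the
default token and treating any status other than 200 or 403 as "needs
rotation". A rough shell equivalent of that probe, assuming APISERVER holds
the kube_apiserver_ip value and that a legacy default-token secret exists
(Kubernetes 1.24+ no longer auto-creates these):

    # Extract the default token and replay the deleted role's validity check.
    KUBECONFIG=/etc/kubernetes/admin.conf
    SECRET=$(kubectl --kubeconfig "$KUBECONFIG" get secrets -o name | grep -m1 default-token)
    TOKEN=$(kubectl --kubeconfig "$KUBECONFIG" get "$SECRET" -o jsonpath='{.data.token}' | base64 -d)
    # 200/403 -> token verifies against the current signing key; 401 -> stale.
    curl -ks -o /dev/null -w '%{http_code}\n' \
      -H "Authorization: Bearer $TOKEN" "https://$APISERVER/api/v1/nodes"
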
diff --git a/roles/kubernetes/control-plane/handlers/main.yml b/roles/kubernetes/control-plane/handlers/main.yml
index 577b433b5361d6b46db0ae1602396ede7741dbc9..e6bc321e20a534f4362006fa477da1a2cd0674bb 100644
--- a/roles/kubernetes/control-plane/handlers/main.yml
+++ b/roles/kubernetes/control-plane/handlers/main.yml
@@ -121,21 +121,3 @@
   until: result.status == 200
   retries: 60
   delay: 1
-
-- name: Master | set secret_changed
-  command: /bin/true
-  notify:
-    - Master | set secret_changed to true
-    - Master | Copy new kubeconfig for root user
-
-- name: Master | set secret_changed to true
-  set_fact:
-    secret_changed: true
-
-- name: Master | Copy new kubeconfig for root user
-  copy:
-    src: "{{ kube_config_dir }}/admin.conf"
-    dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
-    remote_src: yes
-    mode: "0600"
-    backup: yes
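
Note: the handler chain removed above kept root's kubeconfig in sync with a
regenerated admin.conf. Operators rotating certificates by hand may still want
that step; a minimal equivalent, assuming root's kubeconfig lives at the same
default path the handler used:

    # What "Master | Copy new kubeconfig for root user" automated: replace
    # root's kubeconfig with the regenerated admin.conf, keeping a backup.
    cp --backup=numbered /etc/kubernetes/admin.conf /root/.kube/config
    chmod 600 /root/.kube/config
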
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
index b81c42212682f4684ab380d947e8d2625344d88b..6f961f2bcbe24f99ae77bafe4cfb7299b3d61475 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml
@@ -66,7 +66,3 @@
   when:
     - inventory_hostname != groups['kube-master']|first
     - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
-
-- name: Set secret_changed to false to avoid extra token rotation
-  set_fact:
-    secret_changed: false
diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
index 27d64885c456e9bb7df49ebc25737b1d9a747302..55dbac6953baa95ab64cb3573ddd306c0a482b77 100644
--- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
+++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -10,15 +10,6 @@
     - kube_oidc_auth
     - kube_oidc_ca_cert is defined
 
-- name: kubeadm | Check serviceaccount key
-  stat:
-    path: "{{ kube_cert_dir }}/sa.key"
-    get_attributes: no
-    get_checksum: yes
-    get_mime: no
-  register: sa_key_before
-  run_once: true
-
 - name: kubeadm | Check if kubeadm has already run
   stat:
     path: "/var/lib/kubelet/config.yaml"
@@ -180,20 +171,6 @@
     - upgrade_cluster_setup
     - kubeadm_already_run.stat.exists
 
-- name: kubeadm | Check serviceaccount key again
-  stat:
-    path: "{{ kube_cert_dir }}/sa.key"
-    get_attributes: no
-    get_checksum: yes
-    get_mime: no
-  register: sa_key_after
-  run_once: true
-
-- name: kubeadm | Set secret_changed if service account key was updated
-  command: /bin/true
-  notify: Master | set secret_changed
-  when: sa_key_before.stat.checksum|default("") != sa_key_after.stat.checksum
-
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
 - name: kubeadm | Remove taint for master with node role
   command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} {{ item }}"
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 872244cfa9fe9783f79243bd1f57afd974072f8c..b53668408e567f9992c4b11609ccfbc15cfc0d74 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -134,7 +134,6 @@
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
-    - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
 - hosts: calico-rr