diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index b82daadde7ab0f7712c0965a57a07ac341301d54..0f81faeac30c2e51dac1ad90c53c0da7728796c4 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -24,7 +24,7 @@ explain why.
 - **Version of Ansible** (`ansible --version`):
 
 
-**Kargo version (commit) (`git rev-parse --short HEAD`):**
+**Kubespray version (commit) (`git rev-parse --short HEAD`):**
 
 
 **Network plugin used**:
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 1cd4199515ee44ec354945948b114b2e04d1cc21..259c45614f61ada2ffa5a2261f8600af94a5332e 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -90,8 +90,9 @@ before_script:
     - pwd
     - ls
     - echo ${PWD}
+    - echo "${STARTUP_SCRIPT}"
     - >
-      ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local 
+      ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
       ${LOG_LEVEL}
       -e cloud_image=${CLOUD_IMAGE}
       -e cloud_region=${CLOUD_REGION}
@@ -103,6 +104,7 @@ before_script:
       -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
       -e mode=${CLUSTER_MODE}
       -e test_id=${TEST_ID}
+      -e startup_script="'${STARTUP_SCRIPT}'"
 
     # Check out latest tag if testing upgrade
     # Uncomment when gitlab kargo repo has tags
@@ -116,7 +118,7 @@ before_script:
       ${SSH_ARGS}
       ${LOG_LEVEL}
       -e ansible_python_interpreter=${PYPATH}
-      -e ansible_ssh_user=${SSH_USER} 
+      -e ansible_ssh_user=${SSH_USER}
       -e bootstrap_os=${BOOTSTRAP_OS}
       -e cert_management=${CERT_MGMT:-script}
       -e cloud_provider=gce
@@ -125,6 +127,7 @@ before_script:
       -e download_run_once=${DOWNLOAD_RUN_ONCE}
       -e etcd_deployment_type=${ETCD_DEPLOYMENT}
       -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      -e kubedns_min_replicas=1
       -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
       -e local_release_dir=${PWD}/downloads
       -e resolvconf_mode=${RESOLVCONF_MODE}
@@ -134,30 +137,31 @@ before_script:
 
     # Repeat deployment if testing upgrade
     - >
-      if [ "${UPGRADE_TEST}" != "false" ]; then 
+      if [ "${UPGRADE_TEST}" != "false" ]; then
       test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
       test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
-      pip install ansible==2.3.0; 
-      git checkout "${CI_BUILD_REF}"; 
-      ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER 
-      ${SSH_ARGS} 
-      ${LOG_LEVEL} 
-      -e ansible_python_interpreter=${PYPATH} 
-      -e ansible_ssh_user=${SSH_USER} 
-      -e bootstrap_os=${BOOTSTRAP_OS} 
-      -e cloud_provider=gce 
-      -e deploy_netchecker=true 
-      -e download_localhost=${DOWNLOAD_LOCALHOST} 
-      -e download_run_once=${DOWNLOAD_RUN_ONCE} 
-      -e etcd_deployment_type=${ETCD_DEPLOYMENT} 
-      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} 
-      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} 
-      -e local_release_dir=${PWD}/downloads 
-      -e resolvconf_mode=${RESOLVCONF_MODE} 
-      -e weave_cpu_requests=${WEAVE_CPU_LIMIT} 
-      -e weave_cpu_limit=${WEAVE_CPU_LIMIT} 
-      --limit "all:!fake_hosts" 
-      $PLAYBOOK; 
+      pip install ansible==2.3.0;
+      git checkout "${CI_BUILD_REF}";
+      ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
+      ${SSH_ARGS}
+      ${LOG_LEVEL}
+      -e ansible_python_interpreter=${PYPATH}
+      -e ansible_ssh_user=${SSH_USER}
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e cloud_provider=gce
+      -e deploy_netchecker=true
+      -e download_localhost=${DOWNLOAD_LOCALHOST}
+      -e download_run_once=${DOWNLOAD_RUN_ONCE}
+      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      -e kubedns_min_replicas=1
+      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      -e local_release_dir=${PWD}/downloads
+      -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
+      -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
+      --limit "all:!fake_hosts"
+      $PLAYBOOK;
       fi
 
     # Tests Cases
@@ -173,40 +177,41 @@ before_script:
     ## Idempotency checks 1/5 (repeat deployment)
     - >
       if [ "${IDEMPOT_CHECK}" = "true" ]; then
-      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS 
-      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} 
-      --private-key=${HOME}/.ssh/id_rsa 
-      -e bootstrap_os=${BOOTSTRAP_OS} 
-      -e ansible_python_interpreter=${PYPATH} 
-      -e download_localhost=${DOWNLOAD_LOCALHOST} 
-      -e download_run_once=${DOWNLOAD_RUN_ONCE} 
-      -e deploy_netchecker=true 
-      -e resolvconf_mode=${RESOLVCONF_MODE} 
-      -e local_release_dir=${PWD}/downloads 
-      -e etcd_deployment_type=${ETCD_DEPLOYMENT} 
-      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} 
-      --limit "all:!fake_hosts" 
+      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
+      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      --private-key=${HOME}/.ssh/id_rsa
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e ansible_python_interpreter=${PYPATH}
+      -e download_localhost=${DOWNLOAD_LOCALHOST}
+      -e download_run_once=${DOWNLOAD_RUN_ONCE}
+      -e deploy_netchecker=true
+      -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e local_release_dir=${PWD}/downloads
+      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+      -e kubedns_min_replicas=1
+      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      --limit "all:!fake_hosts"
       cluster.yml;
       fi
 
     ## Idempotency checks 2/5 (Advanced DNS checks)
     - >
       if [ "${IDEMPOT_CHECK}" = "true" ]; then
-      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} 
-      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root 
-      --limit "all:!fake_hosts" 
+      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
+      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
+      --limit "all:!fake_hosts"
       tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
       fi
 
     ## Idempotency checks 3/5 (reset deployment)
     - >
       if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
-      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS 
-      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} 
-      --private-key=${HOME}/.ssh/id_rsa 
-      -e bootstrap_os=${BOOTSTRAP_OS} 
-      -e ansible_python_interpreter=${PYPATH} 
-      -e reset_confirmation=yes 
+      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
+      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      --private-key=${HOME}/.ssh/id_rsa
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e ansible_python_interpreter=${PYPATH}
+      -e reset_confirmation=yes
       --limit "all:!fake_hosts"
       reset.yml;
       fi
@@ -214,28 +219,29 @@ before_script:
     ## Idempotency checks 4/5 (redeploy after reset)
     - >
       if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
-      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS 
-      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} 
-      --private-key=${HOME}/.ssh/id_rsa 
-      -e bootstrap_os=${BOOTSTRAP_OS} 
-      -e ansible_python_interpreter=${PYPATH} 
-      -e download_localhost=${DOWNLOAD_LOCALHOST} 
-      -e download_run_once=${DOWNLOAD_RUN_ONCE} 
-      -e deploy_netchecker=true 
-      -e resolvconf_mode=${RESOLVCONF_MODE} 
-      -e local_release_dir=${PWD}/downloads 
-      -e etcd_deployment_type=${ETCD_DEPLOYMENT} 
-      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} 
-      --limit "all:!fake_hosts" 
+      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
+      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      --private-key=${HOME}/.ssh/id_rsa
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e ansible_python_interpreter=${PYPATH}
+      -e download_localhost=${DOWNLOAD_LOCALHOST}
+      -e download_run_once=${DOWNLOAD_RUN_ONCE}
+      -e deploy_netchecker=true
+      -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e local_release_dir=${PWD}/downloads
+      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+      -e kubedns_min_replicas=1
+      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      --limit "all:!fake_hosts"
       cluster.yml;
       fi
 
     ## Idempotency checks 5/5 (Advanced DNS checks)
     - >
       if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
-      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} 
-      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root 
-      --limit "all:!fake_hosts" 
+      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
+      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
+      --limit "all:!fake_hosts"
       tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
       fi
 
@@ -261,6 +267,8 @@ before_script:
   CLUSTER_MODE: separate
   BOOTSTRAP_OS: coreos
   RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
+  ## User-data to simply turn off CoreOS upgrades
+  STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
 .ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
 # stage: deploy-gce-part1
@@ -271,6 +279,7 @@ before_script:
   UPGRADE_TEST: "basic"
   CLUSTER_MODE: ha
   UPGRADE_TEST: "graceful"
+  STARTUP_SCRIPT: ""
 
 .rhel7_weave_variables: &rhel7_weave_variables
 # stage: deploy-gce-part1
@@ -278,6 +287,7 @@ before_script:
   CLOUD_IMAGE: rhel-7
   CLOUD_REGION: europe-west1-b
   CLUSTER_MODE: default
+  STARTUP_SCRIPT: ""
 
 .centos7_flannel_variables: &centos7_flannel_variables
 # stage: deploy-gce-part2
@@ -285,13 +295,15 @@ before_script:
   CLOUD_IMAGE: centos-7
   CLOUD_REGION: us-west1-a
   CLUSTER_MODE: default
-
+  STARTUP_SCRIPT: ""
+
 .debian8_calico_variables: &debian8_calico_variables
 # stage: deploy-gce-part2
   KUBE_NETWORK_PLUGIN: calico
   CLOUD_IMAGE: debian-8-kubespray
   CLOUD_REGION: us-central1-b
   CLUSTER_MODE: default
+  STARTUP_SCRIPT: ""
 
 .coreos_canal_variables: &coreos_canal_variables
 # stage: deploy-gce-part2
@@ -302,6 +314,7 @@ before_script:
   BOOTSTRAP_OS: coreos
   IDEMPOT_CHECK: "true"
   RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
+  STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
 .rhel7_canal_sep_variables: &rhel7_canal_sep_variables
 # stage: deploy-gce-special
@@ -309,6 +322,7 @@ before_script:
   CLOUD_IMAGE: rhel-7
   CLOUD_REGION: us-east1-b
   CLUSTER_MODE: separate
+  STARTUP_SCRIPT: ""
 
 .ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
 # stage: deploy-gce-special
@@ -317,6 +331,7 @@ before_script:
   CLOUD_REGION: us-central1-b
   CLUSTER_MODE: separate
   IDEMPOT_CHECK: "false"
+  STARTUP_SCRIPT: ""
 
 .centos7_calico_ha_variables: &centos7_calico_ha_variables
 # stage: deploy-gce-special
@@ -327,6 +342,7 @@ before_script:
   CLOUD_REGION: europe-west1-b
   CLUSTER_MODE: ha-scale
   IDEMPOT_CHECK: "true"
+  STARTUP_SCRIPT: ""
 
 .coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
 # stage: deploy-gce-special
@@ -336,6 +352,7 @@ before_script:
   CLUSTER_MODE: ha-scale
   BOOTSTRAP_OS: coreos
   RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
+  STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
 .ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
 # stage: deploy-gce-part1
@@ -345,6 +362,7 @@ before_script:
   CLUSTER_MODE: separate
   ETCD_DEPLOYMENT: rkt
   KUBELET_DEPLOYMENT: rkt
+  STARTUP_SCRIPT: ""
 
 .ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
 # stage: deploy-gce-part1
@@ -353,6 +371,7 @@ before_script:
   CLOUD_IMAGE: ubuntu-1604-xenial
   CLOUD_REGION: us-central1-b
   CLUSTER_MODE: separate
+  STARTUP_SCRIPT: ""
 
 # Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
 coreos-calico-sep:
@@ -588,7 +607,7 @@ ci-authorized:
   script:
     - /bin/sh scripts/premoderator.sh
   except: ['triggers', 'master']
-  
+
 syntax-check:
   <<: *job
   stage: unit-tests
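
A note on the quoting added above: the job passes `STARTUP_SCRIPT` through `-e startup_script="'${STARTUP_SCRIPT}'"`, and the inner single quotes are what keep a multi-command value such as the locksmithd one-liner together once the extra-vars string is split. A rough, stand-alone illustration using Python's `shlex`, which mimics the shell-style splitting applied to `key=value` extra vars (illustration only, not Ansible's actual parser):

```python
# Rough illustration only: shlex mimics shell-style word splitting; the inner
# single quotes keep the whole locksmithd one-liner as a single startup_script value.
import shlex

with_quotes = "startup_script='systemctl disable locksmithd && systemctl stop locksmithd'"
without_quotes = "startup_script=systemctl disable locksmithd && systemctl stop locksmithd"

print(shlex.split(with_quotes))     # one intact key=value token
print(shlex.split(without_quotes))  # the value breaks apart into several tokens
```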
diff --git a/README.md b/README.md
index ccba240b6597d7f275831bf298c39c1b602a9521..60252cfbae98f74626afc1ff8d1b9906c695834b 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 
 ## Deploy a production ready kubernetes cluster
 
-If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**.
+If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kubespray**.
 
 - Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
 - **High available** cluster
@@ -13,13 +13,13 @@ If you have questions, join us on the [kubernetes slack](https://slack.k8s.io),
 
 To deploy the cluster you can use :
 
-[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
-**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) <br>
+[**kubespray-cli**](https://github.com/kubespray/kubespray-cli) <br>
+**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) <br>
 **vagrant** by simply running `vagrant up` (for tests purposes) <br>
 
 
 *  [Requirements](#requirements)
-*  [Kargo vs ...](docs/comparisons.md)
+*  [Kubespray vs ...](docs/comparisons.md)
 *  [Getting started](docs/getting-started.md)
 *  [Ansible inventory and tags](docs/ansible.md)
 *  [Integration with existing ansible repo](docs/integration.md)
@@ -98,22 +98,22 @@ option to leverage built-in cloud provider networking instead.
 See also [Network checker](docs/netcheck.md).
 
 ## Community docs and resources
- - [kubernetes.io/docs/getting-started-guides/kargo/](https://kubernetes.io/docs/getting-started-guides/kargo/)
- - [kargo, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
+ - [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
+ - [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
  - [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
- - [Deploy a Kubernets Cluster with Kargo (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
+ - [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
 
-## Tools and projects on top of Kargo
+## Tools and projects on top of Kubespray
  - [Digital Rebar](https://github.com/digitalrebar/digitalrebar)
- - [Kargo-cli](https://github.com/kubespray/kargo-cli)
+ - [Kubespray-cli](https://github.com/kubespray/kubespray-cli)
  - [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
- - [Terraform Contrib](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/terraform)
+ - [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
 
 ## CI Tests
 
 ![Gitlab Logo](https://s27.postimg.org/wmtaig1wz/gitlabci.png)
 
-[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/pipelines) </br>
+[![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines) </br>
 
 CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
 See the [test matrix](docs/test_cases.md) for details.
diff --git a/RELEASE.md b/RELEASE.md
index 56f0b2e0f807d76278c3d16fe7d401c0852a425c..0679667a1c5211b6b9e9c3e092f0cf9b732ea33e 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,16 +1,16 @@
 # Release Process
 
-The Kargo Project is released on an as-needed basis. The process is as follows:
+The Kubespray Project is released on an as-needed basis. The process is as follows:
 
 1. An issue is proposing a new release with a changelog since the last release
 2. At least on of the [OWNERS](OWNERS) must LGTM this release
 3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
 4. The release issue is closed
-5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
+5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
 
 ## Major/minor releases, merge freezes and milestones
 
-* Kargo does not maintain stable branches for releases. Releases are tags, not
+* Kubespray does not maintain stable branches for releases. Releases are tags, not
   branches, and there are no backports. Therefore, there is no need for merge
   freezes as well.
 
@@ -20,21 +20,21 @@ The Kargo Project is released on an as-needed basis. The process is as follows:
   support lifetime, which ends once the milestone closed. Then only a next major
   or minor release can be done.
 
-* Kargo major and minor releases are bound to the given ``kube_version`` major/minor
+* Kubespray major and minor releases are bound to the given ``kube_version`` major/minor
   version numbers and other components' arbitrary versions, like etcd or network plugins.
   Older or newer versions are not supported and not tested for the given release.
 
-* There is no unstable releases and no APIs, thus Kargo doesn't follow
+* There are no unstable releases and no APIs, thus Kubespray doesn't follow
   [semver](http://semver.org/). Every version describes only a stable release.
   Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
   playbooks, shall be described in the release notes. Other breaking changes, if any in
   the contributed addons or bound versions of Kubernetes and other components, are
-  considered out of Kargo scope and are up to the components' teams to deal with and
+  considered out of Kubespray scope and are up to the components' teams to deal with and
   document.
 
 * Minor releases can change components' versions, but not the major ``kube_version``.
-  Greater ``kube_version`` requires a new major or minor release. For example, if Kargo v2.0.0
+  Greater ``kube_version`` requires a new major or minor release. For example, if Kubespray v2.0.0
   is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
-  then Kargo v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
+  then Kubespray v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
   and *any* changes to other components, like etcd v4, or calico 1.2.3.
-  And Kargo v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
+  And Kubespray v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
diff --git a/Vagrantfile b/Vagrantfile
index a2c2c1c8ff5c290d20a618afffd001cd1e6bcb31..ab80732804c1409ca4e259340247391be43f3f0c 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -13,7 +13,7 @@ SUPPORTED_OS = {
   "coreos-stable" => {box: "coreos-stable",      bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
   "coreos-alpha"  => {box: "coreos-alpha",       bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
   "coreos-beta"   => {box: "coreos-beta",        bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
-  "ubuntu"        => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "ubuntu"},
+  "ubuntu"        => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
 }
 
 # Defaults for config options defined in CONFIG
@@ -100,6 +100,10 @@ Vagrant.configure("2") do |config|
         end
       end
 
+      $shared_folders.each do |src, dst|
+        config.vm.synced_folder src, dst
+      end
+
       config.vm.provider :virtualbox do |vb|
         vb.gui = $vm_gui
         vb.memory = $vm_memory
diff --git a/cluster.yml b/cluster.yml
index 75296646ae2e750af34ad8b28baebef335f899ea..b973d6c14f073b16f2c823ddc431f59184758543 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -2,7 +2,7 @@
 - hosts: localhost
   gather_facts: False
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
 - hosts: k8s-cluster:etcd:calico-rr
@@ -13,7 +13,7 @@
     # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
     ansible_ssh_pipelining: false
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: bootstrap-os, tags: bootstrap-os}
 
 - hosts: k8s-cluster:etcd:calico-rr
@@ -25,7 +25,7 @@
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: docker, tags: docker }
@@ -36,38 +36,38 @@
 - hosts: etcd:k8s-cluster:vault
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults, when: "cert_management == 'vault'" }
+    - { role: kubespray-defaults, when: "cert_management == 'vault'" }
     - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
 
 - hosts: etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: etcd, tags: etcd, etcd_cluster_setup: true }
 
 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: etcd, tags: etcd, etcd_cluster_setup: false }
 
 - hosts: etcd:k8s-cluster:vault
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: vault, tags: vault, when: "cert_management == 'vault'"}
 
 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: kubernetes/node, tags: node }
     - { role: network_plugin, tags: network }
 
 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: kubernetes/master, tags: master }
     - { role: kubernetes-apps/network_plugin, tags: network }
     - { role: kubernetes-apps/policy_controller, tags: policy-controller }
@@ -75,18 +75,18 @@
 - hosts: calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: network_plugin/calico/rr, tags: network }
 
 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
     - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
 
 - hosts: kube-master[0]
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: kubernetes-apps, tags: apps }
diff --git a/contrib/aws_inventory/kargo-aws-inventory.py b/contrib/aws_inventory/kubespray-aws-inventory.py
similarity index 96%
rename from contrib/aws_inventory/kargo-aws-inventory.py
rename to contrib/aws_inventory/kubespray-aws-inventory.py
index d379be349165ff30c2713ed809be80027cf9d342..65741bbda2deb68b0028429eb5e3339aa2c8aa03 100755
--- a/contrib/aws_inventory/kargo-aws-inventory.py
+++ b/contrib/aws_inventory/kubespray-aws-inventory.py
@@ -33,10 +33,10 @@ class SearchEC2Tags(object):
     hosts = {}
     hosts['_meta'] = { 'hostvars': {} }
 
-    ##Search ec2 three times to find nodes of each group type. Relies on kargo-role key/value.
+    ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
     for group in ["kube-master", "kube-node", "etcd"]:
       hosts[group] = []
-      tag_key = "kargo-role"
+      tag_key = "kubespray-role"
       tag_value = ["*"+group+"*"]
       region = os.environ['REGION']
 
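
For context on the `kubespray-role` tag the renamed script relies on, here is a minimal sketch of the same tag lookup written against boto3; the script's own AWS client and exact calls are not shown in this diff, so treat the library choice and field names below as assumptions:

```python
# Sketch only: list instances per Kubespray group by the "kubespray-role" tag,
# mirroring the wildcard values ("*kube-master*", ...) used by the inventory script.
import os
import boto3  # assumption: the real script may use a different AWS client

ec2 = boto3.client("ec2", region_name=os.environ.get("REGION", "us-east-2"))
paginator = ec2.get_paginator("describe_instances")

for group in ("kube-master", "kube-node", "etcd"):
    pages = paginator.paginate(Filters=[
        {"Name": "tag:kubespray-role", "Values": ["*" + group + "*"]},
        {"Name": "instance-state-name", "Values": ["running"]},
    ])
    hosts = [instance.get("PrivateDnsName", "")
             for page in pages
             for reservation in page["Reservations"]
             for instance in reservation["Instances"]]
    print(group, hosts)
```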
diff --git a/contrib/azurerm/README.md b/contrib/azurerm/README.md
index d8cd28e7fc64629055105e94e8c4da97fab0a63c..ac2548c85027bb653e205217ebc8ec7eda268797 100644
--- a/contrib/azurerm/README.md
+++ b/contrib/azurerm/README.md
@@ -5,7 +5,7 @@ Provision the base infrastructure for a Kubernetes cluster by using [Azure Resou
 ## Status
 
 This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
-Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kargo of course).
+Resource Group. It will not install Kubernetes itself; that has to be done in a later step by yourself (using kubespray of course).
 
 ## Requirements
 
@@ -47,7 +47,7 @@ $ ./clear-rg.sh <resource_group_name>
 **WARNING** this really deletes everything from your resource group, including everything that was later created by you!
 
 
-## Generating an inventory for kargo
+## Generating an inventory for kubespray
 
 After you have applied the templates, you can generate an inventory with this call:
 
@@ -55,10 +55,10 @@ After you have applied the templates, you can generate an inventory with this ca
 $ ./generate-inventory.sh <resource_group_name>
 ```
 
-It will create the file ./inventory which can then be used with kargo, e.g.:
+It will create the file ./inventory which can then be used with kubespray, e.g.:
 
 ```shell
-$ cd kargo-root-dir
+$ cd kubespray-root-dir
 $ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
 ```
 
diff --git a/contrib/inventory_builder/inventory.py b/contrib/inventory_builder/inventory.py
index 7e0a89f09f044a779895b2d757b1fc45733bbcfb..04c71aecc61b15367df72f444b596751b054fe57 100644
--- a/contrib/inventory_builder/inventory.py
+++ b/contrib/inventory_builder/inventory.py
@@ -65,7 +65,7 @@ HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
 # Configurable as shell vars end
 
 
-class KargoInventory(object):
+class KubesprayInventory(object):
 
     def __init__(self, changed_hosts=None, config_file=None):
         self.config = configparser.ConfigParser(allow_no_value=True,
@@ -337,7 +337,7 @@ MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
 def main(argv=None):
     if not argv:
         argv = sys.argv[1:]
-    KargoInventory(argv, CONFIG_FILE)
+    KubesprayInventory(argv, CONFIG_FILE)
 
 if __name__ == "__main__":
     sys.exit(main())
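
Since only the class rename is visible here, a hypothetical direct use of `KubesprayInventory`, mirroring `main()` above and the `__init__(self, changed_hosts=None, config_file=None)` signature; the host list and config path are placeholders:

```python
# Hypothetical usage of the renamed class; equivalent to running
# "inventory.py <ip> <ip> <ip>" with the config file pointing at the target inventory.
from inventory import KubesprayInventory  # assumes contrib/inventory_builder is on sys.path

KubesprayInventory(["10.90.3.2", "10.90.3.3", "10.90.3.4"], "./inventory.cfg")
```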
diff --git a/contrib/inventory_builder/setup.cfg b/contrib/inventory_builder/setup.cfg
index a099273053501040f974cc817015dfaa818cc94b..a775367e2005ceaefb8eb30e6b655deea26d9bc1 100644
--- a/contrib/inventory_builder/setup.cfg
+++ b/contrib/inventory_builder/setup.cfg
@@ -1,3 +1,3 @@
 [metadata]
-name = kargo-inventory-builder
+name = kubespray-inventory-builder
 version = 0.1
diff --git a/contrib/inventory_builder/tests/test_inventory.py b/contrib/inventory_builder/tests/test_inventory.py
index ad393079d91989c4798ad63b224af703ae663635..43f6b2bb6cffcf2f6ffa613ae75b65cdd548bb54 100644
--- a/contrib/inventory_builder/tests/test_inventory.py
+++ b/contrib/inventory_builder/tests/test_inventory.py
@@ -31,7 +31,7 @@ class TestInventory(unittest.TestCase):
         sys_mock.exit = mock.Mock()
         super(TestInventory, self).setUp()
         self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
-        self.inv = inventory.KargoInventory()
+        self.inv = inventory.KubesprayInventory()
 
     def test_get_ip_from_opts(self):
         optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
diff --git a/contrib/kvm-setup/README.md b/contrib/kvm-setup/README.md
index 61e6265900a7e0024257fe02a5c8f60c8934f0ca..b77299a7892efa0cb0a9db2cf83296b7e81fa0fc 100644
--- a/contrib/kvm-setup/README.md
+++ b/contrib/kvm-setup/README.md
@@ -1,11 +1,11 @@
-# Kargo on KVM Virtual Machines hypervisor preparation
+# Kubespray on KVM Virtual Machines hypervisor preparation
 
-A simple playbook to ensure your system has the right settings to enable Kargo
+A simple playbook to ensure your system has the right settings to enable Kubespray
 deployment on VMs.
 
-This playbook does not create Virtual Machines, nor does it run Kargo itself.
+This playbook does not create Virtual Machines, nor does it run Kubespray itself.
 
 ### User creation
 
-If you want to create a user for running Kargo deployment, you should specify
+If you want to create a user for running Kubespray deployment, you should specify
 both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
diff --git a/contrib/kvm-setup/group_vars/all b/contrib/kvm-setup/group_vars/all
index d08c2c3d3fb3efadc9ec5359dac6e1f1a624b6e8..6edfd8fd146115ad74ff107cc1d1d0938f3c10a6 100644
--- a/contrib/kvm-setup/group_vars/all
+++ b/contrib/kvm-setup/group_vars/all
@@ -1,3 +1,3 @@
-#k8s_deployment_user: kargo
+#k8s_deployment_user: kubespray
 #k8s_deployment_user_pkey_path: /tmp/ssh_rsa
 
diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml b/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
index 11f464bdfd23445d188709f6ba2fda7f17067e47..5417708aca6bc1f77564346dde9f4503f50dd887 100644
--- a/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
+++ b/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
@@ -12,9 +12,9 @@
     line: 'br_netfilter'
   when: br_netfilter is defined and ansible_os_family == 'Debian'
 
-- name: Add br_netfilter into /etc/modules-load.d/kargo.conf
+- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf
   copy:
-    dest: /etc/modules-load.d/kargo.conf
+    dest: /etc/modules-load.d/kubespray.conf
     content: |-
       ### This file is managed by Ansible
       br-netfilter
diff --git a/contrib/network-storage/glusterfs/README.md b/contrib/network-storage/glusterfs/README.md
index d09c10c7fcd67c0ad7e8df7832382e8521a21cbf..d7aea26aafde13ba89975c968e4b1a4ddb012251 100644
--- a/contrib/network-storage/glusterfs/README.md
+++ b/contrib/network-storage/glusterfs/README.md
@@ -1,4 +1,4 @@
-# Deploying a Kargo Kubernetes Cluster with GlusterFS
+# Deploying a Kubespray Kubernetes Cluster with GlusterFS
 
 You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained, you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can neglect the **Using Terraform and Ansible**  section.
 
@@ -6,7 +6,7 @@ You can either deploy using Ansible on its own by supplying your own inventory f
 
 In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
 
-Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kargo root folder, and execute (supposing that the machines are all using ubuntu):
+Change that file to reflect your local setup (adding more machines or removing them and setting the adequate IP numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then change to the kubespray root folder and execute (supposing that the machines are all using ubuntu):
 
 ```
 ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
@@ -28,7 +28,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us
 
 ## Using Terraform and Ansible
 
-First step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
+First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
 
 ```
 cluster_name = "cluster1"
@@ -65,15 +65,15 @@ $ echo Setting up Terraform creds && \
   export TF_VAR_auth_url=${OS_AUTH_URL}
 ```
 
-Then, standing on the kargo directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
+Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
 
 ```
-terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
+terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
 ```
 
 This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to setup (like, for instance, the type of machine for bootstrapping).
 
-Then, provision your Kubernetes (Kargo) cluster with the following ansible call:
+Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
 
 ```
 ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
@@ -88,5 +88,5 @@ ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./co
 If you need to destroy the cluster, you can run:
 
 ```
-terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
+terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
 ```
diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md
index de858b2a9ca5cf97468979467968ec699b10157f..451fc58a780e7be6ab16f37231adf6587b6cefc9 100644
--- a/contrib/terraform/aws/README.md
+++ b/contrib/terraform/aws/README.md
@@ -33,7 +33,7 @@ export AWS_DEFAULT_REGION="zzz"
 
 - Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
 
-- Once the infrastructure is created, you can run the kargo playbooks and supply inventory/hosts with the `-i` flag.
+- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
 
 **Troubleshooting**
 
@@ -54,4 +54,4 @@ It could happen that Terraform doesnt create an Ansible Inventory file automatic
 
 Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.
 
-![AWS Infrastructure with Terraform  ](docs/aws_kargo.png)
+![AWS Infrastructure with Terraform  ](docs/aws_kubespray.png)
diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf
index 781edea86a504c27620db700b603f1cf71066d72..a58bca53c6f1c54367cee53defc49c7331db9a73 100644
--- a/contrib/terraform/aws/create-infrastructure.tf
+++ b/contrib/terraform/aws/create-infrastructure.tf
@@ -157,7 +157,7 @@ resource "aws_instance" "k8s-worker" {
 
 
 /*
-* Create Kargo Inventory File
+* Create Kubespray Inventory File
 *
 */
 data "template_file" "inventory" {
diff --git a/contrib/terraform/aws/docs/aws_kargo.png b/contrib/terraform/aws/docs/aws_kubespray.png
similarity index 100%
rename from contrib/terraform/aws/docs/aws_kargo.png
rename to contrib/terraform/aws/docs/aws_kubespray.png
diff --git a/docs/ansible.md b/docs/ansible.md
index 4da6edb48cdd97d286fe9aef18761184c7652d31..7cb72706a6f60a102ff96e8065522af1e8684a51 100644
--- a/docs/ansible.md
+++ b/docs/ansible.md
@@ -75,25 +75,25 @@ According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variab
 those cannot be overriden from the group vars. In order to override, one should use
 the `-e ` runtime flags (most simple way) or other layers described in the docs.
 
-Kargo uses only a few layers to override things (or expect them to
+Kubespray uses only a few layers to override things (or expect them to
 be overriden for roles):
 
 Layer | Comment
 ------|--------
-**role defaults** | provides best UX to override things for Kargo deployments
+**role defaults** | provides best UX to override things for Kubespray deployments
 inventory vars | Unused
 **inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
 inventory host_vars | Unused
-playbook group_vars | Unuses
+playbook group_vars | Unused
 playbook host_vars | Unused
-**host facts** | Kargo overrides for internal roles' logic, like state flags
+**host facts** | Kubespray overrides for internal roles' logic, like state flags
 play vars | Unused
 play vars_prompt | Unused
 play vars_files | Unused
 registered vars | Unused
-set_facts | Kargo overrides those, for some places
+set_facts | Kubespray overrides those in some places
 **role and include vars** | Provides bad UX to override things! Use extra vars to enforce
-block vars (only for tasks in block) | Kargo overrides for internal roles' logic
+block vars (only for tasks in block) | Kubespray overrides for internal roles' logic
 task vars (only for the task) | Unused for roles, but only for helper scripts
 **extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
 
@@ -124,12 +124,12 @@ The following tags are defined in playbooks:
 |          k8s-pre-upgrade | Upgrading K8s cluster
 |              k8s-secrets | Configuring K8s certs/keys
 |                      kpm | Installing K8s apps definitions with KPM
-|           kube-apiserver | Configuring self-hosted kube-apiserver
-|  kube-controller-manager | Configuring self-hosted kube-controller-manager
+|           kube-apiserver | Configuring static pod kube-apiserver
+|  kube-controller-manager | Configuring static pod kube-controller-manager
 |                  kubectl | Installing kubectl and bash completion
 |                  kubelet | Configuring kubelet service
-|               kube-proxy | Configuring self-hosted kube-proxy
-|           kube-scheduler | Configuring self-hosted kube-scheduler
+|               kube-proxy | Configuring static pod kube-proxy
+|           kube-scheduler | Configuring static pod kube-scheduler
 |                localhost | Special steps for the localhost (ansible runner)
 |                   master | Configuring K8s master node role
 |               netchecker | Installing netchecker K8s app
diff --git a/docs/aws.md b/docs/aws.md
index 91bded11ca860172d504d86dfe1943a518e33587..8bdbc06fa4073225006eb6d62907c52576b90d58 100644
--- a/docs/aws.md
+++ b/docs/aws.md
@@ -3,7 +3,7 @@ AWS
 
 To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
 
-Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
+Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
 
 The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
 
@@ -45,12 +45,12 @@ This will produce an inventory that is passed into Ansible that looks like the f
 
 Guide:
 - Create instances in AWS as needed.
-- Either during or after creation, add tags to the instances with a key of `kargo-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
-- Copy the `kargo-aws-inventory.py` script from `kargo/contrib/aws_inventory` to the `kargo/inventory` directory.
+- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
+- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
 - Set the following AWS credentials and info as environment variables in your terminal:
 ```
 export AWS_ACCESS_KEY_ID="xxxxx"
 export AWS_SECRET_ACCESS_KEY="yyyyy"
 export REGION="us-east-2"
 ```
-- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kargo-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
+- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
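
Before running `cluster.yml`, it can be worth sanity-checking what the dynamic inventory returns. A small sketch, assuming the script follows the usual Ansible dynamic-inventory convention of printing a JSON group map for `--list`, with the AWS credentials and `REGION` variables from above exported:

```python
# Sanity-check sketch: dump the groups the dynamic inventory would hand to Ansible.
import json
import subprocess

raw = subprocess.check_output(["inventory/kubespray-aws-inventory.py", "--list"])
groups = json.loads(raw)
for name in ("kube-master", "kube-node", "etcd"):
    print(name, groups.get(name, []))
```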
diff --git a/docs/calico.md b/docs/calico.md
index eefbcb6e22d01b6f7123e4a2b4d8198f13e9cc60..00ff748c1140ae8e2ddeb67b2a98a45486795d62 100644
--- a/docs/calico.md
+++ b/docs/calico.md
@@ -96,7 +96,7 @@ You need to edit your inventory and add:
 * `cluster_id` by route reflector node/group (see details
 [here](https://hub.docker.com/r/calico/routereflector/))
 
-Here's an example of Kargo inventory with route reflectors:
+Here's an example of Kubespray inventory with route reflectors:
 
 ```
 [all]
@@ -145,11 +145,11 @@ cluster_id="1.0.0.1"
 The inventory above will deploy the following topology assuming that calico's
 `global_as_num` is set to `65400`:
 
-![Image](figures/kargo-calico-rr.png?raw=true)
+![Image](figures/kubespray-calico-rr.png?raw=true)
 
 ##### Optional : Define default endpoint to host action
 
-By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kargo) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) withing the same node are dropped.
+By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in Kubernetes, the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to service endpoints (with hostNetwork=True) within the same node are dropped.
 
 
 To re-define default action please set the following variable in your inventory:
diff --git a/docs/cloud.md b/docs/cloud.md
index f0db21c70a0f5a4afa16de9a9486aa2ca5670c96..7d966bafa9b4ff5ecd4798dbcedcd99a81e9eaf4 100644
--- a/docs/cloud.md
+++ b/docs/cloud.md
@@ -3,17 +3,17 @@ Cloud providers
 
 #### Provisioning
 
-You can use kargo-cli to start new instances on cloud providers
+You can use kubespray-cli to start new instances on cloud providers
 here's an example
 ```
-kargo [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
+kubespray [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
 ```
 
 #### Deploy kubernetes
 
-With kargo-cli
+With kubespray-cli
 ```
-kargo deploy [--aws|--gce] -u admin
+kubespray deploy [--aws|--gce] -u admin
 ```
 
 Or ansible-playbook command
diff --git a/docs/comparisons.md b/docs/comparisons.md
index 63cb6010217b81ed17e28e758c7642a2c40db5d4..cf34e37d5f7ca36a47d2d1c36be94835b84f0fcc 100644
--- a/docs/comparisons.md
+++ b/docs/comparisons.md
@@ -1,25 +1,25 @@
-Kargo vs [Kops](https://github.com/kubernetes/kops)
+Kubespray vs [Kops](https://github.com/kubernetes/kops)
 ---------------
 
-Kargo runs on bare metal and most clouds, using Ansible as its substrate for
+Kubespray runs on bare metal and most clouds, using Ansible as its substrate for
 provisioning and orchestration. Kops performs the provisioning and orchestration
 itself, and as such is less flexible in deployment platforms. For people with
 familiarity with Ansible, existing Ansible deployments or the desire to run a
-Kubernetes cluster across multiple platforms, Kargo is a good choice. Kops,
+Kubernetes cluster across multiple platforms, Kubespray is a good choice. Kops,
 however, is more tightly integrated with the unique features of the clouds it
 supports so it could be a better choice if you know that you will only be using
 one platform for the foreseeable future.
 
-Kargo vs [Kubeadm](https://github.com/kubernetes/kubeadm)
+Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm)
 ------------------
 
 Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle
 management, including self-hosted layouts, dynamic discovery services and so
 on. Had it belong to the new [operators world](https://coreos.com/blog/introducing-operators.html),
-it would've likely been named a "Kubernetes cluster operator". Kargo however,
+it would've likely been named a "Kubernetes cluster operator". Kubespray however,
 does generic configuration management tasks from the "OS operators" ansible
 world, plus some initial K8s clustering (with networking plugins included) and
-control plane bootstrapping. Kargo [strives](https://github.com/kubernetes-incubator/kargo/issues/553)
+control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-incubator/kubespray/issues/553)
 to adopt kubeadm as a tool in order to consume life cycle management domain
 knowledge from it and offload generic OS configuration things from it, which
 hopefully benefits both sides.
diff --git a/docs/coreos.md b/docs/coreos.md
index 546ad0e89137cd52f2283d4a80523427e1f350b2..e8db71b2c3abf2e6fbf03e606275a46e363219cc 100644
--- a/docs/coreos.md
+++ b/docs/coreos.md
@@ -1,10 +1,10 @@
 CoreOS bootstrap
 ===============
 
-Example with **kargo-cli**:
+Example with **kubespray-cli**:
 
 ```
-kargo deploy --gce --coreos
+kubespray deploy --gce --coreos
 ```
 
 Or with Ansible:
diff --git a/docs/dns-stack.md b/docs/dns-stack.md
index 67afb6b43319a19c65444d2c0a2aa8b3122252d9..f4de31544b58fd2362a54a1fb4497a70ddd3590f 100644
--- a/docs/dns-stack.md
+++ b/docs/dns-stack.md
@@ -1,7 +1,7 @@
-K8s DNS stack by Kargo
+K8s DNS stack by Kubespray
 ======================
 
-For K8s cluster nodes, kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
+For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
 [cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
 to serve as an authoritative DNS server for a given ``dns_domain`` and its
 ``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
@@ -44,13 +44,13 @@ DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode``
 DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
 DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).
 
-DNS modes supported by kargo
+DNS modes supported by Kubespray
 ============================
 
-You can modify how kargo sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
+You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
 
 ## dns_mode
-``dns_mode`` configures how kargo will setup cluster DNS. There are three modes available:
+``dns_mode`` configures how Kubespray will set up cluster DNS. There are three modes available:
 
 #### dnsmasq_kubedns (default)
 This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
@@ -67,7 +67,7 @@ This does not install any of dnsmasq and kubedns/skydns. This basically disables
 leaves you with a non functional cluster.
 
 ## resolvconf_mode
-``resolvconf_mode`` configures how kargo will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers.
+``resolvconf_mode`` configures how Kubespray will set up DNS for ``hostNetwork: true`` PODs and non-k8s containers.
 There are three modes available:
 
 #### docker_dns (default)
@@ -100,7 +100,7 @@ used as a backup nameserver. After cluster DNS is running, all queries will be a
 servers, which in turn will forward queries to the system nameserver if required.
 
 #### host_resolvconf
-This activates the classic kargo behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
+This activates the classic Kubespray behaviour that modifies the host's ``/etc/resolv.conf`` file and dhclient
 configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).
 
 As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first
@@ -120,7 +120,7 @@ cluster service names.
 Limitations
 -----------
 
-* Kargo has yet ways to configure Kubedns addon to forward requests SkyDns can
+* Kubespray does not yet have a way to configure the Kubedns addon to forward requests SkyDns can
   not answer with authority to arbitrary recursive resolvers. This task is left
   for future. See [official SkyDns docs](https://github.com/skynetservices/skydns)
   for details.
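
A quick way to confirm the DNS chain described above actually resolves service names, run from a cluster node (or from a pod, depending on `resolvconf_mode`); `cluster.local` is only the default `dns_domain` and should be adjusted to your inventory:

```python
# Minimal resolution check for the cluster DNS; an NXDOMAIN or timeout here usually
# means kubedns/dnsmasq is not reachable from this resolver configuration.
import socket

name = "kubernetes.default.svc.cluster.local"  # assumption: default dns_domain
print(name, "->", socket.gethostbyname(name))
```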
diff --git a/docs/downloads.md b/docs/downloads.md
index 2c3f3085f210f544833bbeb83d6e19d2b1598a5f..50d9ef10736d00dafd5db00d0412b978a0d7a061 100644
--- a/docs/downloads.md
+++ b/docs/downloads.md
@@ -1,7 +1,7 @@
 Downloading binaries and containers
 ===================================
 
-Kargo supports several download/upload modes. The default is:
+Kubespray supports several download/upload modes. The default is:
 
 * Each node downloads binaries and container images on its own, which is
   ``download_run_once: False``.
diff --git a/docs/figures/kargo-calico-rr.png b/docs/figures/kubespray-calico-rr.png
similarity index 100%
rename from docs/figures/kargo-calico-rr.png
rename to docs/figures/kubespray-calico-rr.png
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 6e323d9cdbf9775ca9eaf65cd58725650f4793d5..25bcbfaad87f8037bf7d7a50997d0307ab7fd5bb 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -1,21 +1,21 @@
 Getting started
 ===============
 
-The easiest way to run the deployement is to use the **kargo-cli** tool.
-A complete documentation can be found in its [github repository](https://github.com/kubespray/kargo-cli).
+The easiest way to run the deployment is to use the **kubespray-cli** tool.
+Complete documentation can be found in its [github repository](https://github.com/kubespray/kubespray-cli).
 
 Here is a simple example on AWS:
 
 * Create instances and generate the inventory
 
 ```
-kargo aws --instances 3
+kubespray aws --instances 3
 ```
 
 * Run the deployment
 
 ```
-kargo deploy --aws -u centos -n calico
+kubespray deploy --aws -u centos -n calico
 ```
 
 Building your own inventory
@@ -23,12 +23,12 @@ Building your own inventory
 
 Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
 an example inventory located
-[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example).
+[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/inventory.example).
 
 You can use an
-[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py)
+[inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
 to create or modify an Ansible inventory. Currently, it is limited in
-functionality and is only use for making a basic Kargo cluster, but it does
+functionality and is only used for making a basic Kubespray cluster, but it does
 support creating large clusters. It now supports
 separated ETCD and Kubernetes master roles from node role if the size exceeds a
 certain threshold. Run inventory.py help for more information.
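
As a concrete example of driving the inventory generator mentioned above, a small sketch; the node IPs are placeholders and `CONFIG_FILE` is an assumption about where the generated inventory should be written (see `inventory.py help` for the authoritative options):

```python
# Sketch: generate a basic inventory from three node IPs with the builder above.
import os
import subprocess

env = dict(os.environ, CONFIG_FILE="inventory/inventory.cfg")  # assumed output path
subprocess.check_call(
    ["python3", "contrib/inventory_builder/inventory.py",
     "10.10.1.3", "10.10.1.4", "10.10.1.5"],
    env=env,
)
```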
diff --git a/docs/ha-mode.md b/docs/ha-mode.md
index 20578f705609fb0f46b66f6693a3d7c21aa13faf..5036345b7fec7c31d61f1b774294c689d1723df1 100644
--- a/docs/ha-mode.md
+++ b/docs/ha-mode.md
@@ -22,7 +22,7 @@ Kube-apiserver
 --------------
 
 K8s components require a loadbalancer to access the apiservers via a reverse
-proxy. Kargo includes support for an nginx-based proxy that resides on each
+proxy. Kubespray includes support for an nginx-based proxy that resides on each
 non-master Kubernetes node. This is referred to as localhost loadbalancing. It
 is less efficient than a dedicated load balancer because it creates extra
 health checks on the Kubernetes apiserver, but is more practical for scenarios
@@ -30,12 +30,12 @@ where an external LB or virtual IP management is inconvenient.  This option is
 configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
 You may also define the port the local internal loadbalancer users by changing,
 `nginx_kube_apiserver_port`.  This defaults to the value of `kube_apiserver_port`.
-It is also import to note that Kargo will only configure kubelet and kube-proxy
+It is also important to note that Kubespray will only configure kubelet and kube-proxy
 on non-master nodes to use the local internal loadbalancer.
 
 If you choose to NOT use the local internal loadbalancer, you will need to configure
 your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
-a user and is not covered by ansible roles in Kargo. By default, it only configures
+a user and is not covered by ansible roles in Kubespray. By default, it only configures
 a non-HA endpoint, which points to the `access_ip` or IP address of the first server
 node in the `kube-master` group. It can also configure clients to use endpoints
 for a given loadbalancer type. The following diagram shows how traffic to the
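As a rough illustration of the two approaches above, a group_vars sketch might look like the following. All values are made up, and the variable shape for the external loadbalancer case is an assumption; docs/ha-mode.md remains the authoritative reference.

```
# group_vars/all.yml -- illustrative values, not defaults
# Keep the per-node nginx proxy (the default) but move its listen port:
loadbalancer_apiserver_localhost: true
nginx_kube_apiserver_port: 8383        # hypothetical port; defaults to kube_apiserver_port

# Or disable it and point clients at a loadbalancer you manage yourself
# (variable shape assumed from the HA guide, not shown in this diff):
# loadbalancer_apiserver_localhost: false
# loadbalancer_apiserver:
#   address: 10.1.2.3
#   port: 8383
```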
diff --git a/docs/netcheck.md b/docs/netcheck.md
index bee04cbb3f05ec525717d485126e17a60413711e..80679cd7310201eba86380168e57a89559dad600 100644
--- a/docs/netcheck.md
+++ b/docs/netcheck.md
@@ -1,7 +1,7 @@
 Network Checker Application
 ===========================
 
-With the ``deploy_netchecker`` var enabled (defaults to false), Kargo deploys a
+With the ``deploy_netchecker`` var enabled (defaults to false), Kubespray deploys a
 Network Checker Application from the 3rd side `l23network/k8s-netchecker` docker
 images. It consists of the server and agents trying to reach the server by usual
 for Kubernetes applications network connectivity meanings. Therefore, this
@@ -17,7 +17,7 @@ any of the cluster nodes:
 ```
 curl http://localhost:31081/api/v1/connectivity_check
 ```
-Note that Kargo does not invoke the check but only deploys the application, if
+Note that Kubespray does not invoke the check but only deploys the application, if
 requested.
 
 There are related application specifc variables:
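A minimal way to request the netchecker deployment from an inventory, given the default of `false` mentioned above, is a sketch like this:

```
# group_vars/k8s-cluster.yml -- enable the network checker (defaults to false)
deploy_netchecker: true
# The server is reachable on NodePort 31081, so the connectivity report can be
# read from any node with: curl http://localhost:31081/api/v1/connectivity_check
```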
diff --git a/docs/roadmap.md b/docs/roadmap.md
index c0a3a752731b35d6bc049381be285591206795da..9b23ffc1c4bed701ecedd6b06fabb18593d7bed6 100644
--- a/docs/roadmap.md
+++ b/docs/roadmap.md
@@ -1,23 +1,23 @@
-Kargo's roadmap
+Kubespray's roadmap
 =================
 
 ### Kubeadm
 - Propose kubeadm as an option in order to setup the kubernetes cluster.
-That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kargo/issues/553)
+That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kubespray/issues/553)
 
-### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
+### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
 - the playbook would install and configure docker/rkt and the etcd cluster
 - the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
-- a "kubespray" container would be deployed (kargo-cli, ansible-playbook, kpm)
+- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook, kpm)
 - to be discussed, a way to provide the inventory
-- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kargo/issues/321)
+- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)
 
 ### Provisionning and cloud providers
 - [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
 - [ ] On AWS autoscaling, multi AZ
-- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kargo/issues/297)
-- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kargo/issues/280)
-- [x] **TLS boostrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234)
+- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297)
+- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280)
+- [x] **TLS bootstrap** support for kubelet [#234](https://github.com/kubespray/kubespray/issues/234)
   (related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
    https://github.com/kubernetes/kubernetes/issues/18112)
 
@@ -37,14 +37,14 @@ That would probably improve deployment speed and certs management [#553](https:/
 - [ ] test scale up cluster:  +1 etcd, +1 master, +1 node
 
 ### Lifecycle
-- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553)
-- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
+- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kubespray/issues/553)
+- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kubespray/issues/154)
 - [ ] Drain worker node when shutting down/deleting an instance
 - [ ] Upgrade granularity: select components to upgrade and skip others
 
 ### Networking
-- [ ] romana.io support [#160](https://github.com/kubespray/kargo/issues/160)
-- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
+- [ ] romana.io support [#160](https://github.com/kubespray/kubespray/issues/160)
+- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kubespray/issues/159)
 - [ ] Opencontrail
 - [x] Canal
 - [x] Cloud Provider native networking (instead of our network plugins)
@@ -53,14 +53,14 @@ That would probably improve deployment speed and certs management [#553](https:/
 - (to be discussed) option to set a loadbalancer for the apiservers like ucarp/packemaker/keepalived
 While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed.
 
-### Kargo-cli
+### Kubespray-cli
 - Delete instances
-- `kargo vagrant` to setup a test cluster locally
-- `kargo azure` for Microsoft Azure support
+- `kubespray vagrant` to set up a test cluster locally
+- `kubespray azure` for Microsoft Azure support
 - switch to Terraform instead of Ansible for provisionning
 - update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context
 
-### Kargo API
+### Kubespray API
 - Perform all actions through an **API**
 - Store inventories / configurations of mulltiple clusters
 - make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
@@ -87,8 +87,8 @@ Include optionals deployments to init the cluster:
 ### Others
 - remove nodes  (adding is already supported)
 - being able to choose any k8s version (almost done)
-- **rkt** support [#59](https://github.com/kubespray/kargo/issues/59)
+- **rkt** support [#59](https://github.com/kubespray/kubespray/issues/59)
 - Review documentation (split in categories)
 - **consul** -> if officialy supported by k8s
-- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kargo/issues/312)
-- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kargo/issues/329)
+- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kubespray/issues/312)
+- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kubespray/issues/329)
diff --git a/docs/upgrades.md b/docs/upgrades.md
index cb431d4c071547749d43fb011b960b85f4d425a6..9a21cbdc47535ba512b7622543de9a2a47579f58 100644
--- a/docs/upgrades.md
+++ b/docs/upgrades.md
@@ -1,11 +1,11 @@
-Upgrading Kubernetes in Kargo
+Upgrading Kubernetes in Kubespray
 =============================
 
 #### Description
 
-Kargo handles upgrades the same way it handles initial deployment. That is to
+Kubespray handles upgrades the same way it handles initial deployment. That is to
 say that each component is laid down in a fixed order. You should be able to
-upgrade from Kargo tag 2.0 up to the current master without difficulty. You can
+upgrade from Kubespray tag 2.0 up to the current master without difficulty. You can
 also individually control versions of components by explicitly defining their
 versions. Here are all version vars for each component:
 
@@ -35,7 +35,7 @@ ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
 
 #### Graceful upgrade
 
-Kargo also supports cordon, drain and uncordoning of nodes when performing 
+Kubespray also supports cordoning, draining and uncordoning of nodes when performing
 a cluster upgrade. There is a separate playbook used for this purpose. It is
 important to note that upgrade-cluster.yml can only be used for upgrading an
 existing cluster. That means there must be at least 1 kube-master already
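Instead of passing `-e kube_version=...` on every run, the pin can also live in group_vars; the value below is purely illustrative.

```
# group_vars/k8s-cluster.yml -- pin the Kubernetes version used for the upgrade
# (illustrative value; use whichever tag your chosen hyperkube image provides)
kube_version: v1.6.1
```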
diff --git a/docs/vars.md b/docs/vars.md
index 603a614b269d11aa4be00ae6984b90de83757cce..4b9da186e0e3c3c39480da80bef17ea699f7ea55 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -1,4 +1,4 @@
-Configurable Parameters in Kargo
+Configurable Parameters in Kubespray
 ================================
 
 #### Generic Ansible variables
@@ -12,7 +12,7 @@ Some variables of note include:
 * *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
   Generated based on the output from the command ``ip -4 route get 8.8.8.8``
 
-#### Common vars that are used in Kargo
+#### Common vars that are used in Kubespray
 
 * *calico_version* - Specify version of Calico to use
 * *calico_cni_version* - Specify version of Calico CNI plugin to use
@@ -35,16 +35,16 @@ Some variables of note include:
 * *access_ip* - IP for other hosts to use to connect to. Often required when
   deploying from a cloud, such as OpenStack or GCE and you have separate
   public/floating and private IPs.
-* *ansible_default_ipv4.address* - Not Kargo-specific, but it is used if ip
+* *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
   and access_ip are undefined
 * *loadbalancer_apiserver* - If defined, all hosts will connect to this
   address instead of localhost for kube-masters and kube-master[0] for
   kube-nodes. See more details in the
-  [HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
+  [HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
 * *loadbalancer_apiserver_localhost* - makes all hosts to connect to
   the apiserver internally load balanced endpoint. Mutual exclusive to the
   `loadbalancer_apiserver`. See more details in the
-  [HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
+  [HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
 
 #### Cluster variables
 
@@ -79,13 +79,13 @@ other settings from your existing /etc/resolv.conf are lost. Set the following
 variables to match your requirements.
 
 * *upstream_dns_servers* - Array of upstream DNS servers configured on host in
-  addition to Kargo deployed DNS
+  addition to Kubespray-deployed DNS
 * *nameservers* - Array of DNS servers configured for use in dnsmasq
 * *searchdomains* - Array of up to 4 search domains
 * *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)
 
 For more information, see [DNS
-Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.md).
+Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-stack.md).
 
 #### Other service variables
 
@@ -114,5 +114,5 @@ The possible vars are:
 
 #### User accounts
 
-Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
+Kubespray sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
 passwords default to changeme. You can set this by changing ``kube_api_pwd``.
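Pulling the DNS-related and account-related variables above together, a group_vars sketch could look like the following; every value is illustrative.

```
# group_vars/all.yml -- illustrative DNS and account settings
upstream_dns_servers:        # used in addition to the Kubespray-deployed DNS
  - 8.8.8.8
  - 8.8.4.4
nameservers:                 # DNS servers configured for use in dnsmasq
  - 10.0.0.2
searchdomains:               # up to 4 search domains
  - example.internal
skip_dnsmasq: false          # set to true to rely on KubeDNS only
kube_api_pwd: "changeme"     # password for the default root and kube accounts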
diff --git a/docs/vault.md b/docs/vault.md
index 446d914c9d9df100c82b50a6804cab71cb8c34fc..3850d04b5feee03386aafbf80ab680487bde4277 100644
--- a/docs/vault.md
+++ b/docs/vault.md
@@ -39,7 +39,7 @@ vault group.
 It is *highly* recommended that these secrets are removed from the servers after
 your cluster has been deployed, and kept in a safe location of your choosing.
 Naturally, the seriousness of the situation depends on what you're doing with
-your Kargo cluster, but with these secrets, an attacker will have the ability
+your Kubespray cluster, but with these secrets, an attacker will have the ability
 to authenticate to almost everything in Kubernetes and decode all private
 (HTTPS) traffic on your network signed by Vault certificates.
 
diff --git a/extra_playbooks/upgrade-only-k8s.yml b/extra_playbooks/upgrade-only-k8s.yml
index f10259b0770d3817249f632072ce3f392c3d601d..c2af6a3019785d64ed1a606b1a813da8136f3b00 100644
--- a/extra_playbooks/upgrade-only-k8s.yml
+++ b/extra_playbooks/upgrade-only-k8s.yml
@@ -11,7 +11,7 @@
 - hosts: localhost
   gather_facts: False
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
 - hosts: k8s-cluster:etcd:calico-rr
@@ -22,7 +22,7 @@
     # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
     ansible_ssh_pipelining: false
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: bootstrap-os, tags: bootstrap-os}
 
 - hosts: k8s-cluster:etcd:calico-rr
@@ -34,7 +34,7 @@
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, tags: preinstall }
 
 #Handle upgrades to master components first to maintain backwards compat.
@@ -42,7 +42,7 @@
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/master, tags: master }
@@ -53,8 +53,8 @@
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml
index 6388ca7af1c0c42b31ea6c1ae88ba3a39021b37d..a300553671cace24923979ad9ca00228a65adb90 100644
--- a/inventory/group_vars/all.yml
+++ b/inventory/group_vars/all.yml
@@ -83,6 +83,9 @@ bin_dir: /usr/local/bin
 ## Please note that overlay2 is only supported on newer kernels
 #docker_storage_options: -s overlay2
 
+# Uncomment this if you have more than 3 nameservers; only the first 3 will be used.
+#docker_dns_servers_strict: false
+
 ## Default packages to install within the cluster, f.e:
 #kpm_packages:
 #  - name: kube-system/grafana
diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml
index 5e633d6fe0c1c696ce493943bcdc671c3d2b51ff..65a8661d0d8317c0390f7992dee6e8afb8720ef1 100644
--- a/inventory/group_vars/k8s-cluster.yml
+++ b/inventory/group_vars/k8s-cluster.yml
@@ -133,3 +133,8 @@ efk_enabled: false
 
 # Helm deployment
 helm_enabled: false
+
+# dnsmasq
+# dnsmasq_upstream_dns_servers:
+#  - /resolvethiszone.with/10.0.4.250
+#  - 8.8.8.8
diff --git a/library/kube.py b/library/kube.py
index 2922c62129885156076b640ac275e2234e62d5bd..fdc783fff31b1ef089491c20a2f75f74297cdc8a 100644
--- a/library/kube.py
+++ b/library/kube.py
@@ -66,7 +66,7 @@ options:
     description:
       - present handles checking existence or creating if definition file provided,
         absent handles deleting resource(s) based on other options,
-        latest handles creating ore updating based on existence,
+        latest handles creating or updating based on existence,
         reloaded handles updating resource(s) definition using definition file,
         stopped handles stopping resource(s) based on other options.
 requirements:
diff --git a/reset.yml b/reset.yml
index b6e15d82839ba83c4d56903e45da4a2ed61bbf88..859ca6264f5b1fa9cf7070d048b6a86e9878fe42 100644
--- a/reset.yml
+++ b/reset.yml
@@ -14,5 +14,5 @@
       when: reset_confirmation != "yes"
 
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: reset, tags: reset }
diff --git a/roles/bootstrap-os/defaults/main.yml b/roles/bootstrap-os/defaults/main.yml
index cf40f692dde395f74b55d0dfd93230839e32f2ed..1bc47cf1e293bc06e1de6e82df08c4eb69fc3da1 100644
--- a/roles/bootstrap-os/defaults/main.yml
+++ b/roles/bootstrap-os/defaults/main.yml
@@ -2,3 +2,4 @@
 pypy_version: 2.4.0
 pip_python_modules:
   - httplib2
+  - six
\ No newline at end of file
diff --git a/roles/dnsmasq/defaults/main.yml b/roles/dnsmasq/defaults/main.yml
index 58b1b7f1d72f2cd0219af0a1a59f1ff22e78679e..bf670c788a9b6fe45f2c38373edbaddf901db757 100644
--- a/roles/dnsmasq/defaults/main.yml
+++ b/roles/dnsmasq/defaults/main.yml
@@ -30,3 +30,6 @@ dns_memory_requests: 50Mi
 # Autoscaler parameters
 dnsmasq_nodes_per_replica: 10
 dnsmasq_min_replicas: 1
+
+# Custom name servers
+dnsmasq_upstream_dns_servers: []
diff --git a/roles/dnsmasq/templates/01-kube-dns.conf.j2 b/roles/dnsmasq/templates/01-kube-dns.conf.j2
index dce26d726dabdfbfd903888c37e27a1ddf1648af..483be2090afdc6b5ccb251c9d10e838ff5c11514 100644
--- a/roles/dnsmasq/templates/01-kube-dns.conf.j2
+++ b/roles/dnsmasq/templates/01-kube-dns.conf.j2
@@ -11,6 +11,11 @@ server=/{{ dns_domain }}/{{ skydns_server }}
 local=/{{ bogus_domains }}
 
 #Set upstream dns servers
+{% if dnsmasq_upstream_dns_servers|length > 0 %}
+{% for srv in dnsmasq_upstream_dns_servers %}
+server={{ srv }}
+{% endfor %}
+{% endif %}
 {% if system_and_upstream_dns_servers|length > 0 %}
 {% for srv in system_and_upstream_dns_servers %}
 server={{ srv }}
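Tying this template change to the new `dnsmasq_upstream_dns_servers` default: each list entry is emitted verbatim as a dnsmasq `server=` line, so both zone-scoped entries and plain resolvers work. A sketch, reusing the example values from the inventory comment above:

```
# group_vars/k8s-cluster.yml -- illustrative upstream servers for dnsmasq
dnsmasq_upstream_dns_servers:
  - /resolvethiszone.with/10.0.4.250   # rendered as server=/resolvethiszone.with/10.0.4.250
  - 8.8.8.8                            # rendered as server=8.8.8.8
```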
diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml b/roles/dnsmasq/templates/dnsmasq-deploy.yml
index ed74c3e0625fd213a8005dcf8122de280019c5c4..e811e199596de09262f0a79865cd99eaae42daf0 100644
--- a/roles/dnsmasq/templates/dnsmasq-deploy.yml
+++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml
@@ -19,7 +19,7 @@ spec:
       labels:
         k8s-app: dnsmasq
         kubernetes.io/cluster-service: "true"
-        kargo/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
+        kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
     spec:
       containers:
         - name: dnsmasq
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index c771953ff1ed42122eb8365772f625045fd0b719..e262d908a65b0f656f9793172a420013a14a02b3 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -8,3 +8,5 @@ docker_repo_key_info:
 
 docker_repo_info:
   repos:
+
+docker_dns_servers_strict: yes
diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml
index f17c1bde24435b13b36cacfabf43eae0a0dadbf9..64a09bff29d4430979416066fe3d0d2545cbbc45 100644
--- a/roles/docker/tasks/set_facts_dns.yml
+++ b/roles/docker/tasks/set_facts_dns.yml
@@ -52,8 +52,13 @@
 
 - name: check number of nameservers
   fail:
-    msg: "Too many nameservers"
-  when: docker_dns_servers|length > 3
+    msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=no and we will only use the first 3."
+  when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool
+
+- name: trim the number of nameservers to 3
+  set_fact:
+    docker_dns_servers: "{{ docker_dns_servers[0:3] }}"
+  when: docker_dns_servers|length > 3 and not docker_dns_servers_strict|bool
 
 - name: check number of search domains
   fail:
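Seen from the inventory side, the new behaviour is: with the strict default a fourth nameserver fails the play, and with the switch relaxed the list is silently truncated. A sketch of the relaxed setting:

```
# group_vars/all.yml -- relax the nameserver check introduced above
docker_dns_servers_strict: false   # with more than 3 nameservers, only the first 3 are kept
```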
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 9284fbbdff4ff437336862323b539a7085eb62a0..334406a14b793db930a320438ce2d5df191d82f5 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -52,9 +52,6 @@ calico_policy_image_repo: "calico/kube-policy-controller"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "v0.3.0"
-exechealthz_version: 1.1
-exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
-exechealthz_image_tag: "{{ exechealthz_version }}"
 hyperkube_image_repo: "quay.io/coreos/hyperkube"
 hyperkube_image_tag: "{{ kube_version }}_coreos.0"
 pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
@@ -74,12 +71,16 @@ nginx_image_tag: 1.11.4-alpine
 dnsmasq_version: 2.72
 dnsmasq_image_repo: "andyshinn/dnsmasq"
 dnsmasq_image_tag: "{{ dnsmasq_version }}"
-kubednsmasq_version: 1.3
-kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64"
-kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
-kubedns_version: 1.7
-kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
+kubedns_version: 1.14.2
+kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
 kubedns_image_tag: "{{ kubedns_version }}"
+dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
+dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
+dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
+dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"
+kubednsautoscaler_version: 1.1.1
+kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
+kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
 test_image_repo: busybox
 test_image_tag: latest
 elasticsearch_version: "v2.4.1"
@@ -193,26 +194,31 @@ downloads:
     repo: "{{ dnsmasq_image_repo }}"
     tag: "{{ dnsmasq_image_tag }}"
     sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
-  kubednsmasq:
-    container: true
-    repo: "{{ kubednsmasq_image_repo }}"
-    tag: "{{ kubednsmasq_image_tag }}"
-    sha256: "{{ kubednsmasq_digest_checksum|default(None) }}"
   kubedns:
     container: true
     repo: "{{ kubedns_image_repo }}"
     tag: "{{ kubedns_image_tag }}"
     sha256: "{{ kubedns_digest_checksum|default(None) }}"
+  dnsmasq_nanny:
+    container: true
+    repo: "{{ dnsmasq_nanny_image_repo }}"
+    tag: "{{ dnsmasq_nanny_image_tag }}"
+    sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
+  dnsmasq_sidecar:
+    container: true
+    repo: "{{ dnsmasq_sidecar_image_repo }}"
+    tag: "{{ dnsmasq_sidecar_image_tag }}"
+    sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
+  kubednsautoscaler:
+    container: true
+    repo: "{{ kubednsautoscaler_image_repo }}"
+    tag: "{{ kubednsautoscaler_image_tag }}"
+    sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
   testbox:
     container: true
     repo: "{{ test_image_repo }}"
     tag: "{{ test_image_tag }}"
     sha256: "{{ testbox_digest_checksum|default(None) }}"
-  exechealthz:
-    container: true
-    repo: "{{ exechealthz_image_repo }}"
-    tag: "{{ exechealthz_image_tag }}"
-    sha256: "{{ exechealthz_digest_checksum|default(None) }}"
   elasticsearch:
     container: true
     repo: "{{ elasticsearch_image_repo }}"
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 6326741b35898f748461a37159eaab74cac99f83..8fd72f3dbc7b9e94957b85b51685457d8bff0684 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -2,6 +2,7 @@
 # Set to false to only do certificate management
 etcd_cluster_setup: true
 
+etcd_backup_prefix: "/var/backups"
 etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/"
 etcd_data_dir: "/var/lib/etcd"
 
diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml
index 9a611296b8265dc50ce27541591bda93fe0e9f13..68fe71f07f47d3d4424375e0ee9d90e358fe32d3 100644
--- a/roles/etcd/handlers/backup.yml
+++ b/roles/etcd/handlers/backup.yml
@@ -3,7 +3,6 @@
   command: /bin/true
   notify:
     - Refresh Time Fact
-    - Set etcd Backup Directory Prefix
     - Set Backup Directory
     - Create Backup Directory
     - Backup etcd v2 data
@@ -13,10 +12,6 @@
 - name: Refresh Time Fact
   setup: filter=ansible_date_time
 
-- name: Set etcd Backup Directory Prefix
-  set_fact:
-    etcd_backup_prefix: '/var/backups'
-
 - name: Set Backup Directory
   set_fact:
     etcd_backup_directory: "{{ etcd_backup_prefix }}/etcd-{{ ansible_date_time.date }}_{{ ansible_date_time.time }}"
diff --git a/roles/kargo-defaults/tasks/main.yaml b/roles/kargo-defaults/tasks/main.yaml
deleted file mode 100644
index 91d0bc46347037734362086cd820e4ace22443ce..0000000000000000000000000000000000000000
--- a/roles/kargo-defaults/tasks/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-- name: Configure defaults
-  debug:
-    msg: "Check roles/kargo-defaults/defaults/main.yml"
-  tags:
-    - always
diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml
index 89bdd4277dfad603bcbfabe7b078bcb94d031297..2787472c878c40c931f777db50e51dab75e27455 100644
--- a/roles/kubernetes-apps/ansible/defaults/main.yml
+++ b/roles/kubernetes-apps/ansible/defaults/main.yml
@@ -1,23 +1,23 @@
 # Versions
-kubedns_version: 1.9
-kubednsmasq_version: 1.3
-exechealthz_version: 1.1
+kubedns_version: 1.14.2
+kubednsautoscaler_version: 1.1.1
 
 # Limits for dnsmasq/kubedns apps
-dns_cpu_limit: 100m
 dns_memory_limit: 170Mi
-dns_cpu_requests: 70m
-dns_memory_requests: 50Mi
-kubedns_min_replicas: 1
+dns_cpu_requests: 100m
+dns_memory_requests: 70Mi
+kubedns_min_replicas: 2
 kubedns_nodes_per_replica: 10
 
 # Images
-kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
+kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
 kubedns_image_tag: "{{ kubedns_version }}"
-kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64"
-kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
-exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
-exechealthz_image_tag: "{{ exechealthz_version }}"
+dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
+dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
+dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64"
+dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"
+kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64"
+kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
 
 # Netchecker
 deploy_netchecker: false
@@ -40,3 +40,4 @@ netchecker_server_memory_requests: 64M
 # SSL
 etcd_cert_dir: "/etc/ssl/etcd/ssl"
 canal_cert_dir: "/etc/canal/certs"
+
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index ed0d11f283032241e14c06d22a61dcfa34ac0813..4e7236df62d32c55a5292431d171bd0b17db4ece 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: Kubernetes Apps | Wait for kube-apiserver
   uri:
-    url: http://localhost:8080/healthz
+    url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
   register: result
   until: result.status == 200
   retries: 10
@@ -13,8 +13,8 @@
     src: "{{item.file}}"
     dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
-    - {name: kubedns, file: kubedns-deploy.yml, type: deployment}
-    - {name: kubedns, file: kubedns-svc.yml, type: svc}
+    - {name: kube-dns, file: kubedns-deploy.yml, type: deployment}
+    - {name: kube-dns, file: kubedns-svc.yml, type: svc}
     - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
   register: manifests
   when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml
index c0f519e2c578706f3f9807745fa36292c8aee44a..a1d5455adc654b47e793bacc717ec47e6b096cbf 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml
@@ -32,7 +32,7 @@ spec:
     spec:
       containers:
       - name: autoscaler
-        image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
+        image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"
         resources:
             requests:
                 cpu: "20m"
@@ -42,7 +42,7 @@ spec:
           - --namespace=kube-system
           - --configmap=kubedns-autoscaler
           # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
-          - --target=Deployment/kubedns
+          - --target=Deployment/kube-dns
           - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
           - --logtostderr=true
           - --v=2
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml
index a2150cc70ee4cb5d477fe321dfe9f6d81722009b..3f07aa9058e7232ceb937bed7d295f8298e0d47e 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml
@@ -1,25 +1,39 @@
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  name: kubedns
-  namespace: {{ system_namespace }}
+  name: kube-dns
+  namespace: "{{system_namespace}}"
   labels:
-    k8s-app: kubedns
-    version: v19
+    k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
-  replicas: {{ kubedns_min_replicas }}
+  # replicas: not specified here:
+  # 1. So that the Addon Manager does not reconcile this replicas parameter.
+  # 2. Default is 1.
+  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
+  strategy:
+    rollingUpdate:
+      maxSurge: 10%
+      maxUnavailable: 0
   selector:
     matchLabels:
-      k8s-app: kubedns
-      version: v19
+      k8s-app: kube-dns
   template:
     metadata:
       labels:
-        k8s-app: kubedns
-        version: v19
-        kubernetes.io/cluster-service: "true"
+        k8s-app: kube-dns
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      tolerations:
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      volumes:
+      - name: kube-dns-config
+        configMap:
+          name: kube-dns
+          optional: true
       containers:
       - name: kubedns
         image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
@@ -30,15 +44,14 @@ spec:
           # guaranteed class. Currently, this container falls into the
           # "burstable" category so the kubelet doesn't backoff from restarting it.
           limits:
-            cpu: {{ dns_cpu_limit }}
             memory: {{ dns_memory_limit }}
           requests:
             cpu: {{ dns_cpu_requests }}
             memory: {{ dns_memory_requests }}
         livenessProbe:
           httpGet:
-            path: /healthz
-            port: 8080
+            path: /healthcheck/kubedns
+            port: 10054
             scheme: HTTP
           initialDelaySeconds: 60
           timeoutSeconds: 5
@@ -51,13 +64,16 @@ spec:
             scheme: HTTP
           # we poll on pod startup for the Kubernetes master service and
           # only setup the /readiness HTTP server once that's available.
-          initialDelaySeconds: 30
+          initialDelaySeconds: 3
           timeoutSeconds: 5
         args:
-        # command = "/kube-dns"
         - --domain={{ dns_domain }}.
         - --dns-port=10053
+        - --config-dir=/kube-dns-config
         - --v={{ kube_log_level }}
+        env:
+        - name: PROMETHEUS_PORT
+          value: "10055"
         ports:
         - containerPort: 10053
           name: dns-local
@@ -65,25 +81,36 @@ spec:
         - containerPort: 10053
           name: dns-tcp-local
           protocol: TCP
+        - containerPort: 10055
+          name: metrics
+          protocol: TCP
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /kube-dns-config
       - name: dnsmasq
-        image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
+        image: "{{ dnsmasq_nanny_image_repo }}:{{ dnsmasq_nanny_image_tag }}"
         imagePullPolicy: {{ k8s_image_pull_policy }}
-        resources:
-          limits:
-            cpu: {{ dns_cpu_limit }}
-            memory: {{ dns_memory_limit }}
-          requests:
-            cpu: {{ dns_cpu_requests }}
-            memory: {{ dns_memory_requests }}
+        livenessProbe:
+          httpGet:
+            path: /healthcheck/dnsmasq
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
         args:
-        - --log-facility=-
+        - -v={{ kube_log_level }}
+        - -logtostderr
+        - -configDir=/etc/k8s/dns/dnsmasq-nanny
+        - -restartDnsmasq=true
+        - --
+        - -k
         - --cache-size=1000
-        - --no-resolv
-        - --server=127.0.0.1#10053
-{% if kube_log_level == '4' %}
-        - --log-queries
-{% endif %}
-        - --local=/{{ bogus_domains }}
+        - --log-facility=-
+        - --server=/{{ dns_domain }}/127.0.0.1#10053
+        - --server=/in-addr.arpa/127.0.0.1#10053
+        - --server=/ip6.arpa/127.0.0.1#10053
         ports:
         - containerPort: 53
           name: dns
@@ -91,26 +118,37 @@ spec:
         - containerPort: 53
           name: dns-tcp
           protocol: TCP
-      - name: healthz
-        image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
-        imagePullPolicy: {{ k8s_image_pull_policy }}
+        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
         resources:
-          # keep request = limit to keep this container in guaranteed class
-          limits:
-            cpu: 10m
-            memory: 50Mi
           requests:
-            cpu: 10m
-            # Note that this container shouldn't really need 50Mi of memory. The
-            # limits are set higher than expected pending investigation on #29688.
-            # The extra memory was stolen from the kubedns container to keep the
-            # net memory requested by the pod constant.
-            memory: 50Mi
+            cpu: 150m
+            memory: 20Mi
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /etc/k8s/dns/dnsmasq-nanny
+      - name: sidecar
+        image: "{{ dnsmasq_sidecar_image_repo }}:{{ dnsmasq_sidecar_image_tag }}"
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: 10054
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
         args:
-        - -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null
-        - -port=8080
-        - -quiet
+        - --v={{ kube_log_level }}
+        - --logtostderr
+        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ dns_domain }},5,A
+        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ dns_domain }},5,A
         ports:
-        - containerPort: 8080
+        - containerPort: 10054
+          name: metrics
           protocol: TCP
+        resources:
+          requests:
+            memory: 20Mi
+            cpu: 10m
       dnsPolicy: Default  # Don't use cluster DNS.
+
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml
index ce87793265701a2701f5a151ad3cfc5974d3f85c..0565a01e87553ad62fea4f6b2c103d67bba58cbf 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml
@@ -1,15 +1,16 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: kubedns
+  name: kube-dns
   namespace: {{ system_namespace }}
   labels:
-    k8s-app: kubedns
+    k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
-    kubernetes.io/name: "kubedns"
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/name: "KubeDNS"
 spec:
   selector:
-    k8s-app: kubedns
+    k8s-app: kube-dns
   clusterIP: {{ skydns_server }}
   ports:
   - name: dns
@@ -18,3 +19,4 @@ spec:
   - name: dns-tcp
     port: 53
     protocol: TCP
+
diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml
index 94cec7d1b3cc6c50c63a91fe268787bf7caf624e..e408ce04ebb514bb9860714ab07c133f7f75ccf2 100644
--- a/roles/kubernetes/master/handlers/main.yml
+++ b/roles/kubernetes/master/handlers/main.yml
@@ -39,7 +39,7 @@
 
 - name: Master | wait for the apiserver to be running
   uri:
-    url: http://localhost:8080/healthz
+    url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
   register: result
   until: result.status == 200
   retries: 20
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index 851cca0601ad6d36d99511ab030b3b3d431cd65b..bf4979596c34a77646d3cda6781dc02d001eec15 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -5,7 +5,7 @@ metadata:
   namespace: {{system_namespace}}
   labels:
     k8s-app: kube-apiserver
-    kargo: v2
+    kubespray: v2
 spec:
   hostNetwork: true
 {% if kube_version | version_compare('v1.6', '>=')  %}
@@ -92,7 +92,7 @@ spec:
       httpGet:
         host: 127.0.0.1
         path: /healthz
-        port: 8080
+        port: {{ kube_apiserver_insecure_port }}
       initialDelaySeconds: 30
       timeoutSeconds: 10
     volumeMounts:
@@ -124,4 +124,4 @@ spec:
   - hostPath:
       path: /etc/ssl/certs/ca-bundle.crt
     name: rhel-ca-bundle
-{% endif %}
\ No newline at end of file
+{% endif %}
diff --git a/roles/kubernetes/node/meta/main.yml b/roles/kubernetes/node/meta/main.yml
index 2ef549c90cfba7b9b42ec2f722304c34b5d61993..12a7d73b7b8ed91c4434617e7dadb2e04d58b940 100644
--- a/roles/kubernetes/node/meta/main.yml
+++ b/roles/kubernetes/node/meta/main.yml
@@ -23,11 +23,14 @@ dependencies:
     when: deploy_netchecker
     tags: [download, netchecker]
   - role: download
-    file: "{{ downloads.kubednsmasq }}"
+    file: "{{ downloads.kubedns }}"
     tags: [download, dnsmasq]
   - role: download
-    file: "{{ downloads.kubedns }}"
+    file: "{{ downloads.dnsmasq_nanny }}"
     tags: [download, dnsmasq]
   - role: download
-    file: "{{ downloads.exechealthz }}"
+    file: "{{ downloads.dnsmasq_sidecar }}"
     tags: [download, dnsmasq]
+  - role: download
+    file: "{{ downloads.kubednsautoscaler }}"
+    tags: [download, dnsmasq]
\ No newline at end of file
diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml
index dd5cbf810414fd72fe27ed6cfa9e64ef2996fd82..686c0e9bc56eb77f54708faf974495e58075d0b2 100644
--- a/roles/kubernetes/preinstall/defaults/main.yml
+++ b/roles/kubernetes/preinstall/defaults/main.yml
@@ -32,7 +32,7 @@ openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME')  }}"
 
 # For the vsphere integration, kubelet will need credentials to access
 # vsphere apis
-# Documentation regarting these values can be found 
+# Documentation regarding these values can be found
 # https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
 vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
 vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
@@ -49,3 +49,6 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('')
 # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
 # for hostnet pods and infra needs
 resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
+
+# All inventory hostnames will be written into each /etc/hosts file.
+populate_inventory_to_hosts_file: true
diff --git a/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml b/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml
index 10e5bba68966c79c4fe58118820b7dc0c64369c2..91fb9c694402e59e0869a7d15b7674d37b79968d 100644
--- a/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml
+++ b/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml
@@ -1,9 +1,9 @@
 ---
 
-# These tasks will undo changes done by kargo in the past if needed (e.g. when upgrading from kargo 2.0.x
+# These tasks will undo changes done by kubespray in the past if needed (e.g. when upgrading from kubespray 2.0.x
 # or when changing resolvconf_mode)
 
-- name: Remove kargo specific config from dhclient config
+- name: Remove kubespray specific config from dhclient config
   blockinfile:
     dest: "{{dhclientconffile}}"
     state: absent
@@ -13,7 +13,7 @@
   when: dhclientconffile is defined
   notify: Preinstall | restart network
 
-- name: Remove kargo specific dhclient hook
+- name: Remove kubespray specific dhclient hook
   file:
     path: "{{ dhclienthookfile }}"
     state: absent
diff --git a/roles/kubernetes/preinstall/tasks/etchosts.yml b/roles/kubernetes/preinstall/tasks/etchosts.yml
index df330be088c344181cc3560b9fdf5382497146a7..69496b7c2bf4bce14c8c6888662e2d529620bc53 100644
--- a/roles/kubernetes/preinstall/tasks/etchosts.yml
+++ b/roles/kubernetes/preinstall/tasks/etchosts.yml
@@ -9,6 +9,7 @@
     create: yes
     backup: yes
     marker: "# Ansible inventory hosts {mark}"
+  when: populate_inventory_to_hosts_file
 
 - name: Hosts | populate kubernetes loadbalancer address into hosts file
   lineinfile:
diff --git a/roles/kargo-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
similarity index 100%
rename from roles/kargo-defaults/defaults/main.yaml
rename to roles/kubespray-defaults/defaults/main.yaml
diff --git a/roles/kubespray-defaults/tasks/main.yaml b/roles/kubespray-defaults/tasks/main.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5b2cb96a071093552b61efcbbb43a6a6bb6b388b
--- /dev/null
+++ b/roles/kubespray-defaults/tasks/main.yaml
@@ -0,0 +1,5 @@
+- name: Configure defaults
+  debug:
+    msg: "Check roles/kubespray-defaults/defaults/main.yml"
+  tags:
+    - always
diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml
index 598faf91b78d6a20c19de367ab04bfc5436beaba..d80ea02829a300440ec65ac643b458f81af719ac 100644
--- a/roles/network_plugin/calico/defaults/main.yml
+++ b/roles/network_plugin/calico/defaults/main.yml
@@ -4,6 +4,7 @@ nat_outgoing: true
 
 # Use IP-over-IP encapsulation across hosts
 ipip: true
+ipip_mode: always  # change to "cross-subnet" if you only want ipip encapsulation on traffic going across subnets
 
 # Set to true if you want your calico cni binaries to overwrite the
 # ones from hyperkube while leaving other cni plugins intact.
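A hedged example of combining the two IP-in-IP knobs for clusters whose nodes span several subnets:

```
# group_vars/k8s-cluster.yml -- Calico IP-in-IP settings (illustrative)
ipip: true
ipip_mode: cross-subnet   # encapsulate only traffic that crosses a subnet boundary
```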
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml
index 716086aed75d092dc6c77fecec1e3805a4c6e2b7..38d3ad5db69a6cbac39a37fdd10e7d5468c22d53 100644
--- a/roles/network_plugin/calico/tasks/main.yml
+++ b/roles/network_plugin/calico/tasks/main.yml
@@ -94,7 +94,7 @@
   shell: >
     echo '{
     "kind": "ipPool",
-    "spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}},
+    "spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}, "mode": "{{ ipip_mode }}"},
              "nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}},
     "apiVersion": "v1",
     "metadata": {"cidr": "{{ kube_pods_subnet }}"}
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 96984f92b9b67d70fe390905d3a9b3af3171d9a0..af3e66601bf54a5936f223828618e8639b559964 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -83,6 +83,15 @@
     - /etc/dhcp/dhclient.d/zdnsupdate.sh
     - /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
     - "{{ bin_dir }}/kubelet"
+    - "{{ bin_dir }}/kubernetes-scripts"
+    - /run/flannel
+    - /etc/flannel
+    - /run/kubernetes
+    - /usr/local/share/ca-certificates/kube-ca.crt
+    - /usr/local/share/ca-certificates/etcd-ca.crt
+    - /etc/ssl/certs/kube-ca.pem
+    - /etc/ssl/certs/etcd-ca.pem
+    - /var/log/pods/
   tags: ['files']
 
 
diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml
index c32f42491fcbadb770438931b6257514ba94fca5..e7efa0601db2df910c9911ff9c5e5c4d738b9f1d 100644
--- a/roles/upgrade/post-upgrade/tasks/main.yml
+++ b/roles/upgrade/post-upgrade/tasks/main.yml
@@ -3,4 +3,5 @@
 - name: Uncordon node
   command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}"
   delegate_to: "{{ groups['kube-master'][0] }}"
-  when: needs_cordoning|default(false)
+  when: needs_cordoning|default(false) and inventory_hostname in groups['kube-node']
+
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index a2b34927f55191550d858d1ea8846da0a4d72ba4..decc9d05b20fd289e9d312762c9df26b60223f04 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -7,11 +7,11 @@
 
 - set_fact:
     needs_cordoning: >-
-      {% if " Ready" in kubectl_nodes.stdout %}
+      {% if " Ready" in kubectl_nodes.stdout -%}
       true
-      {% else %}
+      {%- else -%}
       false
-      {% endif %}
+      {%- endif %}
 
 - name: Cordon node
   command: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}"
diff --git a/roles/vault/tasks/bootstrap/ca_trust.yml b/roles/vault/tasks/bootstrap/ca_trust.yml
index 63ab256d5710936517ea0b457fa95dcf2da41978..ae67f740556a44d2d131846ae7649ce63cdd90cb 100644
--- a/roles/vault/tasks/bootstrap/ca_trust.yml
+++ b/roles/vault/tasks/bootstrap/ca_trust.yml
@@ -1,6 +1,6 @@
 ---
 
-- name: bootstrap/ca_trust | pull CA from cert from groups.vault|first
+- name: "bootstrap/ca_trust | pull CA from cert from {{groups.vault|first}}"
   command: "cat {{ vault_cert_dir }}/ca.pem"
   register: vault_cert_file_cat
   delegate_to: "{{ groups['vault']|first }}"
diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml
index 0733e86a04ab23e7aaccc4153fc7a88788a315b2..cb3685bf5ea49f05bdb1b57830360ef211794df9 100644
--- a/roles/vault/tasks/shared/issue_cert.yml
+++ b/roles/vault/tasks/shared/issue_cert.yml
@@ -26,7 +26,7 @@
     mode: "{{ issue_cert_dir_mode | d('0755') }}"
     owner: "{{ issue_cert_file_owner | d('root') }}"
 
-- name: issue_cert | Generate the cert
+- name: "issue_cert | Generate the cert for {{ issue_cert_role }}"
   uri:
     url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount|d('pki') }}/issue/{{ issue_cert_role }}"
     headers: "{{ issue_cert_headers }}"
@@ -40,7 +40,7 @@
   register: issue_cert_result
   when: inventory_hostname == issue_cert_hosts|first
 
-- name: issue_cert | Copy the cert to all hosts
+- name: "issue_cert | Copy {{ issue_cert_path }} cert to all hosts"
   copy:
     content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['certificate'] }}"
     dest: "{{ issue_cert_path }}"
@@ -48,7 +48,7 @@
     mode: "{{ issue_cert_file_mode | d('0644') }}"
     owner: "{{ issue_cert_file_owner | d('root') }}"
 
-- name: issue_cert | Copy the key to all hosts
+- name: "issue_cert | Copy key for {{ issue_cert_path }} to all hosts"
   copy:
     content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['private_key'] }}"
     dest: "{{ issue_cert_path.rsplit('.', 1)|first }}-key.{{ issue_cert_path.rsplit('.', 1)|last }}"
diff --git a/roles/vault/tasks/shared/sync.yml b/roles/vault/tasks/shared/sync.yml
index bbfedbc4c6e9d7b2fbcecc766d0a68188c95c9a7..102532f0c8525f562950e3bc8651ea97d6fb16d7 100644
--- a/roles/vault/tasks/shared/sync.yml
+++ b/roles/vault/tasks/shared/sync.yml
@@ -28,7 +28,7 @@
     state: directory
   when: inventory_hostname not in sync_file_srcs
 
-- name: "sync_file | Copy the file to hosts that don't have it"
+- name: "sync_file | Copy {{ sync_file_path }} to hosts that don't have it"
   copy:
     content: "{{ sync_file_contents }}"
     dest: "{{ sync_file_path }}"
@@ -37,7 +37,7 @@
     owner: "{{ sync_file_owner|d('root') }}"
   when: inventory_hostname not in sync_file_srcs
 
-- name: "sync_file | Copy the key file to hosts that don't have it"
+- name: "sync_file | Copy {{ sync_file_key_path }} to hosts that don't have it"
   copy:
     content: "{{ sync_file_key_contents }}"
     dest: "{{ sync_file_key_path }}"
diff --git a/roles/vault/tasks/shared/sync_file.yml b/roles/vault/tasks/shared/sync_file.yml
index ef53e9d9064bc7deadc5d0df0bee1e87a7e1d738..be5284154ff00e03839c06ae215be57df5d26ad1 100644
--- a/roles/vault/tasks/shared/sync_file.yml
+++ b/roles/vault/tasks/shared/sync_file.yml
@@ -19,12 +19,12 @@
   when: >-
         sync_file_is_cert|d() and (sync_file_key_path is not defined or sync_file_key_path == '')
 
-- name: "sync_file | Check if file exists"
+- name: "sync_file | Check if {{sync_file_path}} file exists"
   stat:
     path: "{{ sync_file_path }}"
   register: sync_file_stat
 
-- name: "sync_file | Check if key file exists"
+- name: "sync_file | Check if {{ sync_file_key_path }} key file exists"
   stat:
     path: "{{ sync_file_key_path }}"
   register: sync_file_key_stat
diff --git a/scale.yml b/scale.yml
index 02e79aa37c5230dfcbda82c8c32a2c6fb95b21a6..49445cabcdf7c3848968857e8f223d64a60a59c3 100644
--- a/scale.yml
+++ b/scale.yml
@@ -7,7 +7,7 @@
   vars:
     ansible_ssh_pipelining: false
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: bootstrap-os, tags: bootstrap-os}
 
 ##We still have to gather facts about our masters and etcd nodes
@@ -21,7 +21,7 @@
 - hosts: kube-node
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: docker, tags: docker }
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index d1e7c011e82bb324b546640ab630f928eed2d2ab..1a82c50d7f32e0ce13dbc25fa95ca72c25792749 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -30,7 +30,7 @@
         credentials_file: "{{gce_credentials_file | default(omit)}}"
         project_id: "{{ gce_project_id }}"
         zone: "{{cloud_region}}"
-        metadata: '{"test_id": "{{test_id}}", "network": "{{kube_network_plugin}}"}'
+        metadata: '{"test_id": "{{test_id}}", "network": "{{kube_network_plugin}}", "startup-script": "{{startup_script}}"}'
         tags: "build-{{test_name}},{{kube_network_plugin}}"
       register: gce
 
@@ -52,5 +52,5 @@
       when: mode in ['scale', 'separate-scale', 'ha-scale']
 
     - name: Wait for SSH to come up
-      wait_for: host={{item.public_ip}} port=22 delay=10 timeout=180 state=started
+      wait_for: host={{item.public_ip}} port=22 delay=30 timeout=180 state=started
       with_items: "{{gce.instance_data}}"
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 0b46138207d09b5b6c1625b50c6c90ed020d4832..1a66904ceba6f83bebab82eafd7e3b76dfe38d3f 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -2,7 +2,7 @@
 - hosts: localhost
   gather_facts: False
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
 - hosts: k8s-cluster:etcd:calico-rr
@@ -13,7 +13,7 @@
     # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
     ansible_ssh_pipelining: false
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: bootstrap-os, tags: bootstrap-os}
 
 - hosts: k8s-cluster:etcd:calico-rr
@@ -25,7 +25,7 @@
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: docker, tags: docker }
@@ -36,25 +36,25 @@
 - hosts: etcd:k8s-cluster:vault
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults, when: "cert_management == 'vault'" }
+    - { role: kubespray-defaults, when: "cert_management == 'vault'" }
     - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
 
 - hosts: etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: etcd, tags: etcd, etcd_cluster_setup: true }
 
 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: etcd, tags: etcd, etcd_cluster_setup: false }
 
 - hosts: etcd:k8s-cluster:vault
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults, when: "cert_management == 'vault'"}
+    - { role: kubespray-defaults, when: "cert_management == 'vault'"}
     - { role: vault, tags: vault, when: "cert_management == 'vault'"}
 
 #Handle upgrades to master components first to maintain backwards compat.
@@ -62,46 +62,47 @@
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/master, tags: master }
     - { role: network_plugin, tags: network }
+    - { role: upgrade/post-upgrade, tags: post-upgrade }
 
 #Finally handle worker upgrades, based on given batch size
 - hosts: kube-node:!kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
     - { role: network_plugin, tags: network }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
 
 - hosts: kube-master
   any_errors_fatal: true
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: kubernetes-apps/network_plugin, tags: network }
     - { role: kubernetes-apps/policy_controller, tags: policy-controller }
 
 - hosts: calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: network_plugin/calico/rr, tags: network }
 
 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
     - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
 
 - hosts: kube-master[0]
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kargo-defaults}
+    - { role: kubespray-defaults}
     - { role: kubernetes-apps, tags: apps }