diff --git a/Vagrantfile b/Vagrantfile
index 8d3f2bbddf0296da3f7c861d22f1c8c3167f5c48..b769199b1836bea735d07d5d3dcf971c21eb9526 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -23,6 +23,7 @@ $etcd_instances = $num_instances
 $kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
 # All nodes are kube nodes
 $kube_node_instances = $num_instances
+$local_release_dir = "/vagrant/temp"
 
 host_vars = {}
 
@@ -97,7 +98,7 @@ Vagrant.configure("2") do |config|
         "ip": ip,
         "flannel_interface": ip,
         "flannel_backend_type": "host-gw",
-        "local_release_dir": "/vagrant/temp",
+        "local_release_dir": $local_release_dir,
         "download_run_once": "False",
         # Override the default 'calico' with flannel.
         # inventory/group_vars/k8s-cluster.yml
diff --git a/cluster.yml b/cluster.yml
index f0c324174fd53e7fabf1efc4569a7c97e9f038f5..01b033b2f85c0df1d7da87457f4a7912757ed46d 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -39,17 +39,17 @@
     - { role: kargo-defaults, when: "cert_management == 'vault'" }
     - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
 
-- hosts: etcd:!k8s-cluster
+- hosts: etcd
   any_errors_fatal: true
   roles:
     - { role: kargo-defaults}
-    - { role: etcd, tags: etcd }
+    - { role: etcd, tags: etcd, etcd_cluster_setup: true }
 
 - hosts: k8s-cluster
   any_errors_fatal: true
   roles:
     - { role: kargo-defaults}
-    - { role: etcd, tags: etcd }
+    - { role: etcd, tags: etcd, etcd_cluster_setup: false }
 
 - hosts: etcd:k8s-cluster:vault
   any_errors_fatal: true
diff --git a/contrib/aws_iam/kubernetes-master-policy.json b/contrib/aws_iam/kubernetes-master-policy.json
new file mode 100644
index 0000000000000000000000000000000000000000..e5cbaea8039596326e496eeee2893ecf3fad7849
--- /dev/null
+++ b/contrib/aws_iam/kubernetes-master-policy.json
@@ -0,0 +1,27 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": ["ec2:*"],
+      "Resource": ["*"]
+    },
+    {
+      "Effect": "Allow",
+      "Action": ["elasticloadbalancing:*"],
+      "Resource": ["*"]
+    },
+    {
+      "Effect": "Allow",
+      "Action": ["route53:*"],
+      "Resource": ["*"]
+    },
+    {
+      "Effect": "Allow",
+      "Action": "s3:*",
+      "Resource": [
+        "arn:aws:s3:::kubernetes-*"
+      ]
+    }
+  ]
+}
diff --git a/contrib/aws_iam/kubernetes-master-role.json b/contrib/aws_iam/kubernetes-master-role.json
new file mode 100644
index 0000000000000000000000000000000000000000..66d5de1d5ae1e186daa7a275a2265cd7538e948d
--- /dev/null
+++ b/contrib/aws_iam/kubernetes-master-role.json
@@ -0,0 +1,10 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": { "Service": "ec2.amazonaws.com"},
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
diff --git a/contrib/aws_iam/kubernetes-minion-policy.json b/contrib/aws_iam/kubernetes-minion-policy.json
new file mode 100644
index 0000000000000000000000000000000000000000..af81e98c824da2ddee6d63536bcfe26f851f13fd
--- /dev/null
+++ b/contrib/aws_iam/kubernetes-minion-policy.json
@@ -0,0 +1,45 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": "s3:*",
+      "Resource": [
+        "arn:aws:s3:::kubernetes-*"
+      ]
+    },
+    {
+      "Effect": "Allow",
+      "Action": "ec2:Describe*",
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": "ec2:AttachVolume",
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": "ec2:DetachVolume",
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": ["route53:*"],
+      "Resource": ["*"]
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "ecr:GetAuthorizationToken",
+        "ecr:BatchCheckLayerAvailability",
+        "ecr:GetDownloadUrlForLayer",
+        "ecr:GetRepositoryPolicy",
+        "ecr:DescribeRepositories",
+        "ecr:ListImages",
+        "ecr:BatchGetImage"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
diff --git a/contrib/aws_iam/kubernetes-minion-role.json b/contrib/aws_iam/kubernetes-minion-role.json
new file mode 100644
index 0000000000000000000000000000000000000000..66d5de1d5ae1e186daa7a275a2265cd7538e948d
--- /dev/null
+++ b/contrib/aws_iam/kubernetes-minion-role.json
@@ -0,0 +1,10 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": { "Service": "ec2.amazonaws.com"},
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
diff --git a/docs/atomic.md b/docs/atomic.md
new file mode 100644
index 0000000000000000000000000000000000000000..cb506a9f3d24b9244b698b980277cdb2d9f7c106
--- /dev/null
+++ b/docs/atomic.md
@@ -0,0 +1,22 @@
+Atomic host bootstrap
+=====================
+
+Atomic host testing has been done with the network plugin flannel. Change the inventory var `kube_network_plugin: flannel`.
+
+Note: Flannel is the only plugin that has currently been tested with atomic
+
+### Vagrant
+
+* For bootstrapping with Vagrant, use the box `centos/atomic-host`
+* Update the Vagrantfile variable `local_release_dir` to `/var/vagrant/temp`.
+* Update `vm_memory = 2048` and `vm_cpus = 2`
+* Networking on vagrant hosts has to be brought up manually once they are booted.
+
+    ```
+    vagrant ssh
+    sudo /sbin/ifup enp0s8
+    ```
+
+* For users of vagrant-libvirt download qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
+
+Then you can proceed to [cluster deployment](#run-deployment)
\ No newline at end of file
diff --git a/docs/aws.md b/docs/aws.md
index 429e77a5496f91b62f969fc636f5c198cc318f83..b16b8d72508a5c0b28fce55901f93992a2abf1b3 100644
--- a/docs/aws.md
+++ b/docs/aws.md
@@ -3,7 +3,7 @@ AWS
 
 To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
 
-Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes/kubernetes/tree/master/cluster/aws/templates/iam). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
+Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
 
 The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
 
diff --git a/docs/vars.md b/docs/vars.md
index b763f6a34f82744d19569bed17e639de05221f13..966b3ffc831e77ec52f243efaaa14de9e4d12655 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -102,4 +102,3 @@ Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.
 
 Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
 passwords default to changeme. You can set this by changing ``kube_api_pwd``.
-
diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml
index ca46d28171c04b3f4a4fe73522d7dab4b30c27a4..a4f6fbda4474b028557ab2f61267dc47a8a2d1d6 100644
--- a/inventory/group_vars/all.yml
+++ b/inventory/group_vars/all.yml
@@ -47,7 +47,7 @@
 
 ## There are some changes specific to the cloud providers
 ## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', or 'vsphere'
 ## When openstack is used make sure to source in the openstack credentials
 ## like you would do when using nova-client before starting the playbook.
 #cloud_provider:
diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml
index 7f135557776f7c0004737761136a6ab8c3ab458a..4adefb39448bc5cd0392298960cd1236d8292a04 100644
--- a/roles/bootstrap-os/tasks/main.yml
+++ b/roles/bootstrap-os/tasks/main.yml
@@ -8,4 +8,12 @@
 - include: bootstrap-centos.yml
   when: bootstrap_os == "centos"
 
-- include: setup-pipelining.yml
\ No newline at end of file
+- include: setup-pipelining.yml
+
+- name: check if atomic host
+  stat:
+    path: /run/ostree-booted
+  register: ostree
+
+- set_fact:
+    is_atomic: "{{ ostree.stat.exists }}"
\ No newline at end of file
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 3e7b342f2248709045375b6d852122e009c4f47a..cdfae82421bd96cbe2fb04854efc5262ba52c0c5 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -38,7 +38,7 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ docker_repo_key_info.repo_keys }}"
-  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
 
 - name: ensure docker repository is enabled
   action: "{{ docker_repo_info.pkg_repo }}"
@@ -46,13 +46,13 @@
     repo: "{{item}}"
     state: present
   with_items: "{{ docker_repo_info.repos }}"
-  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (docker_repo_info.repos|length > 0)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_repo_info.repos|length > 0)
 
 - name: Configure docker repository on RedHat/CentOS
   template:
     src: "rh_docker.repo.j2"
     dest: "/etc/yum.repos.d/docker.repo"
-  when: ansible_distribution in ["CentOS","RedHat"]
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
 
 - name: ensure docker packages are installed
   action: "{{ docker_package_info.pkg_mgr }}"
@@ -66,7 +66,7 @@
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{ docker_package_info.pkgs }}"
   notify: restart docker
-  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (docker_package_info.pkgs|length > 0)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_package_info.pkgs|length > 0)
 
 - name: check minimum docker version for docker_dns mode. You need at least docker version >= 1.12 for resolvconf_mode=docker_dns
   command: "docker version -f '{{ '{{' }}.Client.Version{{ '}}' }}'"
diff --git a/roles/docker/tasks/systemd.yml b/roles/docker/tasks/systemd.yml
index 18710ac4983e280a5a16b655eab87afdc97a09bb..1275de5d73041c90eef0317d986773766bd616be 100644
--- a/roles/docker/tasks/systemd.yml
+++ b/roles/docker/tasks/systemd.yml
@@ -15,7 +15,14 @@
     src: docker.service.j2
     dest: /etc/systemd/system/docker.service
   register: docker_service_file
-  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
+
+- name: Write docker.service systemd file for atomic
+  template:
+    src: docker_atomic.service.j2
+    dest: /etc/systemd/system/docker.service
+  notify: restart docker
+  when: is_atomic
 
 - name: Write docker options systemd drop-in
   template:
diff --git a/roles/docker/templates/docker-dns.conf.j2 b/roles/docker/templates/docker-dns.conf.j2
index 01dbd3b20b973782c46cf1b2e674921f86acfabb..d501a19c07e17854accc9b2312d7d455bd79a4b8 100644
--- a/roles/docker/templates/docker-dns.conf.j2
+++ b/roles/docker/templates/docker-dns.conf.j2
@@ -3,4 +3,4 @@ Environment="DOCKER_DNS_OPTIONS=\
     {% for d in docker_dns_servers %}--dns {{ d }} {% endfor %} \
     {% for d in docker_dns_search_domains %}--dns-search {{ d }} {% endfor %} \
     {% for o in docker_dns_options %}--dns-opt {{ o }} {% endfor %} \
-"
+"
\ No newline at end of file
diff --git a/roles/docker/templates/docker-options.conf.j2 b/roles/docker/templates/docker-options.conf.j2
index 50356a9f41f40bd678f6ee2091679977b805720f..01279589820d38cd893e5e8fa85c06b1499ca07c 100644
--- a/roles/docker/templates/docker-options.conf.j2
+++ b/roles/docker/templates/docker-options.conf.j2
@@ -1,2 +1,2 @@
 [Service]
-Environment="DOCKER_OPTS={% if docker_options is defined %}{{ docker_options }}{% endif %}"
+Environment="DOCKER_OPTS={% if docker_options is defined %}{{ docker_options }}{% endif %}"
\ No newline at end of file
diff --git a/roles/docker/templates/docker_atomic.service.j2 b/roles/docker/templates/docker_atomic.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..ba37bf4c338f00899746686b6d900245f80be27f
--- /dev/null
+++ b/roles/docker/templates/docker_atomic.service.j2
@@ -0,0 +1,38 @@
+[Unit]
+Description=Docker Application Container Engine
+Documentation=http://docs.docker.com
+After=network.target
+Wants=docker-storage-setup.service
+
+[Service]
+Type=notify
+NotifyAccess=all
+EnvironmentFile=-/etc/sysconfig/docker
+EnvironmentFile=-/etc/sysconfig/docker-storage
+EnvironmentFile=-/etc/sysconfig/docker-network
+Environment=GOTRACEBACK=crash
+Environment=DOCKER_HTTP_HOST_COMPAT=1
+Environment=PATH=/usr/libexec/docker:/usr/bin:/usr/sbin
+ExecReload=/bin/kill -s HUP $MAINPID
+Delegate=yes
+KillMode=process
+ExecStart=/usr/bin/dockerd-current \
+          --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current \
+          --default-runtime=docker-runc \
+          --exec-opt native.cgroupdriver=systemd \
+          --userland-proxy-path=/usr/libexec/docker/docker-proxy-current \
+          $DOCKER_OPTS \
+          $DOCKER_STORAGE_OPTIONS \
+          $DOCKER_NETWORK_OPTIONS \
+          $DOCKER_DNS_OPTIONS \
+          $ADD_REGISTRY \
+          $BLOCK_REGISTRY \
+          $INSECURE_REGISTRY
+LimitNOFILE=1048576
+LimitNPROC=1048576
+LimitCORE=infinity
+TimeoutStartSec=1min
+Restart=on-abnormal
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 2282280208925c2693850a0e1157f735ee0fca4b..6326741b35898f748461a37159eaab74cac99f83 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -1,4 +1,7 @@
 ---
+# Set to false to only do certificate management
+etcd_cluster_setup: true
+
 etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/"
 etcd_data_dir: "/var/lib/etcd"
 
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index bff76a129b0c0aba8caa819e966276a41b751004..9bd6f02a31b044013db6f2d79f215a826be8c83a 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -2,7 +2,7 @@
 dependencies:
   - role: adduser
     user: "{{ addusers.etcd }}"
-    when: not ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
+    when: not (ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] or is_atomic)
   - role: download
     file: "{{ downloads.etcd }}"
     tags: download
diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml
index bc14e255fa1d244f1be3bb86c099f05b3a1d9da9..9bb32f16288d8a5860b2a9a4c21164aea2fce740 100644
--- a/roles/etcd/tasks/check_certs.yml
+++ b/roles/etcd/tasks/check_certs.yml
@@ -1,18 +1,11 @@
 ---
 - name: "Check_certs | check if all certs have already been generated on first master"
-  stat:
-    path: "{{ etcd_cert_dir }}/{{ item }}"
-    get_md5: no
+  find:
+    paths: "{{ etcd_cert_dir }}"
+    patterns: "ca.pem,node*.pem"
   delegate_to: "{{groups['etcd'][0]}}"
   register: etcdcert_master
   run_once: true
-  with_items: >-
-       ['ca.pem',
-       {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique %}
-       {% for host in all_etcd_hosts %}
-       'node-{{ host }}-key.pem'
-       {% if not loop.last %}{{','}}{% endif %}
-       {% endfor %}]
 
 - name: "Check_certs | Set default value for 'sync_certs', 'gen_certs' and 'etcd_secret_changed' to false"
   set_fact:
@@ -20,34 +13,56 @@
     gen_certs: false
     etcd_secret_changed: false
 
-- name: "Check_certs | Set 'gen_certs' to true"
-  set_fact:
-    gen_certs: true
-  when: "not {{item.stat.exists}}"
-  run_once: true
-  with_items: "{{etcdcert_master.results}}"
-
-- name: "Check certs | check if a cert already exists"
+- name: "Check certs | check if a cert already exists on node"
   stat:
     path: "{{ etcd_cert_dir }}/{{ item }}"
-  register: etcdcert
+  register: etcdcert_node
   with_items:
     - ca.pem
     - node-{{ inventory_hostname }}-key.pem
 
+
+- name: "Check_certs | Set 'gen_certs' to true"
+  set_fact:
+    gen_certs: true
+  when: "not '{{ item }}' in etcdcert_master.files|map(attribute='path') | list"
+  run_once: true
+  with_items: >-
+       ['{{etcd_cert_dir}}/ca.pem',
+       {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort %}
+       {% for host in all_etcd_hosts %}
+       '{{etcd_cert_dir}}/node-{{ host }}-key.pem'
+       {% if not loop.last %}{{','}}{% endif %}
+       {% endfor %}]
+
+
+- name: "Check_certs | Set 'gen_node_certs' to true"
+  set_fact:
+    gen_node_certs: |-
+      {
+      {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort -%}
+      {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
+      {% for host in all_etcd_hosts -%}
+        {% set host_cert = "%s/node-%s-key.pem"|format(etcd_cert_dir, host) %}
+        {% if host_cert in existing_certs -%}
+        "{{ host }}": False,
+        {% else -%}
+        "{{ host }}": True,
+        {% endif -%}
+      {% endfor %}
+      }
+  run_once: true
+
 - name: "Check_certs | Set 'sync_certs' to true"
   set_fact:
     sync_certs: true
   when: >-
       {%- set certs = {'sync': False} -%}
-      {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique %}
-      {% for host in all_etcd_hosts %}
-        {% if host == inventory_hostname %}
-          {% if (not etcdcert.results[0].stat.exists|default(False)) or
-              (not etcdcert.results[1].stat.exists|default(False)) or
-              (etcdcert.results[1].stat.checksum|default('') != etcdcert_master.results[loop.index].stat.checksum|default('')) -%}
-            {%- set _ = certs.update({'sync': True}) -%}
-          {% endif %}
-        {% endif %}
-      {%- endfor -%}
+      {% if gen_node_certs[inventory_hostname] or
+        (not etcdcert_node.results[0].stat.exists|default(False)) or
+          (not etcdcert_node.results[1].stat.exists|default(False)) or
+            (etcdcert_node.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcdcert_node.results[1].stat.path)|first|map(attribute="checksum")|default('')) -%}
+              {%- set _ = certs.update({'sync': True}) -%}
+      {% endif %}
       {{ certs.sync }}
+
diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml
index bb6d55660b9da0de29d8922f01a3a67750fdd1dc..06d86257ccdae518ed96076ea314db2ec9345981 100644
--- a/roles/etcd/tasks/gen_certs_script.yml
+++ b/roles/etcd/tasks/gen_certs_script.yml
@@ -43,15 +43,15 @@
   when: gen_certs|default(false)
 
 - name: Gen_certs | run cert generation script
-  command: "{{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
+  command: "bash {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
   environment:
     - MASTERS: "{% for m in groups['etcd'] %}
-                  {% if hostvars[m].sync_certs|default(false) %}
+                  {% if gen_node_certs[m] %}
                     {{ m }}
                   {% endif %}
                 {% endfor %}"
     - HOSTS: "{% for h in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
-                {% if hostvars[h].sync_certs|default(false) %}
+                {% if gen_node_certs[h] %}
                     {{ h }}
                 {% endif %}
               {% endfor %}"
@@ -107,14 +107,38 @@
         sync_certs|default(false) and inventory_hostname not in groups['etcd']
   notify: set etcd_secret_changed
 
-- name: Gen_certs | Copy certs on masters
-  shell: "base64 -d <<< '{{etcd_master_cert_data.stdout|quote}}' | tar xz -C {{ etcd_cert_dir }}"
-  args:
-    executable: /bin/bash
-  no_log: true
-  changed_when: false
+#NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k
+#char limit when using shell command
+
+#FIXME(mattymo): Use tempfile module in ansible 2.3
+- name: Gen_certs | Prepare tempfile for unpacking certs
+  shell: mktemp /tmp/certsXXXXX.tar.gz
+  register: cert_tempfile
+
+- name: Gen_certs | Write master certs to tempfile
+  copy:
+    content: "{{etcd_master_cert_data.stdout}}"
+    dest: "{{cert_tempfile.stdout}}"
+    owner: root
+    mode: "0600"
   when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
-        inventory_hostname != groups['etcd'][0]
+        inventory_hostname != groups['etcd'][0]
+
+- name: Gen_certs | Unpack certs on masters
+  shell: "base64 -d < {{ cert_tempfile.stdout }} | tar xz -C {{ etcd_cert_dir }}"
+  no_log: true
+  changed_when: false
+  check_mode: no
+  when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
+        inventory_hostname != groups['etcd'][0]
+  notify: set etcd_secret_changed
+
+- name: Gen_certs | Cleanup tempfile
+  file:
+    path: "{{cert_tempfile.stdout}}"
+    state: absent
+  when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
+        inventory_hostname != groups['etcd'][0]
 
 - name: Gen_certs | Copy certs on nodes
   shell: "base64 -d <<< '{{etcd_node_cert_data.stdout|quote}}' | tar xz -C {{ etcd_cert_dir }}"
@@ -163,4 +187,3 @@
 - name: Gen_certs | update ca-certificates (RedHat)
   command: update-ca-trust extract
   when: etcd_ca_cert.changed and ansible_os_family == "RedHat"
-
diff --git a/roles/etcd/tasks/install_host.yml b/roles/etcd/tasks/install_host.yml
deleted file mode 100644
index 6f588a2f04efe1d5f05f0563d00b908e1fe3446f..0000000000000000000000000000000000000000
--- a/roles/etcd/tasks/install_host.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: Install | Copy etcd binary from downloaddir
-  command: rsync -piu "{{ etcd_bin_dir }}/etcd" "{{ bin_dir }}/etcd"
-  register: etcd_copy
-  changed_when: false
-
-- name: Install | Copy etcdctl binary from downloaddir
-  command: rsync -piu "{{ etcd_bin_dir }}/etcdctl" "{{ bin_dir }}/etcdctl"
-  changed_when: false
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 1af265736755e22b307b0b6b633ba1a589cb069f..02737ea31222530bfec2860b82cd7f06571594be 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,5 +1,6 @@
 ---
 - include: pre_upgrade.yml
+  when: etcd_cluster_setup
   tags: etcd-pre-upgrade
 
 - include: check_certs.yml
@@ -27,19 +28,18 @@
   tags: upgrade
 
 - include: set_cluster_health.yml
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup
 
 - include: configure.yml
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup
 
 - include: refresh_config.yml
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup
 
-- name: Restart etcd if binary or certs changed
+- name: Restart etcd if certs changed
   command: /bin/true
   notify: restart etcd
-  when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_master
-    or etcd_secret_changed|default(false)
+  when: is_etcd_master and etcd_secret_changed|default(false)
 
 # reload-systemd
 - meta: flush_handlers
@@ -49,13 +49,13 @@
     name: etcd
     state: started
     enabled: yes
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup
 
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state insted of `new`.
 - include: set_cluster_health.yml
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup
 
 - include: refresh_config.yml
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup
diff --git a/roles/kernel-upgrade/tasks/main.yml b/roles/kernel-upgrade/tasks/main.yml
index 999eb94aeb6abcb68f50c30329b81b650aa3e195..a16f0f37bbd42261300f52148f91b7bbf8909e5e 100644
--- a/roles/kernel-upgrade/tasks/main.yml
+++ b/roles/kernel-upgrade/tasks/main.yml
@@ -2,4 +2,4 @@
 
 - include: centos-7.yml
   when: ansible_distribution in ["CentOS","RedHat"] and
-        ansible_distribution_major_version >= 7
+        ansible_distribution_major_version|int >= 7 and not is_atomic
\ No newline at end of file
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index 86c9e2d78e79ea86b626b27c056dd1e91cc555e3..4619db8d5da1b75553cd0cbb3fc47b10a6bff0d4 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -52,7 +52,7 @@ spec:
 {% endif %}
     - --v={{ kube_log_level }}
     - --allow-privileged=true
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure"] %}
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
     - --cloud-provider={{ cloud_provider }}
     - --cloud-config={{ kube_config_dir }}/cloud_config
 {% elif cloud_provider is defined and cloud_provider == "aws" %}
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
index 7bcd51cc4d0edfc26d74546560a1be8fda6eb909..6faf6dea512a451b357400f896020129503a19e0 100644
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -32,7 +32,7 @@ spec:
     - --node-monitor-period={{ kube_controller_node_monitor_period }}
     - --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }}
     - --v={{ kube_log_level }}
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure"] %}
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
     - --cloud-provider={{cloud_provider}}
     - --cloud-config={{ kube_config_dir }}/cloud_config
 {% elif cloud_provider is defined and cloud_provider == "aws" %}
@@ -54,7 +54,7 @@ spec:
     - mountPath: {{ kube_cert_dir }}
       name: ssl-certs-kubernetes
       readOnly: true
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure"] %}
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
     - mountPath: {{ kube_config_dir }}/cloud_config
       name: cloudconfig
       readOnly: true
@@ -63,7 +63,7 @@ spec:
   - hostPath:
       path: {{ kube_cert_dir }}
     name: ssl-certs-kubernetes
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure"] %}
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
   - hostPath:
       path: {{ kube_config_dir }}/cloud_config
     name: cloudconfig
diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2
index 8ec348a056fea6016dcd5940885e833c5c1ed186..10135c13fb7765e5e87ec47d736b80556449ee9b 100644
--- a/roles/kubernetes/node/templates/kubelet.j2
+++ b/roles/kubernetes/node/templates/kubelet.j2
@@ -42,7 +42,7 @@ KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kuben
 {% endif %}
 # Should this cluster be allowed to run privileged docker containers
 KUBE_ALLOW_PRIV="--allow-privileged=true"
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure"] %}
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
 KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
 {% elif cloud_provider is defined and cloud_provider == "aws" %}
 KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"
diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
index a965ef792ab49c230001e3ca6b0cecd8401ac2d0..2dbcf74d1e0cac10e9703e33a0513858bb158285 100644
--- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
@@ -50,7 +50,11 @@ spec:
   volumes:
   - name: ssl-certs-host
     hostPath:
+{% if ansible_os_family == 'RedHat' %}
+      path: /etc/pki/tls
+{% else %}
       path: /usr/share/ca-certificates
+{% endif %}
   - name: "kubeconfig"
     hostPath:
       path: "{{kube_config_dir}}/node-kubeconfig.yaml"
diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml
index 59076c2042b9de80074e3837fa51d51df7ba653f..c775f748dbb591396ab09cc4e22eb81c754ebb8d 100644
--- a/roles/kubernetes/preinstall/defaults/main.yml
+++ b/roles/kubernetes/preinstall/defaults/main.yml
@@ -29,6 +29,22 @@ openstack_password: "{{ lookup('env','OS_PASSWORD')  }}"
 openstack_region: "{{ lookup('env','OS_REGION_NAME')  }}"
 openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true)  }}"
 
+# For the vsphere integration, kubelet will need credentials to access
+# vsphere apis
+# Documentation regarding these values can be found at:
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
+vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
+vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
+vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
+vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
+vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
+vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
+vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
+vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
+vsphere_scsi_controller_type: pvscsi
+# vsphere_public_network is the name of the network the VMs are joined to
+vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
+
 # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
 # for hostnet pods and infra needs
 resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
diff --git a/roles/kubernetes/preinstall/meta/main.yml b/roles/kubernetes/preinstall/meta/main.yml
index cf440f5e222c33d9670142d4309c243fa0610bac..203d968a7f7470439b6a5b2516aa96129a416120 100644
--- a/roles/kubernetes/preinstall/meta/main.yml
+++ b/roles/kubernetes/preinstall/meta/main.yml
@@ -3,3 +3,4 @@ dependencies:
   - role: adduser
     user: "{{ addusers.kube }}"
     tags: kubelet
+    when: not is_atomic
\ No newline at end of file
diff --git a/roles/kubernetes/preinstall/tasks/etchosts.yml b/roles/kubernetes/preinstall/tasks/etchosts.yml
index 08c941e6fb94392ddd1df68e5630e57ee051fcd8..181fbcb0f400b25e522fc02c2b1cd586e263b648 100644
--- a/roles/kubernetes/preinstall/tasks/etchosts.yml
+++ b/roles/kubernetes/preinstall/tasks/etchosts.yml
@@ -17,7 +17,7 @@
     line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name| default('lb-apiserver.kubernetes.local') }}"
     state: present
     backup: yes
-  when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined
+  when: loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and apiserver_loadbalancer_domain_name is defined
 
 - name: Hosts | localhost ipv4 in hosts file
   lineinfile:
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index 5b79c101d0ea9f5ccc1667502ec469147d2ed824..e79d92751d2eeec6fb84482936e88601088a5fb6 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -64,17 +64,13 @@
 
 - name: check cloud_provider value
   fail:
-    msg: "If set the 'cloud_provider' var must be set either to 'generic', 'gce', 'aws', 'azure' or 'openstack'"
-  when: cloud_provider is defined and cloud_provider not in ['generic', 'gce', 'aws', 'openstack', 'azure']
+    msg: "If set, the 'cloud_provider' var must be set either to 'generic', 'gce', 'aws', 'azure', 'openstack' or 'vsphere'"
+  when: cloud_provider is defined and cloud_provider not in ['generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere']
   tags: [cloud-provider, facts]
 
-- include: openstack-credential-check.yml
-  when: cloud_provider is defined and cloud_provider == 'openstack'
-  tags: [cloud-provider, openstack, facts]
-
-- include: azure-credential-check.yml
-  when: cloud_provider is defined and cloud_provider == 'azure'
-  tags: [cloud-provider, azure, facts]
+- include: "{{ cloud_provider }}-credential-check.yml"
+  when: cloud_provider is defined and cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+  tags: [cloud-provider, facts]
 
 - name: Create cni directories
   file:
@@ -91,7 +87,7 @@
   yum:
     update_cache: yes
     name: '*'
-  when: ansible_pkg_mgr == 'yum'
+  when: ansible_pkg_mgr == 'yum' and not is_atomic
   tags: bootstrap-os
 
 - name: Install latest version of python-apt for Debian distribs
@@ -112,7 +108,7 @@
 
 - name: Install epel-release on RedHat/CentOS
   shell: rpm -qa | grep epel-release || rpm -ivh {{ epel_rpm_download_url }}
-  when: ansible_distribution in ["CentOS","RedHat"]
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
   changed_when: False
   check_mode: no
   tags: bootstrap-os
@@ -127,7 +123,7 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   with_items: "{{required_pkgs | default([]) | union(common_required_pkgs|default([]))}}"
-  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
   tags: bootstrap-os
 
 # Todo : selinux configuration
@@ -179,23 +175,14 @@
     state: present
   tags: bootstrap-os
 
-- name: Write openstack cloud-config
-  template:
-    src: openstack-cloud-config.j2
-    dest: "{{ kube_config_dir }}/cloud_config"
-    group: "{{ kube_cert_group }}"
-    mode: 0640
-  when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider == "openstack"
-  tags: [cloud-provider, openstack]
-
-- name: Write azure cloud-config
+- name: Write cloud-config
   template:
-    src: azure-cloud-config.j2
+    src: "{{ cloud_provider }}-cloud-config.j2"
     dest: "{{ kube_config_dir }}/cloud_config"
     group: "{{ kube_cert_group }}"
     mode: 0640
-  when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider == "azure"
-  tags: [cloud-provider, azure]
+  when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+  tags: [cloud-provider]
 
 - include: etchosts.yml
   tags: [bootstrap-os, etchosts]
diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml
index 2481fcd7fb0e2877a1a257cd75ea9be50826a9ab..03057829d6d68be69ac47d86c421655774787443 100644
--- a/roles/kubernetes/preinstall/tasks/set_facts.yml
+++ b/roles/kubernetes/preinstall/tasks/set_facts.yml
@@ -22,7 +22,7 @@
     kube_apiserver_endpoint: |-
       {% if not is_kube_master and loadbalancer_apiserver_localhost|default(false) -%}
            https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
-      {%- elif is_kube_master and loadbalancer_apiserver is not defined -%}
+      {%- elif is_kube_master -%}
            http://127.0.0.1:{{ kube_apiserver_insecure_port }}
       {%- else -%}
       {%-   if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
@@ -83,5 +83,17 @@
 - set_fact:
     peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
 
+- name: check if atomic host
+  stat:
+    path: /run/ostree-booted
+  register: ostree
+
+- set_fact:
+    is_atomic: "{{ ostree.stat.exists }}"
+
+- set_fact:
+    kube_cert_group: "kube"
+  when: is_atomic
+
 - include: set_resolv_facts.yml
   tags: [bootstrap-os, resolvconf, facts]
diff --git a/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml b/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b91726d50ad722c27781b71aa0937ed0ae03d20f
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml
@@ -0,0 +1,21 @@
+- name: check vsphere environment variables
+  fail:
+    msg: "{{ item.name }} is missing"
+  when: item.value is not defined or item.value == ''
+  with_items:
+    - name: vsphere_vcenter_ip
+      value: "{{ vsphere_vcenter_ip }}"
+    - name: vsphere_vcenter_port
+      value: "{{ vsphere_vcenter_port }}"
+    - name: vsphere_user
+      value: "{{ vsphere_user }}"
+    - name: vsphere_password
+      value: "{{ vsphere_password }}"
+    - name: vsphere_datacenter
+      value: "{{ vsphere_datacenter }}"
+    - name: vsphere_datastore
+      value: "{{ vsphere_datastore }}"
+    - name: vsphere_working_dir
+      value: "{{ vsphere_working_dir }}"
+    - name: vsphere_insecure
+      value: "{{ vsphere_insecure }}"
diff --git a/roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2 b/roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c68ac0f55c37aaa1619d17bac8c7ead2205b70aa
--- /dev/null
+++ b/roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2
@@ -0,0 +1,20 @@
+[Global]
+datacenter = {{ vsphere_datacenter }}
+datastore = {{ vsphere_datastore }}
+insecure-flag = {{ vsphere_insecure }}
+password = {{ vsphere_password }}
+port = {{ vsphere_vcenter_port }}
+server = {{ vsphere_vcenter_ip }}
+user = {{ vsphere_user }}
+working-dir = {{ vsphere_working_dir }}
+{% if vsphere_vm_uuid is defined %}
+vm-uuid = {{ vsphere_vm_uuid }}
+{% endif %}
+
+[Disk]
+scsicontrollertype = {{ vsphere_scsi_controller_type }}
+
+{% if vsphere_public_network is defined and vsphere_public_network != ""  %}
+[Network]
+public-network = {{ vsphere_public_network }}
+{% endif %}
diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml
index 0d5f238149ba795ce623328ba1fde29cd841f125..41cef85c14ffd449d64b4bc12ca4b2f79ceebd36 100644
--- a/roles/kubernetes/secrets/tasks/check-certs.yml
+++ b/roles/kubernetes/secrets/tasks/check-certs.yml
@@ -1,16 +1,11 @@
 ---
 - name: "Check_certs | check if the certs have already been generated on first master"
-  stat:
-    path: "{{ kube_cert_dir }}/{{ item }}"
+  find:
+    paths: "{{ kube_cert_dir }}"
+    patterns: "*.pem"
   delegate_to: "{{groups['kube-master'][0]}}"
   register: kubecert_master
   run_once: true
-  with_items: >-
-       ['ca.pem',
-       {% for host in groups['k8s-cluster'] %}
-       'node-{{ host }}-key.pem'
-       {% if not loop.last %}{{','}}{% endif %}
-       {% endfor %}]
 
 - name: "Check_certs | Set default value for 'sync_certs', 'gen_certs', and 'secret_changed'  to false"
   set_fact:
@@ -18,33 +13,53 @@
     gen_certs: false
     secret_changed: false
 
-- name: "Check_certs | Set 'gen_certs' to true"
-  set_fact:
-    gen_certs: true
-  when: "not {{ item.stat.exists }}"
-  run_once: true
-  with_items: "{{ kubecert_master.results }}"
-
-- name: "Check certs | check if a cert already exists"
+- name: "Check certs | check if a cert already exists on node"
   stat:
     path: "{{ kube_cert_dir }}/{{ item }}"
-  register: kubecert
+  register: kubecert_node
   with_items:
     - ca.pem
     - node-{{ inventory_hostname }}-key.pem
 
+- name: "Check_certs | Set 'gen_certs' to true"
+  set_fact:
+    gen_certs: true
+  when: "not item in kubecert_master.files|map(attribute='path') | list"
+  run_once: true
+  with_items: >-
+       ['{{ kube_cert_dir }}/ca.pem',
+       {% for host in groups['k8s-cluster'] %}
+       '{{ kube_cert_dir }}/node-{{ host }}-key.pem'
+       {% if not loop.last %}{{','}}{% endif %}
+       {% endfor %}]
+
+- name: "Check_certs | Set 'gen_node_certs' to true"
+  set_fact:
+    gen_node_certs: |-
+      {
+      {% set existing_certs = kubecert_master.files|map(attribute='path')|list|sort %}
+      {% for host in groups['k8s-cluster'] -%}
+        {% set host_cert = "%s/node-%s-key.pem"|format(kube_cert_dir, host) %}
+        {% if host_cert in existing_certs -%}
+        "{{ host }}": False,
+        {% else -%}
+        "{{ host }}": True,
+        {% endif -%}
+      {% endfor %}
+      }
+  run_once: true
+
+
 - name: "Check_certs | Set 'sync_certs' to true"
   set_fact:
     sync_certs: true
   when: >-
       {%- set certs = {'sync': False} -%}
-      {%- for host in groups['k8s-cluster'] %}
-        {% if host == inventory_hostname %}
-          {% if (not kubecert.results[0].stat.exists|default(False)) or
-              (not kubecert.results[1].stat.exists|default(False)) or
-              (kubecert.results[1].stat.checksum|default('') != kubecert_master.results[loop.index].stat.checksum|default('')) -%}
-            {%- set _ = certs.update({'sync': True}) -%}
-          {% endif %}
-        {% endif %}
-      {%- endfor -%}
+      {% if gen_node_certs[inventory_hostname] or
+        (not kubecert_node.results[0].stat.exists|default(False)) or
+          (not kubecert_node.results[1].stat.exists|default(False)) or
+            (kubecert_node.results[1].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[1].stat.path)|first|map(attribute="checksum")|default('')) -%}
+              {%- set _ = certs.update({'sync': True}) -%}
+      {% endif %}
       {{ certs.sync }}
+
diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
index f75a45d1ae31810dc75fd2a348caf35e898a454a..4a918806562ea2b4187f212309010eb5effac879 100644
--- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml
+++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml
@@ -40,12 +40,12 @@
   command: "{{ kube_script_dir }}/make-ssl.sh -f {{ kube_config_dir }}/openssl.conf -d {{ kube_cert_dir }}"
   environment:
     - MASTERS: "{% for m in groups['kube-master'] %}
-                  {% if hostvars[m].sync_certs|default(true) %}
+                  {% if gen_node_certs[m]|default(false) %}
                     {{ m }}
                   {% endif %}
                 {% endfor %}"
     - HOSTS: "{% for h in groups['k8s-cluster'] %}
-                {% if hostvars[h].sync_certs|default(true) %}
+                {% if gen_node_certs[h]|default(true) %}
                     {{ h }}
                 {% endif %}
               {% endfor %}"
diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml
index ab2cb76b2c0a5dcfd83e752e1d24a84b2827618e..6da1471708277deb49e2e768a6cb6363eee06c67 100644
--- a/roles/kubernetes/secrets/tasks/main.yml
+++ b/roles/kubernetes/secrets/tasks/main.yml
@@ -71,8 +71,7 @@
   delegate_to: "{{groups['kube-master'][0]}}"
   when: gen_tokens|default(false)
 
-- include: gen_certs_script.yml
-  when: cert_management == "script"
+- include: "gen_certs_{{ cert_management }}.yml"
   tags: k8s-secrets
 
 - include: sync_kube_master_certs.yml
@@ -83,9 +82,5 @@
   when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster']
   tags: k8s-secrets
 
-- include: gen_certs_vault.yml
-  when: cert_management == "vault"
-  tags: k8s-secrets
-
 - include: gen_tokens.yml
   tags: k8s-secrets
diff --git a/roles/vault/tasks/main.yml b/roles/vault/tasks/main.yml
index f7414b74fb5865f934ec7f66d2e0c431f0dafff3..4aef875ceadd754c3bb07e0a70047ef4bd1910cc 100644
--- a/roles/vault/tasks/main.yml
+++ b/roles/vault/tasks/main.yml
@@ -12,8 +12,8 @@
 
 ## Bootstrap
 - include: bootstrap/main.yml
-  when: vault_bootstrap | d()
+  when: cert_management == 'vault' and vault_bootstrap | d()
 
 ## Cluster
 - include: cluster/main.yml
-  when: not vault_bootstrap | d()
+  when: cert_management == 'vault' and not vault_bootstrap | d()
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index eea3b69aadcbef7b4c804b21b3d60f4441f455c1..f4f48d543efa8603efedc314c1cc406d18da4d34 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -39,17 +39,17 @@
     - { role: kargo-defaults, when: "cert_management == 'vault'" }
     - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
 
-- hosts: etcd:!k8s-cluster
+- hosts: etcd
   any_errors_fatal: true
   roles:
     - { role: kargo-defaults}
-    - { role: etcd, tags: etcd }
+    - { role: etcd, tags: etcd, etcd_cluster_setup: true }
 
 - hosts: k8s-cluster
   any_errors_fatal: true
   roles:
     - { role: kargo-defaults}
-    - { role: etcd, tags: etcd }
+    - { role: etcd, tags: etcd, etcd_cluster_setup: false }
 
 - hosts: etcd:k8s-cluster:vault
   any_errors_fatal: true