diff --git a/docs/dns-stack.md b/docs/dns-stack.md
index 8d72d9e2c84720351164f7262d6d1f08cde00572..e6df11b7363e319ae3d031081dc9096d8f8a0a64 100644
--- a/docs/dns-stack.md
+++ b/docs/dns-stack.md
@@ -7,7 +7,7 @@ to serve as an authoritative DNS server for a given ``dns_domain`` and its
 ``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
 
 Note, additional search (sub)domains may be defined in the ``searchdomains``
-var. And additional recursive DNS resolvers in the `` upstream_dns_servers``,
+var and tuned with the ``ndots`` var. Additional recursive DNS resolvers may be defined in the ``upstream_dns_servers`` and
 ``nameservers`` vars. Intranet DNS resolvers should be specified in the first
 place, followed by external resolvers, for example:
 
@@ -21,17 +21,10 @@ or
 skip_dnsmasq: false
 upstream_dns_servers: [172.18.32.6, 172.18.32.7, 8.8.8.8, 8.8.8.4]
 ```
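+
+Additional search domains and the resolver ``ndots`` level might, for example,
+be set like this (a sketch: the domain names are placeholders, and the list
+form of ``searchdomains`` is an assumption):
+
+```
+searchdomains: [infra.example.org, lab.example.org]
+ndots: 5
+```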
+These vars are explained in more detail below.
 
-Remember the limitations (the vars are explained below):
-
-* the ``searchdomains`` have a limitation of a 6 names and 256 chars
-  length. Due to default ``svc, default.svc`` subdomains, the actual
-  limits are a 4 names and 239 chars respectively.
-* the ``nameservers`` have a limitation of a 3 servers, although there
-  is a way to mitigate that with the ``upstream_dns_servers``,
-  see below. Anyway, the ``nameservers`` can take no more than a two
-  custom DNS servers because of one slot is reserved for a Kubernetes
-  cluster needs.
+DNS configuration details
+-------------------------
 
 Here is an approximate picture of how DNS things working and
 being configured by Kargo ansible playbooks:
@@ -73,7 +66,27 @@ Those may be specified either in ``nameservers`` or ``upstream_dns_servers``
 and will be merged together with the ``skydns_server`` IP into the hots'
 ``/etc/resolv.conf``.
 
-Kargo has yet ways to configure Kubedns addon to forward requests SkyDns can
-not answer with authority to arbitrary recursive resolvers. This task is left
-for future. See [official SkyDns docs](https://github.com/skynetservices/skydns)
-for details.
+Limitations
+-----------
+
+* Kargo has no way yet to configure the Kubedns addon to forward requests that
+  SkyDns cannot answer authoritatively to arbitrary recursive resolvers. This
+  task is left for the future. See the
+  [official SkyDns docs](https://github.com/skynetservices/skydns) for details.
+
+* There is
+  [no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554)
+  for the SkyDNS ``ndots`` param via an
+  [option for the KubeDNS](https://github.com/kubernetes/kubernetes/blob/master/cmd/kube-dns/app/options/options.go)
+  add-on, although SkyDNS itself supports it. Thus, DNS SRV records may not work
+  as expected, as they require ``ndots:7``.
+
+* the ``searchdomains`` are limited to 6 names and 256 chars in total. Due to
+  the default ``svc, default.svc`` subdomains, the actual limits are 4 names
+  and 239 chars respectively.
+
+* the ``nameservers`` are limited to 3 servers, although this can be mitigated
+  with the ``upstream_dns_servers`` var, see the details above. In any case,
+  the ``nameservers`` can take no more than two custom DNS servers, because one
+  slot is reserved for the Kubernetes cluster needs, as shown in the sketch
+  below.
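+
+As a sketch of staying within that limit, at most two custom resolvers would be
+listed (the addresses below just reuse the example values from above):
+
+```
+nameservers: [172.18.32.6, 8.8.8.8]
+```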
diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml
index 90e0196c561277bb867cbb47dde71850c1dcc3c5..add9fdd2a548ec7b42c1b52aaeb41b4be881b955 100644
--- a/inventory/group_vars/all.yml
+++ b/inventory/group_vars/all.yml
@@ -33,6 +33,8 @@ kube_users:
 
 # Kubernetes cluster name, also will be used as DNS domain
 cluster_name: cluster.local
+# Resolver ndots option: max levels of (sub)domains of the DNS domain to be
+# resolved via the search list in /etc/resolv.conf
+ndots: 5
 
 # For some environments, each node has a pubilcally accessible
 # address and an address it should bind services to.  These are
diff --git a/roles/dnsmasq/handlers/main.yml b/roles/dnsmasq/handlers/main.yml
index 5e24890e717f570448c579de16664d5f8534ccda..4bdfd10f696ec8ad4c6e3d29cfe3ceb93c8bc92b 100644
--- a/roles/dnsmasq/handlers/main.yml
+++ b/roles/dnsmasq/handlers/main.yml
@@ -17,5 +17,18 @@
   when: ansible_os_family != "RedHat" and ansible_os_family != "CoreOS"
 
 - name: Dnsmasq | update resolvconf
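+  # /bin/true is a no-op; this handler exists only to fan out the notification
+  # to the two handlers below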
+  command: /bin/true
+  notify:
+    - Dnsmasq | reload resolvconf
+    - Dnsmasq | reload kubelet
+
+- name: Dnsmasq | reload resolvconf
   command: /sbin/resolvconf -u
   ignore_errors: true
+
+- name: Dnsmasq | reload kubelet
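+  # restarted on kube-master nodes only (see the condition below), presumably
+  # so the kubelet picks up the updated resolv.conf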
+  service:
+    name: kubelet
+    state: restarted
+  when: inventory_hostname in groups['kube-master']
+  ignore_errors: true
diff --git a/roles/dnsmasq/tasks/resolvconf.yml b/roles/dnsmasq/tasks/resolvconf.yml
index db6f5ec026a0e3d8637ee2dd1e45128a8134b922..c018e977156218b9a94c30924cc133fd21386e2e 100644
--- a/roles/dnsmasq/tasks/resolvconf.yml
+++ b/roles/dnsmasq/tasks/resolvconf.yml
@@ -72,6 +72,7 @@
     backup: yes
     follow: yes
   with_items:
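+    # resolver options for the hosts' resolv.conf; the ndots value comes from
+    # the ndots var (default 5 in group_vars)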
+    - ndots:{{ ndots }}
     - timeout:2
     - attempts:2
   notify: Dnsmasq | update resolvconf
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml b/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml
index b771a3213d620df679b322c0f28ae80d3ef5333a..3d193d1dc82407440b7eb055e66ebabdc5ccb02f 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml
@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
       - name: kubedns
-        image: gcr.io/google_containers/kubedns-amd64:1.6
+        image: gcr.io/google_containers/kubedns-amd64:1.7
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml
index d1787be2d0bacd50ea015c353b817af77abcd94d..3d69cba7d585c955ee76a26c61d847aebe59e315 100644
--- a/roles/kubernetes/master/handlers/main.yml
+++ b/roles/kubernetes/master/handlers/main.yml
@@ -4,12 +4,14 @@
   notify:
     - Master | reload systemd
     - Master | reload kubelet
+    - Master | wait for master static pods
 
-- name: wait for master static pods
+- name: Master | wait for master static pods
   command: /bin/true
   notify:
-    - wait for kube-scheduler
-    - wait for kube-controller-manager
+    - Master | wait for the apiserver to be running
+    - Master | wait for kube-scheduler
+    - Master | wait for kube-controller-manager
 
 - name: Master | reload systemd
   command: systemctl daemon-reload
@@ -20,16 +22,23 @@
     name: kubelet
     state: restarted
 
-- name: wait for kube-scheduler
+- name: Master | wait for kube-scheduler
   uri: url=http://localhost:10251/healthz
   register: scheduler_result
   until: scheduler_result.status == 200
   retries: 15
   delay: 5
 
-- name: wait for kube-controller-manager
+- name: Master | wait for kube-controller-manager
   uri: url=http://localhost:10252/healthz
   register: controller_manager_result
   until: controller_manager_result.status == 200
   retries: 15
   delay: 5
+
+- name: Master | wait for the apiserver to be running
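+  # poll the local apiserver healthz endpoint until it returns 200
+  # (up to 10 attempts, 6 seconds apart)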
+  uri: url=http://localhost:8080/healthz
+  register: result
+  until: result.status == 200
+  retries: 10
+  delay: 6
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index ff6abcb13156770698884b50a0cf6e4eb57763de..419be1f5a31789d7024bd449cc31de04a4883a2a 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -19,17 +19,9 @@
   template:
     src: manifests/kube-apiserver.manifest.j2
     dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
-  register: apiserver_manifest
-  notify: Master | restart kubelet
-
-- name: wait for the apiserver to be running
-  uri: url=http://localhost:8080/healthz
-  register: result
-  until: result.status == 200
-  retries: 10
-  delay: 6
-
+  notify: Master | wait for the apiserver to be running
 
+- meta: flush_handlers
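+# Flush pending handlers now so the "Master | wait for the apiserver to be
+# running" check completes before the kube-system namespace is created below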
 # Create kube-system namespace
 - name: copy 'kube-system' namespace manifest
   copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml
@@ -43,7 +35,6 @@
   failed_when: False
   run_once: yes
 
-
 - name: Create 'kube-system' namespace
   command: "{{ bin_dir }}/kubectl create -f /etc/kubernetes/kube-system-ns.yml"
   changed_when: False
@@ -54,12 +45,10 @@
   template:
     src: manifests/kube-controller-manager.manifest.j2
     dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
-  notify: wait for kube-controller-manager
+  notify: Master | wait for kube-controller-manager
 
 - name: Write kube-scheduler manifest
   template:
     src: manifests/kube-scheduler.manifest.j2
     dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
-  notify: wait for kube-scheduler
-
-- meta: flush_handlers
+  notify: Master | wait for kube-scheduler