diff --git a/README.md b/README.md
index c1fb187ad78c7d11c753ea73ed607683482e96c1..f7d4984a81464f4a732ec759a0f8a70c05b16783 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,31 @@
-# kubernetes-ansible
-Setup a kubernetes cluster
+This playbook deploys a full Kubernetes cluster, configures a network overlay, and installs a few add-ons.
+
+# Download necessary binaries
+Note: the variable 'local_release_dir' defines where the binaries are downloaded.
+Ensure you have enough disk space.
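+
+For example, in environments/dev/group_vars/all.yml:
+
+```yaml
+# Where the binaries will be downloaded (about 1 GB needed)
+local_release_dir: "/tmp/releases"
+```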
+
+# Kubernetes
+Kubernetes services are configured with the NodePort type:
+each node opens the same TCP port and forwards the traffic to the target pod, wherever it is located.
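+
+For illustration, a minimal NodePort service definition (the name and ports are
+hypothetical) looks like this:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-app
+spec:
+  type: NodePort
+  selector:
+    app: my-app
+  ports:
+  - port: 80          # cluster-internal service port
+    targetPort: 8080  # container port
+    # nodePort is allocated automatically unless set explicitly
+```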
+
+master:
+  - apiserver:
+  Currently the apiserver listens on both secure and insecure ports.
+  TODO: secure everything, especially Calico.
+  - scheduler:
+  - controller:
+  - proxy
+node:
+  - kubelet:
+  kubelet is configured to call calico whenever a pod is created or destroyed
+  - proxy
+  the proxy configures all the forwarding rules
+
+# Overlay network
+You can choose between two network overlays; exactly one must be selected.
+flannel: GRE/VXLAN (layer 2) networking
+calico: BGP (layer 3) networking
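+
+The plugin is selected with the 'overlay_network_plugin' variable in your
+environment's group vars (see environments/dev/group_vars/k8s-cluster.yml):
+
+```yaml
+# set to 'calico' for BGP networking; leave empty to use flannel
+overlay_network_plugin: calico
+```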
+
+# Loadbalancer
+The machine where Ansible runs must be allowed to reach the master IP on port 8080 (the Kubernetes API):
+it queries the service definitions there in order to know which NodePort is configured.
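+
+For example, the same service definitions can be fetched by hand (master IP
+taken from the sample dev inventory):
+
+```sh
+curl http://192.168.0.1:8080/api/v1/services
+```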
diff --git a/cluster.yml b/cluster.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4b2dbe141f845e8398537a1fddb5029b131f28c5
--- /dev/null
+++ b/cluster.yml
@@ -0,0 +1,21 @@
+---
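+# Typical invocation (using the sample dev inventory shipped in this repo):
+#   ansible-playbook -i environments/dev/inventory cluster.yml
+# Individual roles can be re-applied with --tags, e.g. --tags=dnsmasq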
+- hosts: downloader
+  sudo: no
+  roles:
+    - { role: download, tags: download }
+
+- hosts: k8s-cluster
+  roles:
+    - { role: etcd, tags: etcd }
+    - { role: docker, tags: docker }
+    - { role: overlay_network, tags: ['calico', 'flannel', 'network'] }
+    - { role: dnsmasq, tags: dnsmasq }
+
+- hosts: kube-master
+  roles:
+    - { role: kubernetes/master, tags: master }
+    - { role: addons, tags: addons }
+
+- hosts: kube-node
+  roles:
+    - { role: kubernetes/node, tags: node }
diff --git a/environments/dev/group_vars/all.yml b/environments/dev/group_vars/all.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5cf4c0b5481d2561a0621734caaaa3df98365ae7
--- /dev/null
+++ b/environments/dev/group_vars/all.yml
@@ -0,0 +1,6 @@
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+# Where the binaries will be downloaded.
+# Note: ensure that you have enough disk space (about 1 GB)
+local_release_dir: "/tmp/releases"
diff --git a/environments/dev/group_vars/k8s-cluster.yml b/environments/dev/group_vars/k8s-cluster.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f32ef5fe66acd48538f0a2b4a85194af78d72027
--- /dev/null
+++ b/environments/dev/group_vars/k8s-cluster.yml
@@ -0,0 +1,68 @@
+# Users to create for basic auth in Kubernetes API via HTTP
+kube_users:
+  kube:
+    pass: changeme
+    role: admin
+  root:
+    pass: changeme
+    role: admin
+
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Set this variable to 'calico' if needed; leave it empty to use flannel.
+overlay_network_plugin: calico
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+overlay_network_subnet: 10.233.64.0/18
+
+# internal network total size (optional). This is the prefix of the
+# entire overlay network, so that whole block (here 10.233.64.0/18)
+# must be unused in your environment.
+# overlay_network_prefix: 18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. A /18 overlay split into /24 subnets gives
+# 2^(24-18) = 64 nodes with 254 pods per node.
+overlay_network_host_prefix: 24
+
+# Internal DNS configuration.
+# Kubernetes can create and maintain its own DNS server to resolve service names
+# into appropriate IP addresses. It's highly advisable to run such DNS server,
+# as it greatly simplifies configuration of your applications - you can use
+# service names instead of magic environment variables.
+# You still must manually configure all your containers to use this DNS server,
+# Kubernetes won't do this for you (yet).
+
+# Upstream dns servers used by dnsmasq
+upstream_dns_servers:
+  - 8.8.8.8
+  - 8.8.4.4
+
+# Set this variable to 'false' to disable the whole DNS configuration.
+dns_setup: true
+dns_domain: "{{ cluster_name }}"
+
+# IP address of the kubernetes dns service
+kube_dns_server: 10.233.0.10
+
+# Number of replicas of DNS instances started on kubernetes
+dns_replicas: 2
+
+# Set to 'false' to disable default Kubernetes UI setup
+enable_ui: true
+
+# Set to 'false' to disable default Elasticsearch + Kibana logging setup
+enable_logging: false
+
+# Set to "false' to disable default Monitoring (cAdvisor + heapster + influxdb + grafana)
+enable_monitoring: false
+
+# Set to 'false' to disable the docker garbage collection.
+# Every hour it removes images that are not used by containers and exited containers.
+enable_docker_gc: true
diff --git a/environments/dev/inventory b/environments/dev/inventory
new file mode 100644
index 0000000000000000000000000000000000000000..9955305dddb262add39223df4578aca73660ad4c
--- /dev/null
+++ b/environments/dev/inventory
@@ -0,0 +1,19 @@
+[downloader]
+192.168.0.1
+
+[kube-master]
+# NB: br_addr must be within the {{ calico_pool }} subnet;
+# a /24 subnet will be assigned per node
+192.168.0.1 br_addr=10.233.64.1
+
+[kube-node]
+192.168.0.2 br_addr=10.233.65.1
+192.168.0.3 br_addr=10.233.66.1
+192.168.0.4 br_addr=10.233.67.1
+
+[etcd]
+192.168.0.1
+
+[k8s-cluster:children]
+kube-node
+kube-master
diff --git a/environments/production/group_vars/all.yml b/environments/production/group_vars/all.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ef234d25693a2f7c6d3a3dc2b1607364062410a4
--- /dev/null
+++ b/environments/production/group_vars/all.yml
@@ -0,0 +1,6 @@
+# Directory where the binaries will be installed
+# bin_dir: /usr/local/bin
+
+# Where the binaries will be downloaded.
+# Note: ensure that you have enough disk space (about 1 GB)
+# local_release_dir: "/tmp/releases"
diff --git a/environments/production/group_vars/k8s-cluster.yml b/environments/production/group_vars/k8s-cluster.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f1da759a9309bcc38fda7054e43213ab95f28190
--- /dev/null
+++ b/environments/production/group_vars/k8s-cluster.yml
@@ -0,0 +1,68 @@
+# Users to create for basic auth in Kubernetes API via HTTP
+# kube_users:
+#   kube:
+#     pass: changeme
+#     role: admin
+#   root:
+#     pass: changeme
+#     role: admin
+
+# Kubernetes cluster name, also will be used as DNS domain
+# cluster_name: cluster.local
+# Set this variable to 'calico' if needed; leave it empty to use flannel.
+# overlay_network_plugin: calico
+
+# Kubernetes internal network for services, unused block of space.
+# kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+# overlay_network_subnet: 10.233.64.0/18
+
+# internal network total size (optional). This is the prefix of the
+# entire overlay network, so that whole block (here 10.233.64.0/18)
+# must be unused in your environment.
+# overlay_network_prefix: 18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. A /18 overlay split into /24 subnets gives
+# 2^(24-18) = 64 nodes with 254 pods per node.
+# overlay_network_host_prefix: 24
+
+# Internal DNS configuration.
+# Kubernetes can create and maintain its own DNS server to resolve service names
+# into appropriate IP addresses. It's highly advisable to run such DNS server,
+# as it greatly simplifies configuration of your applications - you can use
+# service names instead of magic environment variables.
+# You still must manually configure all your containers to use this DNS server,
+# Kubernetes won't do this for you (yet).
+
+# Upstream dns servers used by dnsmasq
+# upstream_dns_servers:
+#   - 8.8.8.8
+#   - 8.8.4.4
+
+# Set this variable to 'false' to disable the whole DNS configuration.
+# dns_setup: true
+# dns_domain: "{{ cluster_name }}"
+
+# IP address of the kubernetes dns service
+# kube_dns_server: 10.233.0.10
+
+# Number of replicas of DNS instances started on kubernetes
+# dns_replicas: 2
+
+# Set to 'false' to disable default Kubernetes UI setup
+# enable_ui: true
+
+# Set to 'false' to disable default Elasticsearch + Kibana logging setup
+# enable_logging: false
+
+# Set to "false' to disable default Monitoring (cAdvisor + heapster + influxdb + grafana)
+# enable_monitoring: false
+
+# Set to 'false' to disable the docker garbage collection.
+# Every hour it removes images that are not used by containers and exited containers.
+# enable_docker_gc: true
diff --git a/library/kube.py b/library/kube.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf4f2d05b8eb6e19ab8565e28aebbcb58e8613bb
--- /dev/null
+++ b/library/kube.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+DOCUMENTATION = """
+---
+module: kube
+short_description: Manage Kubernetes Cluster
+description:
+  - Create, replace, remove, and stop resources within a Kubernetes Cluster
+version_added: "2.0"
+options:
+  name:
+    required: false
+    default: null
+    description:
+      - The name associated with resource
+  filename:
+    required: false
+    default: null
+    description:
+      - The path and filename of the resource(s) definition file.
+  namespace:
+    required: false
+    default: null
+    description:
+      - The namespace associated with the resource(s)
+  resource:
+    required: false
+    default: null
+    description:
+      - The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
+  label:
+    required: false
+    default: null
+    description:
+      - The labels used to filter specific resources.
+  server:
+    required: false
+    default: null
+    description:
+      - The url for the API server that commands are executed against.
+  api_version:
+    required: false
+    choices: ['v1', 'v1beta3']
+    default: v1
+    description:
+      - The API version associated with cluster.
+  force:
+    required: false
+    default: false
+    description:
+      - A flag to indicate to force delete, replace, or stop.
+  all:
+    required: false
+    default: false
+    description:
+      - A flag to indicate delete all, stop all, or all namespaces when checking exists.
+  log_level:
+    required: false
+    default: 0
+    description:
+      - Indicates the level of verbosity of logging by kubectl.
+  state:
+    required: false
+    choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
+    default: present
+    description:
+      - present handles checking existence or creating if definition file provided,
+        absent handles deleting resource(s) based on other options,
+        latest handles creating or updating based on existence,
+        reloaded handles updating resource(s) definition using definition file,
+        stopped handles stopping resource(s) based on other options.
+requirements:
+  - kubectl
+author: "Kenny Jones (@kenjones-cisco)"
+"""
+
+EXAMPLES = """
+- name: test nginx is present
+  kube: name=nginx resource=rc state=present
+
+- name: test nginx is stopped
+  kube: name=nginx resource=rc state=stopped
+
+- name: test nginx is absent
+  kube: name=nginx resource=rc state=absent
+
+- name: test nginx is present
+  kube: filename=/tmp/nginx.yml
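+
+# apply the latest definition, replacing the resource if it already exists
+- name: update nginx to the latest definition
+  kube: filename=/tmp/nginx.yml resource=rc name=nginx state=latest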
+"""
+
+
+class KubeManager(object):
+
+    def __init__(self, module):
+
+        self.module = module
+
+        self.base_cmd = [module.get_bin_path('kubectl', True)]
+        self.api_version = module.params.get('api_version')
+
+        if self.api_version:
+            self.base_cmd.append('--api-version=' + self.api_version)
+
+        if module.params.get('server'):
+            self.base_cmd.append('--server=' + module.params.get('server'))
+
+        if module.params.get('log_level'):
+            self.base_cmd.append('--v=' + str(module.params.get('log_level')))
+
+        if module.params.get('namespace'):
+            self.base_cmd.append('--namespace=' + module.params.get('namespace'))
+
+        self.all = module.params.get('all')
+        self.force = module.params.get('force')
+        self.name = module.params.get('name')
+        self.filename = module.params.get('filename')
+        self.resource = module.params.get('resource')
+        self.label = module.params.get('label')
+
+    def _execute(self, cmd):
+        args = self.base_cmd + cmd
+        try:
+            rc, out, err = self.module.run_command(args)
+            if rc != 0:
+                self.module.fail_json(
+                    msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
+        except Exception as exc:
+            self.module.fail_json(
+                msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
+        return out.splitlines()
+
+    def _execute_nofail(self, cmd):
+        args = self.base_cmd + cmd
+        rc, out, err = self.module.run_command(args)
+        if rc != 0:
+            return None
+        return out.splitlines()
+
+    def create(self, check=True):
+        if check and self.exists():
+            return []
+
+        cmd = ['create']
+
+        if not self.filename:
+            self.module.fail_json(msg='filename required to create')
+
+        cmd.append('--filename=' + self.filename)
+
+        return self._execute(cmd)
+
+    def replace(self):
+
+        if not self.force and not self.exists():
+            return []
+
+        cmd = ['replace']
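+        # kubectl called this verb 'update' before the v1 API renamed it to
+        # 'replace', so fall back for older api_version values (v1beta3)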
+        if self.api_version != 'v1':
+            cmd = ['update']
+
+        if self.force:
+            cmd.append('--force')
+
+        if not self.filename:
+            self.module.fail_json(msg='filename required to reload')
+
+        cmd.append('--filename=' + self.filename)
+
+        return self._execute(cmd)
+
+    def delete(self):
+
+        if not self.force and not self.exists():
+            return []
+
+        cmd = ['delete']
+
+        if self.filename:
+            cmd.append('--filename=' + self.filename)
+        else:
+            if not self.resource:
+                self.module.fail_json(msg='resource required to delete without filename')
+
+            cmd.append(self.resource)
+
+            if self.name:
+                cmd.append(self.name)
+
+            if self.label:
+                cmd.append('--selector=' + self.label)
+
+            if self.all:
+                cmd.append('--all')
+
+            if self.force:
+                cmd.append('--ignore-not-found')
+
+        return self._execute(cmd)
+
+    def exists(self):
+        cmd = ['get']
+
+        if not self.resource:
+            return False
+
+        cmd.append(self.resource)
+
+        if self.name:
+            cmd.append(self.name)
+
+        cmd.append('--no-headers')
+
+        if self.label:
+            cmd.append('--selector=' + self.label)
+
+        if self.all:
+            cmd.append('--all-namespaces')
+
+        result = self._execute_nofail(cmd)
+        if not result:
+            return False
+        return True
+
+    def stop(self):
+
+        if not self.force and not self.exists():
+            return []
+
+        cmd = ['stop']
+
+        if self.filename:
+            cmd.append('--filename=' + self.filename)
+        else:
+            if not self.resource:
+                self.module.fail_json(msg='resource required to stop without filename')
+
+            cmd.append(self.resource)
+
+            if self.name:
+                cmd.append(self.name)
+
+            if self.label:
+                cmd.append('--selector=' + self.label)
+
+            if self.all:
+                cmd.append('--all')
+
+            if self.force:
+                cmd.append('--ignore-not-found')
+
+        return self._execute(cmd)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(),
+            filename=dict(),
+            namespace=dict(),
+            resource=dict(),
+            label=dict(),
+            server=dict(),
+            api_version=dict(default='v1', choices=['v1', 'v1beta3']),
+            force=dict(default=False, type='bool'),
+            all=dict(default=False, type='bool'),
+            log_level=dict(default=0, type='int'),
+            state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
+            )
+        )
+
+    changed = False
+
+    manager = KubeManager(module)
+    state = module.params.get('state')
+
+    if state == 'present':
+        result = manager.create()
+
+    elif state == 'absent':
+        result = manager.delete()
+
+    elif state == 'reloaded':
+        result = manager.replace()
+
+    elif state == 'stopped':
+        result = manager.stop()
+
+    elif state == 'latest':
+        if manager.exists():
+            manager.force = True
+            result = manager.replace()
+        else:
+            result = manager.create(check=False)
+
+    else:
+        module.fail_json(msg='Unrecognized state %s.' % state)
+
+    if result:
+        changed = True
+    module.exit_json(changed=changed,
+                     msg='success: %s' % (' '.join(result))
+                     )
+
+
+from ansible.module_utils.basic import *  # noqa
+if __name__ == '__main__':
+    main()
diff --git a/roles/addons/defaults/main.yml b/roles/addons/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e61f2795743e6922cf547d7daa8ec60176b20800
--- /dev/null
+++ b/roles/addons/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for addons
diff --git a/roles/addons/files/es-rc.yaml b/roles/addons/files/es-rc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6631153a56ec996a78e3eae5a6d775e00f63298d
--- /dev/null
+++ b/roles/addons/files/es-rc.yaml
@@ -0,0 +1,40 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: elasticsearch-logging-v1
+  namespace: kube-system
+  labels:
+    k8s-app: elasticsearch-logging
+    version: v1
+    kubernetes.io/cluster-service: "true"
+spec:
+  replicas: 2
+  selector:
+    k8s-app: elasticsearch-logging
+    version: v1
+  template:
+    metadata:
+      labels:
+        k8s-app: elasticsearch-logging
+        version: v1
+        kubernetes.io/cluster-service: "true"
+    spec:
+      containers:
+      - image: gcr.io/google_containers/elasticsearch:1.7
+        name: elasticsearch-logging         
+        resources:
+          limits:
+            cpu: 100m
+        ports:
+        - containerPort: 9200
+          name: db
+          protocol: TCP
+        - containerPort: 9300
+          name: transport
+          protocol: TCP
+        volumeMounts:
+        - name: es-persistent-storage
+          mountPath: /data
+      volumes:
+      - name: es-persistent-storage
+        emptyDir: {}
diff --git a/roles/addons/files/es-svc.yaml b/roles/addons/files/es-svc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..abf1fd3f68487b3db685b564e85191f6c5fb5e0e
--- /dev/null
+++ b/roles/addons/files/es-svc.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: elasticsearch-logging
+  namespace: kube-system
+  labels:
+    k8s-app: elasticsearch-logging
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "Elasticsearch"
+spec:
+  ports:
+  - port: 9200
+    protocol: TCP
+    targetPort: db
+  selector:
+    k8s-app: elasticsearch-logging
diff --git a/roles/addons/files/grafana-service.yaml b/roles/addons/files/grafana-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..41b6b8804109c230355f27254dfb6e55ac56002a
--- /dev/null
+++ b/roles/addons/files/grafana-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: monitoring-grafana
+  namespace: kube-system
+  labels: 
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "Grafana"
+spec: 
+  type: NodePort
+  ports: 
+    - port: 80
+      targetPort: 8080
+  selector: 
+    k8s-app: influxGrafana
+
diff --git a/roles/addons/files/heapster-controller.yaml b/roles/addons/files/heapster-controller.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fbc3badfdffc6070a3c7c00b5962784af18cc92b
--- /dev/null
+++ b/roles/addons/files/heapster-controller.yaml
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: monitoring-heapster-v8
+  namespace: kube-system
+  labels:
+    k8s-app: heapster
+    version: v8
+    kubernetes.io/cluster-service: "true"
+spec:
+  replicas: 1
+  selector:
+    k8s-app: heapster
+    version: v8
+  template:
+    metadata:
+      labels:
+        k8s-app: heapster
+        version: v8
+        kubernetes.io/cluster-service: "true"
+    spec:
+      containers:
+        - image: gcr.io/google_containers/heapster:v0.17.0
+          name: heapster
+          resources:
+            limits:
+              cpu: 100m
+              memory: 300Mi
+          command:
+            - /heapster
+            - --source=kubernetes:''
+            - --sink=influxdb:http://monitoring-influxdb:8086
diff --git a/roles/addons/files/heapster-service.yaml b/roles/addons/files/heapster-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5ba0cadc4df55fa8de1839d558f9117864ceefd5
--- /dev/null
+++ b/roles/addons/files/heapster-service.yaml
@@ -0,0 +1,15 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: monitoring-heapster
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "Heapster"
+spec: 
+  type: NodePort
+  ports: 
+    - port: 80
+      targetPort: 8082
+  selector: 
+    k8s-app: heapster
diff --git a/roles/addons/files/influxdb-grafana-controller.yaml b/roles/addons/files/influxdb-grafana-controller.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a2763063a1ccd6836e1009e08371b9e983bec8e
--- /dev/null
+++ b/roles/addons/files/influxdb-grafana-controller.yaml
@@ -0,0 +1,53 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: monitoring-influx-grafana-v1
+  namespace: kube-system
+  labels: 
+    k8s-app: influxGrafana
+    version: v1
+    kubernetes.io/cluster-service: "true"
+spec: 
+  replicas: 1
+  selector: 
+    k8s-app: influxGrafana
+    version: v1
+  template: 
+    metadata: 
+      labels: 
+        k8s-app: influxGrafana
+        version: v1
+        kubernetes.io/cluster-service: "true"
+    spec: 
+      containers: 
+        - image: gcr.io/google_containers/heapster_influxdb:v0.3
+          name: influxdb
+          resources:
+            limits:
+              cpu: 100m
+              memory: 200Mi
+          ports: 
+            - containerPort: 8083
+              hostPort: 8083
+            - containerPort: 8086
+              hostPort: 8086
+          volumeMounts:
+          - name: influxdb-persistent-storage
+            mountPath: /data
+        - image: gcr.io/google_containers/heapster_grafana:v0.7
+          name: grafana
+          resources:
+            limits:
+              cpu: 100m
+              memory: 100Mi
+          env: 
+            - name: INFLUXDB_EXTERNAL_URL
+              value: /api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb:api/db/
+            - name: INFLUXDB_HOST
+              value: monitoring-influxdb
+            - name: INFLUXDB_PORT
+              value: "8086"
+      volumes:
+      - name: influxdb-persistent-storage
+        emptyDir: {}
+
diff --git a/roles/addons/files/influxdb-service.yaml b/roles/addons/files/influxdb-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..066e052476e9ad1103630d1cf02bfba4804255c2
--- /dev/null
+++ b/roles/addons/files/influxdb-service.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: monitoring-influxdb
+  namespace: kube-system
+  labels: 
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "InfluxDB"
+spec: 
+  ports: 
+    - name: http
+      port: 8083
+      targetPort: 8083
+    - name: api
+      port: 8086
+      targetPort: 8086
+  selector: 
+    k8s-app: influxGrafana
+
diff --git a/roles/addons/files/kibana-rc.yaml b/roles/addons/files/kibana-rc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..893608aef6b458eb1e4af5cee7d37b6260f4b4bc
--- /dev/null
+++ b/roles/addons/files/kibana-rc.yaml
@@ -0,0 +1,34 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: kibana-logging-v1
+  namespace: kube-system
+  labels:
+    k8s-app: kibana-logging
+    version: v1
+    kubernetes.io/cluster-service: "true"
+spec:
+  replicas: 1
+  selector:
+    k8s-app: kibana-logging
+    version: v1
+  template:
+    metadata:
+      labels:
+        k8s-app: kibana-logging
+        version: v1
+        kubernetes.io/cluster-service: "true"
+    spec:
+      containers:
+      - name: kibana-logging
+        image: gcr.io/google_containers/kibana:1.3
+        resources:
+          limits:
+            cpu: 100m
+        env:
+          - name: "ELASTICSEARCH_URL"
+            value: "http://elasticsearch-logging:9200"
+        ports:
+        - containerPort: 5601
+          name: ui
+          protocol: TCP
diff --git a/roles/addons/files/kibana-svc.yaml b/roles/addons/files/kibana-svc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..20c4287d75461942472300b94c22f73934adf9e0
--- /dev/null
+++ b/roles/addons/files/kibana-svc.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: kibana-logging
+  namespace: kube-system
+  labels:
+    k8s-app: kibana-logging
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "Kibana"
+spec:
+  type: NodePort
+  ports:
+  - port: 5601
+    protocol: TCP
+    targetPort: ui
+  selector:
+    k8s-app: kibana-logging
diff --git a/roles/addons/files/kube-system.yaml b/roles/addons/files/kube-system.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..986f4b482217e2147911f8a323236788e810acaf
--- /dev/null
+++ b/roles/addons/files/kube-system.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: kube-system
diff --git a/roles/addons/files/kube-ui-rc.yaml b/roles/addons/files/kube-ui-rc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e853a2177c04bb79fb41e2d26aaf2ef289e39393
--- /dev/null
+++ b/roles/addons/files/kube-ui-rc.yaml
@@ -0,0 +1,36 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: kube-ui-v1
+  namespace: kube-system
+  labels:
+    k8s-app: kube-ui
+    version: v1
+    kubernetes.io/cluster-service: "true"
+spec:
+  replicas: 1
+  selector:
+    k8s-app: kube-ui
+    version: v1
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-ui
+        version: v1
+        kubernetes.io/cluster-service: "true"
+    spec:
+      containers:
+      - name: kube-ui
+        image: gcr.io/google_containers/kube-ui:v1.1
+        resources:
+          limits:
+            cpu: 100m
+            memory: 50Mi
+        ports:
+        - containerPort: 8080
+        livenessProbe:
+          httpGet:
+            path: /
+            port: 8080
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
diff --git a/roles/addons/files/kube-ui-svc.yaml b/roles/addons/files/kube-ui-svc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aee0c97b8b303ebaf9150c369be0b52a11fd7af2
--- /dev/null
+++ b/roles/addons/files/kube-ui-svc.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: kube-ui
+  namespace: kube-system
+  labels:
+    k8s-app: kube-ui
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "KubeUI"
+spec:
+  type: NodePort
+  selector:
+    k8s-app: kube-ui
+  ports:
+  - port: 80
+    targetPort: 8080
diff --git a/roles/addons/handlers/main.yml b/roles/addons/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..706ac0f729f912dd7228bd62a5473504f3bf00c4
--- /dev/null
+++ b/roles/addons/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for addons
diff --git a/roles/addons/meta/main.yml b/roles/addons/meta/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5e306c8a4eb65ba290faa66fcd66622bcb424db9
--- /dev/null
+++ b/roles/addons/meta/main.yml
@@ -0,0 +1,4 @@
+---
+dependencies:
+  - { role: kubernetes/master }
+  - { role: kubernetes/common }
diff --git a/roles/addons/tasks/kube-ui.yml b/roles/addons/tasks/kube-ui.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0f769dc6f082b9a07f8e310e7196d1c885dbd973
--- /dev/null
+++ b/roles/addons/tasks/kube-ui.yml
@@ -0,0 +1,44 @@
+---
+- name: Kube-UI | Write pod file
+  copy:
+    src: kube-ui-rc.yaml
+    dest: "{{ kube_manifest_dir }}/kube-ui-rc.yaml"
+  register: kube_ui_rc_def
+  when: enable_ui
+  tags:
+    - addons
+    - kube-ui
+
+- name: Kube-UI | Write service file
+  copy:
+    src: kube-ui-svc.yaml
+    dest: "{{ kube_manifest_dir }}/kube-ui-svc.yaml"
+  register: kube_ui_svc_def
+  when: enable_ui
+  tags:
+    - addons
+    - kube-ui
+
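+# If the manifest just changed on disk, push it with state=latest (replace);
+# otherwise state=present only ensures the resource exists.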
+- name: Kube-UI | Create or update replication controller
+  kube:
+    namespace: kube-system
+    resource: rc
+    name: kube-ui-v1
+    filename: "{{ kube_manifest_dir }}/kube-ui-rc.yaml"
+    state: "{{ kube_ui_rc_def.changed | ternary('latest','present') }}"
+  when: enable_ui
+  tags:
+    - addons
+    - kube-ui
+
+- name: Kube-UI | Create or update service
+  kube:
+    namespace: kube-system
+    resource: svc
+    name: kube-ui
+    filename: "{{ kube_manifest_dir }}/kube-ui-svc.yaml"
+    state: "{{ kube_ui_svc_def.changed | ternary('latest','present') }}"
+  when: enable_ui
+  tags:
+    - addons
+    - kube-ui
diff --git a/roles/addons/tasks/logging.yml b/roles/addons/tasks/logging.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b0ecc67b03908c32533205c29d0c3b0b5add46de
--- /dev/null
+++ b/roles/addons/tasks/logging.yml
@@ -0,0 +1,88 @@
+---
+- name: Logging | Kibana | Write pod file
+  copy:
+    src: kibana-rc.yaml
+    dest: "{{ kube_manifest_dir }}/kibana-rc.yaml"
+  register: kibana_rc_def
+  when: enable_logging
+  tags:
+    - addons
+    - logging
+
+- name: Logging | Kibana | Write service file
+  copy:
+    src: kibana-svc.yaml
+    dest: "{{ kube_manifest_dir }}/kibana-svc.yaml"
+  register: kibana_svc_def
+  when: enable_logging
+  tags:
+    - addons
+    - logging
+
+- name: Logging | ES | Write pod file
+  copy:
+    src: es-rc.yaml
+    dest: "{{ kube_manifest_dir }}/es-rc.yaml"
+  register: es_rc_def
+  when: enable_logging
+  tags:
+    - addons
+    - logging
+
+- name: Logging | ES | Write service file
+  copy:
+    src: es-svc.yaml
+    dest: "{{ kube_manifest_dir }}/es-svc.yaml"
+  register: es_svc_def
+  when: enable_logging
+  tags:
+    - addons
+    - logging
+
+- name: Logging | ES | Create/update replication controller
+  kube:
+    namespace: kube-system
+    resource: rc
+    name: elasticsearch-logging-v1
+    filename: "{{ kube_manifest_dir }}/es-rc.yaml"
+    state: "{{ es_rc_def.changed | ternary('latest','present') }}"
+  when: enable_logging
+  tags:
+    - addons
+    - logging
+
+- name: Logging | ES | Create/update service
+  kube:
+    namespace: kube-system
+    resource: svc
+    name: elasticsearch-logging
+    filename: "{{ kube_manifest_dir }}/es-svc.yaml"
+    state: "{{ es_svc_def.changed | ternary('latest','present') }}"
+  when: enable_logging
+  tags:
+    - addons
+    - logging
+
+- name: Logging | Kibana | Create/update replication controller
+  kube:
+    namespace: kube-system
+    resource: rc
+    name: kibana-logging-v1
+    filename: "{{ kube_manifest_dir }}/kibana-rc.yaml"
+    state: "{{ kibana_rc_def.changed | ternary('latest','present') }}"
+  when: enable_logging
+  tags:
+    - addons
+    - logging
+
+- name: Logging | Kibana | Create/update service
+  kube:
+    namespace: kube-system
+    resource: svc
+    name: kibana-logging
+    filename: "{{ kube_manifest_dir }}/kibana-svc.yaml"
+    state: "{{ kibana_svc_def.changed | ternary('latest','present') }}"
+  when: enable_logging
+  tags:
+    - addons
+    - logging
diff --git a/roles/addons/tasks/main.yml b/roles/addons/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0e20952433cef48c5ec9a181170948dcd80c7f80
--- /dev/null
+++ b/roles/addons/tasks/main.yml
@@ -0,0 +1,45 @@
+---
+- name: create manifests directory
+  file: path={{ kube_manifest_dir }} state=directory
+
+- name: Write kube-system namespace manifest
+  copy:
+    src: kube-system.yaml
+    dest: "{{ kube_manifest_dir }}/kube-system.yaml"
+
+- name: Create kube-system namespace
+  kube:
+    resource: ns
+    name: kube-system
+    filename: "{{ kube_manifest_dir }}/kube-system.yaml"
+    state: present
+  tags:
+    - addons
+  ignore_errors: yes
+
+- name: Run kube-gen-token script to create {{ kube_token_dir }}/known_tokens.csv
+  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item }}"
+  environment:
+    TOKEN_DIR: "{{ kube_token_dir }}"
+  with_items:
+    - "system:dns"
+    - "system:monitoring"
+    - "system:logging"
+  register: gentoken
+  changed_when: "'Added' in gentoken.stdout"
+  notify:
+    - restart apiserver
+  tags:
+    - addons
+
+- include: skydns.yml
+  when: dns_setup
+
+- include: kube-ui.yml
+  when: enable_ui
+
+- include: logging.yml
+  when: enable_logging
+
+- include: monitoring.yml
+  when: enable_monitoring
diff --git a/roles/addons/tasks/monitoring.yml b/roles/addons/tasks/monitoring.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f64c401fc70b691d32799e3a167e7ab18549783b
--- /dev/null
+++ b/roles/addons/tasks/monitoring.yml
@@ -0,0 +1,111 @@
+---
+- name: Monitoring | Influxdb | Write controller file
+  copy:
+    src: influxdb-grafana-controller.yaml
+    dest: "{{ kube_manifest_dir }}/influxdb-grafana-controller.yaml"
+  register: influxdb_rc_def
+  when: enable_monitoring
+  tags:
+    - addons
+    - monitoring
+
+- name: Monitoring | Influxdb | Write service file
+  copy:
+    src: influxdb-service.yaml
+    dest: "{{ kube_manifest_dir }}/influxdb-service.yaml"
+  register: influxdb_svc_def
+  when: enable_monitoring
+  tags:
+    - addons
+    - monitoring
+
+- name: Monitoring | Grafana | Write service file
+  copy:
+    src: grafana-service.yaml
+    dest: "{{ kube_manifest_dir }}/grafana-service.yaml"
+  register: grafana_svc_def
+  when: enable_monitoring
+  tags:
+    - addons
+    - monitoring
+
+- name: Monitoring | Heapster | Write controller file
+  copy:
+    src: heapster-controller.yaml
+    dest: "{{ kube_manifest_dir }}/heapster-controller.yaml"
+  register: heapster_rc_def
+  when: enable_monitoring
+  tags:
+    - addons
+    - monitoring
+
+- name: Monitoring | Heapster | Write service file
+  copy:
+    src: heapster-service.yaml
+    dest: "{{ kube_manifest_dir }}/heapster-service.yaml"
+  register: heapster_svc_def
+  when: enable_monitoring
+  tags:
+    - addons
+    - monitoring
+
+- name: Monitoring | Influxdb | Create/update replication controller
+  kube:
+    namespace: kube-system
+    resource: rc
+    name: monitoring-influx-grafana-v1
+    filename: "{{ kube_manifest_dir }}/influxdb-grafana-controller.yaml"
+    state: "{{ influxdb_rc_def.changed | ternary('latest','present') }}"
+  when: enable_monitoring
+  tags:
+    - addons
+    - monitoring
+
+- name: Monitoring | Influxdb | Create/update service
+  kube:
+    namespace: kube-system
+    resource: svc
+    name: monitoring-influxdb
+    filename: "{{ kube_manifest_dir }}/influxdb-service.yaml"
+    state: "{{ influxdb_svc_def.changed | ternary('latest','present') }}"
+  when: enable_monitoring
+  tags:
+    - addons
+    - monitoring
+
+- name: Monitoring | Grafana | Create/update service
+  kube:
+    namespace: kube-system
+    resource: svc
+    name: monitoring-grafana
+    filename: "{{ kube_manifest_dir }}/grafana-service.yaml"
+    state: "{{ grafana_svc_def.changed | ternary('latest','present') }}"
+  when: enable_monitoring
+  tags:
+    - addons
+    - monitoring
+
+- name: Monitoring | Heapster | Create/update replication controller
+  kube:
+    namespace: kube-system
+    resource: rc
+    name: monitoring-heapster-v8
+    filename: "{{ kube_manifest_dir }}/heapster-controller.yaml"
+    state: "{{ heapster_rc_def.changed | ternary('latest','present') }}"
+  when: enable_monitoring
+  tags:
+    - addons
+    - monitoring
+
+- name: Monitoring | Heapster | Create/update service
+  kube:
+    namespace: kube-system
+    resource: svc
+    name: monitoring-heapster
+    filename: "{{ kube_manifest_dir }}/heapster-service.yaml"
+    state: "{{ heapster_svc_def.changed | ternary('latest','present') }}"
+  when: enable_monitoring
+  tags:
+    - addons
+    - monitoring
+
diff --git a/roles/addons/tasks/skydns.yml b/roles/addons/tasks/skydns.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8fb6d15fe4c38a70afe671ff629479e2236f3b1b
--- /dev/null
+++ b/roles/addons/tasks/skydns.yml
@@ -0,0 +1,44 @@
+---
+- name: SkyDNS | Write pod file
+  template:
+    src: skydns-rc.yaml.j2
+    dest: "{{ kube_manifest_dir }}/skydns-rc.yaml"
+  register: dns_rc_def
+  when: dns_setup
+  tags:
+    - addons
+    - skydns
+
+- name: SkyDNS | Write service file
+  template:
+    src: skydns-svc.yaml.j2
+    dest: "{{ kube_manifest_dir }}/skydns-svc.yaml"
+  register: dns_svc_def
+  when: dns_setup
+  tags:
+    - addons
+    - skydns
+
+- name: SkyDNS | Create or update replication controller
+  kube:
+    namespace: kube-system
+    resource: rc
+    name: kube-dns-v8
+    filename: "{{ kube_manifest_dir }}/skydns-rc.yaml"
+    state: "{{ dns_rc_def.changed | ternary('latest','present') }}"
+  when: dns_setup
+  tags:
+    - addons
+    - skydns
+
+- name: SkyDNS | Create or update service
+  kube:
+    namespace: kube-system
+    resource: svc
+    name: kube-dns
+    filename: "{{ kube_manifest_dir }}/skydns-svc.yaml"
+    state: "{{ dns_svc_def.changed | ternary('latest','present') }}"
+  when: dns_setup
+  tags:
+    - addons
+    - skydns
diff --git a/roles/addons/templates/skydns-rc.yaml.j2 b/roles/addons/templates/skydns-rc.yaml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..0aba304bb12f2d1ccd2494198cf2b2456bbfd45a
--- /dev/null
+++ b/roles/addons/templates/skydns-rc.yaml.j2
@@ -0,0 +1,91 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: kube-dns-v8
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    version: v8
+    kubernetes.io/cluster-service: "true"
+spec:
+  replicas: {{ dns_replicas }}
+  selector:
+    k8s-app: kube-dns
+    version: v8
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-dns
+        version: v8
+        kubernetes.io/cluster-service: "true"
+    spec:
+      containers:
+      - name: etcd
+        image: gcr.io/google_containers/etcd:2.0.9
+        resources:
+          limits:
+            cpu: 100m
+            memory: 50Mi
+        command:
+        - /usr/local/bin/etcd
+        - -data-dir
+        - /var/etcd/data
+        - -listen-client-urls
+        - http://127.0.0.1:2379,http://127.0.0.1:4001
+        - -advertise-client-urls
+        - http://127.0.0.1:2379,http://127.0.0.1:4001
+        - -initial-cluster-token
+        - skydns-etcd
+        volumeMounts:
+        - name: etcd-storage
+          mountPath: /var/etcd/data
+      - name: kube2sky
+        image: gcr.io/google_containers/kube2sky:1.11
+        resources:
+          limits:
+            cpu: 100m
+            memory: 50Mi
+        args:
+        # command = "/kube2sky"
+        - -domain={{ dns_domain }}
+      - name: skydns
+        image: gcr.io/google_containers/skydns:2015-03-11-001
+        resources:
+          limits:
+            cpu: 100m
+            memory: 50Mi
+        args:
+        # command = "/skydns"
+        - -machines=http://localhost:4001
+        - -addr=0.0.0.0:53
+        - -domain={{ dns_domain }}.
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+      - name: healthz
+        image: gcr.io/google_containers/exechealthz:1.0
+        resources:
+          limits:
+            cpu: 10m
+            memory: 20Mi
+        args:
+        - -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} localhost >/dev/null
+        - -port=8080
+        ports:
+        - containerPort: 8080
+          protocol: TCP
+      volumes:
+      - name: etcd-storage
+        emptyDir: {}
+      dnsPolicy: Default  # Don't use cluster DNS.
\ No newline at end of file
diff --git a/roles/addons/templates/skydns-svc.yaml.j2 b/roles/addons/templates/skydns-svc.yaml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..7efcfcf9d13bda56864b9c3175eb97d88fa193e0
--- /dev/null
+++ b/roles/addons/templates/skydns-svc.yaml.j2
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: kube-dns
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "KubeDNS"
+spec:
+  selector:
+    k8s-app: kube-dns
+  clusterIP:  {{ kube_dns_server }}
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
diff --git a/roles/dnsmasq/files/dhclient_nodnsupdate b/roles/dnsmasq/files/dhclient_nodnsupdate
new file mode 100644
index 0000000000000000000000000000000000000000..03c7c997ef4d14a1bfa735d606b53aee2b796951
--- /dev/null
+++ b/roles/dnsmasq/files/dhclient_nodnsupdate
@@ -0,0 +1,4 @@
+#!/bin/sh
+make_resolv_conf() {
+    :
+}
diff --git a/roles/dnsmasq/handlers/main.yml b/roles/dnsmasq/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..48b3137274e921e7396ce79161cb36823043caf9
--- /dev/null
+++ b/roles/dnsmasq/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: restart dnsmasq
+  command: systemctl restart dnsmasq
diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ab534dfb2a5d790fa8bcd886a407a8929e792809
--- /dev/null
+++ b/roles/dnsmasq/tasks/main.yml
@@ -0,0 +1,58 @@
+---
+- name: populate inventory into hosts file
+  lineinfile:
+    dest: /etc/hosts
+    regexp: "^{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}$"
+    line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}"
+    state: present
+  when: hostvars[item].ansible_default_ipv4.address is defined
+  with_items: groups['all']
+
+- name: clean hosts file
+  lineinfile:
+    dest: /etc/hosts
+    regexp: "{{ item }}"
+    state: absent
+  with_items:
+    - '^127\.0\.0\.1(\s+){{ inventory_hostname }}.*'
+    - '^::1(\s+){{ inventory_hostname }}.*'
+
+- name: install dnsmasq and bind9utils
+  apt:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - dnsmasq
+    - bind9utils
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: ensure dnsmasq.d directory exists
+  file:
+    path: /etc/dnsmasq.d
+    state: directory
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: configure dnsmasq
+  template:
+    src: 01-kube-dns.conf.j2
+    dest: /etc/dnsmasq.d/01-kube-dns.conf
+    mode: 0755
+  notify:
+    - restart dnsmasq
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: enable dnsmasq
+  service:
+    name: dnsmasq
+    state: started
+    enabled: yes
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: update resolv.conf with new DNS setup
+  template:
+    src: resolv.conf.j2
+    dest: /etc/resolv.conf
+    mode: 0644
+
+- name: disable resolv.conf modification by dhclient
+  copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=u+x
diff --git a/roles/dnsmasq/templates/01-kube-dns.conf.j2 b/roles/dnsmasq/templates/01-kube-dns.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..370c43c994b6db416b7636130fcc8d65c7d233fd
--- /dev/null
+++ b/roles/dnsmasq/templates/01-kube-dns.conf.j2
@@ -0,0 +1,19 @@
+#Listen on all interfaces
+interface=*
+
+addn-hosts=/etc/hosts
+
+bogus-priv
+
+#Set upstream dns servers
+{% if upstream_dns_servers is defined %}
+{% for srv in upstream_dns_servers %}
+server={{ srv }}
+{% endfor %}
+{% else %}
+server=8.8.8.8
+server=8.8.4.4
+{% endif %}
+
+# Forward k8s domain to kube-dns
+server=/{{ dns_domain }}/{{ kube_dns_server }}
diff --git a/roles/dnsmasq/templates/resolv.conf.j2 b/roles/dnsmasq/templates/resolv.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..d10a6fc927b317e4217db0a5004551b6a33837dd
--- /dev/null
+++ b/roles/dnsmasq/templates/resolv.conf.j2
@@ -0,0 +1,5 @@
+; generated by ansible
+search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
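+; with the default cluster_name this renders as:
+;   search default.svc.cluster.local svc.cluster.local cluster.local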
+{% for host in groups['kube-master'] %}
+nameserver {{ hostvars[host]['ansible_default_ipv4']['address'] }}
+{% endfor %}
diff --git a/roles/docker/files/docker-gc b/roles/docker/files/docker-gc
new file mode 100644
index 0000000000000000000000000000000000000000..895edd8967f784031d890b3201bb79f782cfcbdb
--- /dev/null
+++ b/roles/docker/files/docker-gc
@@ -0,0 +1,211 @@
+#!/bin/bash
+
+# Copyright (c) 2014 Spotify AB.
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# This script attempts to garbage collect docker containers and images.
+# Containers that exited more than an hour ago are removed.
+# Images that have existed more than an hour and are not in use by any
+# containers are removed.
+
+# Note: Although docker normally prevents removal of images that are in use by
+#       containers, we take extra care to not remove any image tags (e.g.
+#       ubuntu:14.04, busybox, etc) that are used by containers. A naive
+#       "docker rmi `docker images -q`" will leave images stripped of all tags,
+#       forcing users to re-pull the repositories even though the images
+#       themselves are still on disk.
+
+# Note: State is stored in $STATE_DIR, defaulting to /var/lib/docker-gc
+
+set -o nounset
+set -o errexit
+
+GRACE_PERIOD_SECONDS=${GRACE_PERIOD_SECONDS:=3600}
+STATE_DIR=${STATE_DIR:=/var/lib/docker-gc}
+DOCKER=${DOCKER:=docker}
+PID_DIR=${PID_DIR:=/var/run}
+
+for pid in $(pidof -s docker-gc); do
+    if [[ $pid != $$ ]]; then
+        echo "[$(date)] : docker-gc : Process is already running with PID $pid"
+        exit 1
+    fi
+done
+
+trap "rm -f -- '$PID_DIR/dockergc'" EXIT
+
+echo $$ > $PID_DIR/dockergc
+
+
+EXCLUDE_FROM_GC=${EXCLUDE_FROM_GC:=/etc/docker-gc-exclude}
+if [ ! -f "$EXCLUDE_FROM_GC" ]
+then
+  EXCLUDE_FROM_GC=/dev/null
+fi
+
+EXCLUDE_CONTAINERS_FROM_GC=${EXCLUDE_CONTAINERS_FROM_GC:=/etc/docker-gc-exclude-containers}
+if [ ! -f "$EXCLUDE_CONTAINERS_FROM_GC" ]
+then
+  EXCLUDE_CONTAINERS_FROM_GC=/dev/null  
+fi
+
+EXCLUDE_IDS_FILE="exclude_ids"
+EXCLUDE_CONTAINER_IDS_FILE="exclude_container_ids"
+
+function date_parse {
+  if date --utc >/dev/null 2>&1; then
+    # GNU/date
+    echo $(date -u --date "${1}" "+%s")
+  else
+    # BSD/date
+    echo $(date -j -u -f "%F %T" "${1}" "+%s")
+  fi
+}
+
+# Elapsed time since a docker timestamp, in seconds
+function elapsed_time() {
+    # Docker 1.5.0 datetime format is 2015-07-03T02:39:00.390284991
+    # Docker 1.7.0 datetime format is 2015-07-03 02:39:00.390284991 +0000 UTC
+    utcnow=$(date -u "+%s")
+    replace_q="${1#\"}"
+    without_ms="${replace_q:0:19}"
+    replace_t="${without_ms/T/ }"
+    epoch=$(date_parse "${replace_t}")
+    echo $(($utcnow - $epoch))
+}
+
+function compute_exclude_ids() {
+    # Find images that match patterns in the EXCLUDE_FROM_GC file and put their
+    # id prefixes into $EXCLUDE_IDS_FILE, prefixed with ^
+
+    PROCESSED_EXCLUDES="processed_excludes.tmp"
+    # Take each line and put a space at the beginning and end, so when we
+    # grep for them below, it will effectively be: "match either repo:tag
+    # or imageid".  Also delete blank lines or lines that only contain
+    # whitespace
+    sed 's/^\(.*\)$/ \1 /' $EXCLUDE_FROM_GC | sed '/^ *$/d' > $PROCESSED_EXCLUDES
+    # The following looks a bit of a mess, but here's what it does:
+    # 1. Get images
+    # 2. Skip header line
+    # 3. Turn columnar display of 'REPO TAG IMAGEID ....' to 'REPO:TAG IMAGEID'
+    # 4. find lines that contain things mentioned in PROCESSED_EXCLUDES
+    # 5. Grab the image id from the line
+    # 6. Prepend ^ to the beginning of each line
+
+    # What this does is make grep patterns to match image ids mentioned by
+    # either repo:tag or image id for later greppage
+    $DOCKER images \
+        | tail -n+2 \
+        | sed 's/^\([^ ]*\) *\([^ ]*\) *\([^ ]*\).*/ \1:\2 \3 /' \
+        | grep -f $PROCESSED_EXCLUDES 2>/dev/null \
+        | cut -d' ' -f3 \
+        | sed 's/^/^/' > $EXCLUDE_IDS_FILE
+}
+
+function compute_exclude_container_ids() {
+    # Find containers matching to patterns listed in EXCLUDE_CONTAINERS_FROM_GC file
+    # Implode their values with a \| separator on a single line
+    PROCESSED_EXCLUDES=`cat $EXCLUDE_CONTAINERS_FROM_GC \
+        | xargs \
+        | sed -e 's/ /\|/g'`
+    # The empty string would match everything
+    if [ "$PROCESSED_EXCLUDES" = "" ]; then
+        touch $EXCLUDE_CONTAINER_IDS_FILE
+        return
+    fi
+    # Find all docker images
+    # Filter out with matching names 
+    # and put them to $EXCLUDE_CONTAINER_IDS_FILE
+    $DOCKER ps -a \
+        | grep -E "$PROCESSED_EXCLUDES" \
+        | awk '{ print $1 }' \
+        | tr -s " " "\012" \
+        | sort -u > $EXCLUDE_CONTAINER_IDS_FILE
+}
+
+# Change into the state directory (and create it if it doesn't exist)
+if [ ! -d "$STATE_DIR" ]
+then
+  mkdir -p $STATE_DIR
+fi
+cd "$STATE_DIR"
+
+# Verify that docker is reachable
+$DOCKER version 1>/dev/null
+
+# List all currently existing containers
+$DOCKER ps -a -q --no-trunc | sort | uniq > containers.all
+
+# List running containers
+$DOCKER ps -q --no-trunc | sort | uniq > containers.running
+
+# compute ids of container images to exclude from GC
+compute_exclude_ids
+
+# compute ids of containers to exclude from GC
+compute_exclude_container_ids
+
+# List containers that are not running
+comm -23 containers.all containers.running > containers.exited
+
+# Find exited containers that finished at least GRACE_PERIOD_SECONDS ago
+echo -n "" > containers.reap.tmp
+cat containers.exited | while read line
+do
+    EXITED=$(${DOCKER} inspect -f "{{json .State.FinishedAt}}" ${line})
+    ELAPSED=$(elapsed_time $EXITED)
+    if [[ $ELAPSED -gt $GRACE_PERIOD_SECONDS ]]; then
+        echo $line >> containers.reap.tmp
+    fi
+done
+
+# List containers that we will remove and exclude ids.
+cat containers.reap.tmp | sort | uniq | grep -v -f $EXCLUDE_CONTAINER_IDS_FILE > containers.reap || true
+
+# List containers that we will keep.
+comm -23 containers.all containers.reap > containers.keep
+
+# List images used by containers that we keep.
+# This may be both image id's and repo/name:tag, so normalize to image id's only
+cat containers.keep |
+xargs -n 1 $DOCKER inspect -f '{{.Config.Image}}' 2>/dev/null |
+sort | uniq |
+xargs -n 1 $DOCKER inspect -f '{{.Id}}' 2>/dev/null |
+sort | uniq > images.used
+
+# List images to reap; images that existed last run and are not in use.
+$DOCKER images -q --no-trunc | sort | uniq > images.all
+
+# Find images that are created at least GRACE_PERIOD_SECONDS ago
+echo -n "" > images.reap.tmp
+cat images.all | while read line
+do
+    CREATED=$(${DOCKER} inspect -f "{{.Created}}" ${line})
+    ELAPSED=$(elapsed_time $CREATED)
+    if [[ $ELAPSED -gt $GRACE_PERIOD_SECONDS ]]; then
+        echo $line >> images.reap.tmp
+    fi
+done
+comm -23 images.reap.tmp images.used | grep -v -f $EXCLUDE_IDS_FILE > images.reap || true
+
+# Reap containers.
+xargs -n 1 $DOCKER rm --volumes=true < containers.reap &>/dev/null || true
+
+# Reap images.
+xargs -n 1 $DOCKER rmi < images.reap &>/dev/null || true
diff --git a/roles/docker/files/systemd-docker.service b/roles/docker/files/systemd-docker.service
new file mode 100644
index 0000000000000000000000000000000000000000..25eb328d5017b84b0b1b9f6a5b442bdb77c2ac8c
--- /dev/null
+++ b/roles/docker/files/systemd-docker.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Docker Application Container Engine
+Documentation=https://docs.docker.com
+After=network.target docker.socket
+Requires=docker.socket
+
+[Service]
+EnvironmentFile=-/etc/default/docker
+Type=notify
+ExecStart=/usr/bin/docker daemon -H fd:// $DOCKER_OPTS
+MountFlags=slave
+LimitNOFILE=1048576
+LimitNPROC=1048576
+LimitCORE=infinity
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2644b6dfde541f92cd20185cac1463137bf2506e
--- /dev/null
+++ b/roles/docker/handlers/main.yml
@@ -0,0 +1,12 @@
+---
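+# 'restart docker' is a no-op trampoline: notifying it fires the two handlers
+# below in declaration order, so systemd reloads units before docker restarts.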
+- name: restart docker
+  command: /bin/true
+  notify:
+    - reload systemd
+    - restart docker service
+
+- name: reload systemd
+  shell: systemctl daemon-reload
+
+- name: restart docker service
+  service: name=docker state=restarted
diff --git a/roles/docker/tasks/configure.yml b/roles/docker/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0cf255522d524bce1e5e68688e3405bc6cf19fb5
--- /dev/null
+++ b/roles/docker/tasks/configure.yml
@@ -0,0 +1,41 @@
+---
+- name: Write script for calico/docker bridge configuration
+  template: src=create_cbr.j2 dest=/etc/network/if-up.d/create_cbr mode=u+x
+  when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
+
+- name: Configure calico/docker bridge
+  shell: /etc/network/if-up.d/create_cbr
+  when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
+
+- name: Configure docker to use cbr0 bridge
+  lineinfile:
+    dest=/etc/default/docker
+    regexp='.*DOCKER_OPTS=.*'
+    line='DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"'
+  notify:
+    - restart docker
+  when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
+
+- name: enable docker
+  service:
+    name: docker
+    enabled: yes
+    state: started
+  tags:
+    - docker
+
+- meta: flush_handlers
+
+#- name: login to arkena's docker registry
+#  shell : >
+#    docker login --username={{ dockerhub_user }}
+#    --password={{ dockerhub_pass }}
+#    --email={{ dockerhub_email }}
+
+#- pause: prompt='WARNING The next task will remove all exited containers, enter to continue'
+#
+#- name: Purge all exited containers
+#  shell: >
+#    if [ ! -z "$(docker ps -aq -f status=exited)" ]; then
+#    docker rm $(docker ps -aq -f status=exited);
+#    fi
diff --git a/roles/docker/tasks/install.yml b/roles/docker/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2bb29c249938574648ac717aadb9397a86e9f769
--- /dev/null
+++ b/roles/docker/tasks/install.yml
@@ -0,0 +1,33 @@
+---
+- name: Configure debian distribution apt repository
+  template: src=debian.list.j2 dest=/etc/apt/sources.list.d/{{ ansible_distribution_release }}.list
+
+- name: Install prerequisites for https transport
+  apt: pkg={{ item }} state=present update_cache=yes
+  with_items:
+    - apt-transport-https
+    - ca-certificates
+
+- name: Configure docker apt repository
+  template: src=docker.list.j2 dest=/etc/apt/sources.list.d/docker.list
+
+- name: Install docker-engine
+  apt: pkg={{ item }} state=present force=yes update_cache=yes
+  with_items:
+    - aufs-tools
+    - cgroupfs-mount
+    - docker-engine=1.8.2-0~{{ ansible_distribution_release }}
+
+- name: Copy default docker configuration
+  template: src=default-docker.j2 dest=/etc/default/docker
+  notify: restart docker
+
+- name: Copy Docker systemd unit file
+  copy: src=systemd-docker.service dest=/lib/systemd/system/docker.service
+  notify: restart docker
+
+- name: Copy Docker garbage collection script
+  copy: src=docker-gc dest={{ bin_dir }}/docker-gc mode=700
+
+- name: Copy cron job for the garbage collection script
+  template: src=cron_docker-gc.j2 dest=/etc/cron.hourly/cron_docker-gc
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fdb17cf514b71c8b2a38426d53289b2728793413
--- /dev/null
+++ b/roles/docker/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- include: install.yml
+- include: configure.yml
diff --git a/roles/docker/templates/create_cbr.j2 b/roles/docker/templates/create_cbr.j2
new file mode 100644
index 0000000000000000000000000000000000000000..86974aaa19674102965582e3dba4f6b1b2b9fe2b
--- /dev/null
+++ b/roles/docker/templates/create_cbr.j2
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+# Create calico bridge cbr0 if it doesn't exist
+ifaces=$(ifconfig -a | sed 's/[ \t].*//;/^\(lo\|\)$/d' |tr '\n' ' ')
+if ! [[ "${ifaces}" =~ "cbr0" ]];then
+   brctl addbr cbr0
+   ip link set cbr0 up
+fi
+
+# Configure calico bridge ip
+br_ips=$(ip addr list cbr0 |grep "inet " |cut -d' ' -f6)
+if ! [[ "${br_ips}" =~ "{{ br_addr }}/{{ overlay_network_host_prefix }}" ]];then
+       ip a add {{ br_addr }}/{{ overlay_network_host_prefix }} dev cbr0
+fi
diff --git a/roles/docker/templates/cron_docker-gc.j2 b/roles/docker/templates/cron_docker-gc.j2
new file mode 100644
index 0000000000000000000000000000000000000000..27321fd6f159aa098e5f0a0da719f06a31a7b3cf
--- /dev/null
+++ b/roles/docker/templates/cron_docker-gc.j2
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+test -x {{ bin_dir }}/docker-gc || exit 0
+{{ bin_dir }}/docker-gc
diff --git a/roles/docker/templates/debian.list.j2 b/roles/docker/templates/debian.list.j2
new file mode 100644
index 0000000000000000000000000000000000000000..b831e18d01d138ca35cbe2a15570bdcb5d03ff55
--- /dev/null
+++ b/roles/docker/templates/debian.list.j2
@@ -0,0 +1,10 @@
+deb http://debian.arkena.net/debian/ {{ ansible_distribution_release }} main contrib non-free
+deb-src http://debian.arkena.net/debian/ {{ ansible_distribution_release }} main contrib non-free
+deb http://debian.arkena.net/debian/ {{ ansible_distribution_release }}-updates main contrib non-free
+deb-src http://debian.arkena.net/debian/ {{ ansible_distribution_release }}-updates main contrib non-free
+deb http://debian.arkena.net/debian-security/ {{ ansible_distribution_release }}/updates main contrib non-free
+deb-src http://debian.arkena.net/debian-security/ {{ ansible_distribution_release }}/updates main contrib non-free
+deb http://debian.arkena.net/debian/ {{ ansible_distribution_release }}-backports main contrib
+deb-src http://debian.arkena.net/debian/ {{ ansible_distribution_release }}-backports main contrib
+deb http://debian.arkena.net/debian-smartjog/ {{ ansible_distribution_release }} smartjog
+deb-src http://debian.arkena.net/debian-smartjog/ {{ ansible_distribution_release }} smartjog
diff --git a/roles/docker/templates/default-docker.j2 b/roles/docker/templates/default-docker.j2
new file mode 100644
index 0000000000000000000000000000000000000000..bd71bd7467db80761f46b8fc095658efe15be234
--- /dev/null
+++ b/roles/docker/templates/default-docker.j2
@@ -0,0 +1,15 @@
+# Docker Upstart and SysVinit configuration file
+
+# Customize location of Docker binary (especially for development testing).
+#DOCKER="/usr/local/bin/docker"
+
+# Use DOCKER_OPTS to modify the daemon startup options.
+{% if overlay_network_plugin is defined and overlay_network_plugin == "calico" %}
+DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"
+{% endif %}
+
+# If you need Docker to use an HTTP proxy, it can also be specified here.
+#export http_proxy="http://127.0.0.1:3128/"
+
+# This is also a handy place to tweak where Docker's temporary files go.
+#export TMPDIR="/mnt/bigdrive/docker-tmp"
diff --git a/roles/docker/templates/docker.list.j2 b/roles/docker/templates/docker.list.j2
new file mode 100644
index 0000000000000000000000000000000000000000..59c5b34085afd3fba074ae13c7ea1fd55bd60ee8
--- /dev/null
+++ b/roles/docker/templates/docker.list.j2
@@ -0,0 +1 @@
+deb https://apt.dockerproject.org/repo debian-{{ ansible_distribution_release }} main
diff --git a/roles/docker/vars/main.yml b/roles/docker/vars/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a47ed8dda9eebd8fec5dd433f03a7b74985fc47d
--- /dev/null
+++ b/roles/docker/vars/main.yml
@@ -0,0 +1,4 @@
+---
+dockerhub_user: arkenadev
+dockerhub_pass: 4rk3n4d3v
+dockerhub_email: smaine.kahlouch@gmail.com 
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8a90f9ce69566ddda5acef5958de7b554b43e624
--- /dev/null
+++ b/roles/download/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+etcd_download_url: https://github.com/coreos/etcd/releases/download
+flannel_download_url: https://github.com/coreos/flannel/releases/download
+kube_download_url: https://github.com/GoogleCloudPlatform/kubernetes/releases/download
+calico_download_url: https://github.com/Metaswitch/calico-docker/releases/download
diff --git a/roles/download/tasks/calico.yml b/roles/download/tasks/calico.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b6d506fe2a9f0d3a4b622854e51fdcda6b50afb5
--- /dev/null
+++ b/roles/download/tasks/calico.yml
@@ -0,0 +1,21 @@
+---
+- name: Create calico release directory
+  local_action: file
+     path={{ local_release_dir }}/calico/bin
+     recurse=yes
+     state=directory
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Check if calicoctl has been downloaded
+  local_action: stat
+     path={{ local_release_dir }}/calico/bin/calicoctl
+  register: c_tar
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+# issues with get_url module and redirects, to be tested again in the near future
+- name: Download calico
+  local_action: shell
+    curl -o {{ local_release_dir }}/calico/bin/calicoctl -Ls {{ calico_download_url }}/{{ calico_version }}/calicoctl
+  when: not c_tar.stat.exists
+  register: dl_calico
+  delegate_to: "{{ groups['kube-master'][0] }}"
diff --git a/roles/download/tasks/etcd.yml b/roles/download/tasks/etcd.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0bf3e6c7a0537667877b16307cbac3fbe90600dd
--- /dev/null
+++ b/roles/download/tasks/etcd.yml
@@ -0,0 +1,42 @@
+---
+- name: Create etcd release directory
+  local_action: file
+     path={{ local_release_dir }}/etcd/bin
+     recurse=yes
+     state=directory
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Check if etcd release archive has been downloaded
+  local_action: stat
+     path={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz
+  register: e_tar
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+# issues with get_url module and redirects, to be tested again in the near future
+- name: Download etcd
+  local_action: shell
+    curl -o {{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz -Ls {{ etcd_download_url }}/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz
+  when: not e_tar.stat.exists
+  register: dl_etcd
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Extract etcd archive
+  local_action: unarchive
+     src={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz
+     dest={{ local_release_dir }}/etcd copy=no
+  when: dl_etcd|changed
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Pick up only etcd binaries
+  local_action: copy
+     src={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/{{ item }}
+     dest={{ local_release_dir }}/etcd/bin
+  with_items:
+    - etcdctl
+    - etcd
+  when: dl_etcd|changed
+
+- name: Delete unused etcd files
+  local_action: file
+     path={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64 state=absent
+  when: dl_etcd|changed
diff --git a/roles/download/tasks/flannel.yml b/roles/download/tasks/flannel.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2de0ae547b85690e980206bdd85c909d6264fe10
--- /dev/null
+++ b/roles/download/tasks/flannel.yml
@@ -0,0 +1,39 @@
+---
+- name: Create flannel release directory
+  local_action: file
+     path={{ local_release_dir }}/flannel/bin
+     recurse=yes
+     state=directory
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Check if flannel release archive has been downloaded
+  local_action: stat
+     path={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
+  register: f_tar
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+# issues with get_url module and redirects, to be tested again in the near future
+- name: Download flannel
+  local_action: shell
+    curl -o {{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz -Ls {{ flannel_download_url }}/v{{ flannel_version }}/flannel-{{ flannel_version }}-linux-amd64.tar.gz
+  when: not f_tar.stat.exists
+  register: dl_flannel
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Extract flannel archive
+  local_action: unarchive
+     src={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
+     dest={{ local_release_dir }}/flannel copy=no
+  when: dl_flannel|changed
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Pick up only flannel binaries
+  local_action: copy
+     src={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}/flanneld
+     dest={{ local_release_dir }}/flannel/bin
+  when: dl_flannel|changed
+
+- name: Delete unused flannel files
+  local_action: file
+     path={{ local_release_dir }}/flannel/flannel-{{ flannel_version }} state=absent
+  when: dl_flannel|changed
diff --git a/roles/download/tasks/kubernetes.yml b/roles/download/tasks/kubernetes.yml
new file mode 100644
index 0000000000000000000000000000000000000000..de6359d147f3b00b07136f066a21e2c18196e001
--- /dev/null
+++ b/roles/download/tasks/kubernetes.yml
@@ -0,0 +1,47 @@
+---
+- name: Create kubernetes release directory
+  local_action: file
+     path={{ local_release_dir }}/kubernetes
+     state=directory
+
+- name: Check if kubernetes release archive has been downloaded
+  local_action: stat
+     path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
+  register: k_tar
+
+# issues with get_url module and redirects, to be tested again in the near future
+- name: Download kubernetes
+  local_action: shell
+    curl -o {{ local_release_dir }}/kubernetes/kubernetes.tar.gz -Ls {{ kube_download_url }}/{{ kube_version }}/kubernetes.tar.gz
+  when: not k_tar.stat.exists or k_tar.stat.checksum != "{{ kube_sha1 }}"
+  register: dl_kube
+
+- name: Compare kubernetes archive checksum
+  local_action: stat
+     path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
+  register: k_tar
+  failed_when: k_tar.stat.checksum != "{{ kube_sha1 }}"
+  when: dl_kube|changed
+
+- name: Extract kubernetes archive
+  local_action: unarchive
+     src={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
+     dest={{ local_release_dir }}/kubernetes copy=no
+  when: dl_kube|changed
+
+- name: Extract kubernetes binaries archive
+  local_action: unarchive
+     src={{ local_release_dir }}/kubernetes/kubernetes/server/kubernetes-server-linux-amd64.tar.gz
+     dest={{ local_release_dir }}/kubernetes copy=no
+  when: dl_kube|changed
+
+- name: Pick up only kubernetes binaries
+  local_action: synchronize
+     src={{ local_release_dir }}/kubernetes/kubernetes/server/bin
+     dest={{ local_release_dir }}/kubernetes
+  when: dl_kube|changed
+
+- name: Delete unused kubernetes files
+  local_action: file
+     path={{ local_release_dir }}/kubernetes/kubernetes state=absent
+  when: dl_kube|changed
diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b4228699be1f7411f0b44f5af42ce6383ceb1e8c
--- /dev/null
+++ b/roles/download/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- include: kubernetes.yml
+- include: etcd.yml
+- include: calico.yml
+- include: flannel.yml
diff --git a/roles/download/vars/main.yml b/roles/download/vars/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a101633deec539553df846182331e22e1b3edaaf
--- /dev/null
+++ b/roles/download/vars/main.yml
@@ -0,0 +1,8 @@
+---
+etcd_version: v2.2.0
+flannel_version: 0.5.3
+
+kube_version: v1.0.6
+kube_sha1: 289f9a11ea2f3cfcc6cbd50d29c3d16d4978b76c
+
+calico_version: v0.5.1
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..af2442abf5f6b7c1fecb0f3dcad93d65b2fca191
--- /dev/null
+++ b/roles/etcd/handlers/main.yml
@@ -0,0 +1,15 @@
+---
+- name: restart daemons
+  command: /bin/true
+  notify:
+    - reload systemd
+    - restart etcd2
+
+- name: reload systemd
+  command: systemctl daemon-reload
+
+- name: restart etcd2
+  service: name=etcd2 state=restarted
+
+- name: Save iptables rules
+  command: service iptables save
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
new file mode 100644
index 0000000000000000000000000000000000000000..97417ee971c6a4b3f6e9f5e003fb7ab12349cbbf
--- /dev/null
+++ b/roles/etcd/tasks/configure.yml
@@ -0,0 +1,15 @@
+---
+- name: Disable ferm
+  service: name=ferm state=stopped enabled=no
+
+- name: Create etcd2 environment vars dir
+  file: path=/etc/systemd/system/etcd2.service.d state=directory
+
+- name: Write etcd2 config file
+  template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2-cluster.conf
+  notify:
+    - reload systemd
+    - restart etcd2
+
+- name: Ensure etcd2 is running
+  service: name=etcd2 state=started enabled=yes
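+
+# Hypothetical smoke test, commented out since the role does not require it:
+#- name: Check etcd2 cluster health
+#  command: "{{ bin_dir }}/etcdctl --peers http://{{ groups['kube-master'][0] }}:2379 cluster-health"
+#  changed_when: false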
diff --git a/roles/etcd/tasks/install.yml b/roles/etcd/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..462e3d917c3613dbcee945dc5433aaa569071826
--- /dev/null
+++ b/roles/etcd/tasks/install.yml
@@ -0,0 +1,24 @@
+---
+- name: Create etcd user
+  user: name=etcd shell=/bin/nologin home=/var/lib/etcd2
+
+- name: Install etcd binaries
+  copy: 
+     src={{ local_release_dir }}/etcd/bin/{{ item }}
+     dest={{ bin_dir }}
+     owner=etcd
+     mode=u+x
+  with_items:
+    - etcdctl
+    - etcd
+  notify:
+    - restart daemons
+
+- name: Create etcd2 binary symlink
+  file: src={{ bin_dir }}/etcd dest={{ bin_dir }}/etcd2 state=link
+
+- name: Copy etcd2.service systemd file
+  template:
+    src: systemd-etcd2.service.j2
+    dest: /lib/systemd/system/etcd2.service
+  notify: restart daemons
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fdb17cf514b71c8b2a38426d53289b2728793413
--- /dev/null
+++ b/roles/etcd/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- include: install.yml
+- include: configure.yml
diff --git a/roles/etcd/templates/etcd2.j2 b/roles/etcd/templates/etcd2.j2
new file mode 100644
index 0000000000000000000000000000000000000000..27143e4582e4092cbdc4cd084a14ef1307d8fcc0
--- /dev/null
+++ b/roles/etcd/templates/etcd2.j2
@@ -0,0 +1,17 @@
+# etcd2.0
+[Service]
+{% if inventory_hostname in groups['kube-master'] %}
+Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{ ansible_default_ipv4.address }}:2379,http://{{ ansible_default_ipv4.address }}:4001"
+Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ ansible_default_ipv4.address }}:2380"
+Environment="ETCD_INITIAL_CLUSTER=master=http://{{ ansible_default_ipv4.address }}:2380"
+Environment="ETCD_INITIAL_CLUSTER_STATE=new"
+Environment="ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd"
+Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
+Environment="ETCD_LISTEN_PEER_URLS=http://:2380,http://{{ ansible_default_ipv4.address }}:7001"
+Environment="ETCD_NAME=master"
+{% else %}
+Environment="ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
+Environment="ETCD_INITIAL_CLUSTER=master=http://{{ groups['kube-master'][0] }}:2380"
+Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
+Environment="ETCD_PROXY=on"
+{% endif %}
diff --git a/roles/etcd/templates/systemd-etcd2.service.j2 b/roles/etcd/templates/systemd-etcd2.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..26cda24ebcbedd4886dfa0b4a0274d818fb0b0a3
--- /dev/null
+++ b/roles/etcd/templates/systemd-etcd2.service.j2
@@ -0,0 +1,15 @@
+[Unit]
+Description=etcd2
+Conflicts=etcd.service
+
+[Service]
+User=etcd
+Environment=ETCD_DATA_DIR=/var/lib/etcd2
+Environment=ETCD_NAME=%m
+ExecStart={{ bin_dir }}/etcd2
+Restart=always
+RestartSec=10s
+LimitNOFILE=40000
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/kubernetes/common/defaults/main.yml b/roles/kubernetes/common/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f644baeabde8ddc02e2732b5f2c66d6a2c412bdf
--- /dev/null
+++ b/roles/kubernetes/common/defaults/main.yml
@@ -0,0 +1,41 @@
+# This directory is where all the additional scripts go
+# that Kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+
+# This directory is where all the additional config stuff goes
+# that Kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location.
+# Editing this value will almost surely break something: things
+# like the systemd scripts are hard-coded to look in here.
+# Don't change it.
+kube_config_dir: /etc/kubernetes
+
+# The port the API Server will be listening on.
+kube_master_port: 443
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/certs"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+# This is where you can drop yaml/json files and the kubelet will run those
+# pods on startup
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+dns_domain: "{{ cluster_name }}"
+
+# IP address of the DNS server.
+# Kubernetes will create a pod with several containers, serving as the DNS
+# server and expose it under this IP address. The IP address must be from
+# the range specified as kube_service_addresses. This magic will actually
+# pick the 253rd ip address in the kube_service_addresses range and use that.
+# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
diff --git a/roles/kubernetes/common/files/kube-gen-token.sh b/roles/kubernetes/common/files/kube-gen-token.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fa6a5ddc7523de59f4f0c451340c6c9ed017982c
--- /dev/null
+++ b/roles/kubernetes/common/files/kube-gen-token.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright 2015 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+token_dir=${TOKEN_DIR:-/var/srv/kubernetes}
+token_file="${token_dir}/known_tokens.csv"
+
+create_accounts=("$@")
+
+touch "${token_file}"
+for account in "${create_accounts[@]}"; do
+  if grep ",${account}," "${token_file}" ; then
+    continue
+  fi
+  token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+  echo "${token},${account},${account}" >> "${token_file}"
+  echo "${token}" > "${token_dir}/${account}.token"
+  echo "Added ${account}"
+done
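+
+# Example invocation (hypothetical account/host names), matching how the
+# playbook's gen_tokens.yml calls this script:
+#   TOKEN_DIR=/etc/kubernetes/tokens ./kube-gen-token.sh system:kubelet-node1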
diff --git a/roles/kubernetes/common/files/make-ca-cert.sh b/roles/kubernetes/common/files/make-ca-cert.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3950eec91efbbd5ddc7f16dcd2e865f2a0fa4f9a
--- /dev/null
+++ b/roles/kubernetes/common/files/make-ca-cert.sh
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+# Copyright 2014 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# Caller should set in the env:
+# MASTER_IP - this may be an ip or things like "_use_gce_external_ip_"
+# DNS_DOMAIN - which will be passed to minions in --cluster_domain
+# SERVICE_CLUSTER_IP_RANGE - where all service IPs are allocated
+# MASTER_NAME - the name used for the master's server certificate and subject alt names
+
+# Also the following will be respected
+# CERT_DIR - where to place the finished certs
+# CERT_GROUP - who the group owner of the cert files should be
+
+cert_ip="${MASTER_IP:="${1}"}"
+master_name="${MASTER_NAME:="kubernetes"}"
+service_range="${SERVICE_CLUSTER_IP_RANGE:="10.0.0.0/16"}"
+dns_domain="${DNS_DOMAIN:="cluster.local"}"
+cert_dir="${CERT_DIR:-"/srv/kubernetes"}"
+cert_group="${CERT_GROUP:="kube-cert"}"
+
+# The following certificate pairs are created:
+#
+#  - ca (the cluster's certificate authority)
+#  - server
+#  - kubelet
+#  - kubecfg (for kubectl)
+#
+# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
+# the certs that we need.
+
+# TODO: Add support for discovery on other providers?
+if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
+  cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
+fi
+
+if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
+  cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
+fi
+
+if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
+  cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
+fi
+
+tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
+trap 'rm -rf "${tmpdir}"' EXIT
+cd "${tmpdir}"
+
+# TODO: For now, this is a patched tool that makes subject-alt-name work, when
+# the fix is upstream, move back to the upstream easyrsa. This is cached in GCS
+# but is originally taken from:
+#   https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
+#
+# To update, do the following:
+# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
+# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
+# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
+#
+# Due to GCS caching of public objects, it may take time for this to be widely
+# distributed.
+
+# Calculate the first ip address in the service range
+octets=($(echo "${service_range}" | sed -e 's|/.*||' -e 's/\./ /g'))
+((octets[3]+=1))
+service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
+
+# Determine appropriate subject alt names
+sans="IP:${cert_ip},IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${dns_domain},DNS:${master_name}"
+
+curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
+tar xzf easy-rsa.tar.gz > /dev/null
+cd easy-rsa-master/easyrsa3
+
+(./easyrsa init-pki > /dev/null 2>&1
+ ./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1
+ ./easyrsa --subject-alt-name="${sans}" build-server-full "${master_name}" nopass > /dev/null 2>&1
+ ./easyrsa build-client-full kubelet nopass > /dev/null 2>&1
+ ./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || {
+ # If there was an error in the subshell, just die.
+ # TODO(roberthbailey): add better error handling here
+ echo "=== Failed to generate certificates: Aborting ==="
+ exit 2
+ }
+
+mkdir -p "$cert_dir"
+
+cp -p pki/ca.crt "${cert_dir}/ca.crt"
+cp -p "pki/issued/${master_name}.crt" "${cert_dir}/server.crt" > /dev/null 2>&1
+cp -p "pki/private/${master_name}.key" "${cert_dir}/server.key" > /dev/null 2>&1
+cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
+cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
+cp -p pki/issued/kubelet.crt "${cert_dir}/kubelet.crt"
+cp -p pki/private/kubelet.key "${cert_dir}/kubelet.key"
+
+CERTS=("ca.crt" "server.key" "server.crt" "kubelet.key" "kubelet.crt" "kubecfg.key" "kubecfg.crt")
+for cert in "${CERTS[@]}"; do
+  chgrp "${cert_group}" "${cert_dir}/${cert}"
+  chmod 660 "${cert_dir}/${cert}"
+done
diff --git a/roles/kubernetes/common/meta/main.yml b/roles/kubernetes/common/meta/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..87756afe15709041c425622d9e864b3fa1db17c3
--- /dev/null
+++ b/roles/kubernetes/common/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+  - { role: etcd }
diff --git a/roles/kubernetes/common/tasks/gen_certs.yml b/roles/kubernetes/common/tasks/gen_certs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..edc0897a54d06f37bf68ab562ca331de567d33bc
--- /dev/null
+++ b/roles/kubernetes/common/tasks/gen_certs.yml
@@ -0,0 +1,42 @@
+---
+#- name: Get create ca cert script from Kubernetes
+#  get_url:
+#    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
+#    dest={{ kube_script_dir }}/make-ca-cert.sh mode=0500
+#    force=yes
+
+- name: certs | install cert generation script
+  copy:
+    src=make-ca-cert.sh
+    dest={{ kube_script_dir }}
+    mode=0500
+  changed_when: false
+
+# FIXME This only generates a cert for one master...
+- name: certs | run cert generation script
+  command:
+    "{{ kube_script_dir }}/make-ca-cert.sh {{ inventory_hostname }}"
+  args:
+    creates: "{{ kube_cert_dir }}/server.crt"
+  environment:
+    MASTER_IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+    MASTER_NAME: "{{ inventory_hostname }}"
+    DNS_DOMAIN: "{{ dns_domain }}"
+    SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}"
+    CERT_DIR: "{{ kube_cert_dir }}"
+    CERT_GROUP: "{{ kube_cert_group }}"
+
+- name: certs | check certificate permissions
+  file:
+    path={{ item }}
+    group={{ kube_cert_group }}
+    owner=kube
+    mode=0440
+  with_items:
+    - "{{ kube_cert_dir }}/ca.crt"
+    - "{{ kube_cert_dir }}/server.crt"
+    - "{{ kube_cert_dir }}/server.key"
+    - "{{ kube_cert_dir }}/kubecfg.crt"
+    - "{{ kube_cert_dir }}/kubecfg.key"
+    - "{{ kube_cert_dir }}/kubelet.crt"
+    - "{{ kube_cert_dir }}/kubelet.key"
diff --git a/roles/kubernetes/common/tasks/gen_tokens.yml b/roles/kubernetes/common/tasks/gen_tokens.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cf77d43996e19d43c664cc01f70c91cd73bb6674
--- /dev/null
+++ b/roles/kubernetes/common/tasks/gen_tokens.yml
@@ -0,0 +1,30 @@
+---
+- name: tokens | copy the token gen script
+  copy:
+    src=kube-gen-token.sh
+    dest={{ kube_script_dir }}
+    mode=u+x
+
+- name: tokens | generate tokens for master components
+  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
+  environment:
+    TOKEN_DIR: "{{ kube_token_dir }}"
+  with_nested:
+    - [ "system:controller_manager", "system:scheduler", "system:kubectl", 'system:proxy' ]
+    - "{{ groups['kube-master'][0] }}"
+  register: gentoken
+  changed_when: "'Added' in gentoken.stdout"
+  notify:
+    - restart daemons
+
+- name: tokens | generate tokens for node components
+  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
+  environment:
+    TOKEN_DIR: "{{ kube_token_dir }}"
+  with_nested:
+    - [ 'system:kubelet', 'system:proxy' ]
+    - "{{ groups['kube-node'] }}"
+  register: gentoken
+  changed_when: "'Added' in gentoken.stdout"
+  notify:
+    - restart daemons
diff --git a/roles/kubernetes/common/tasks/main.yml b/roles/kubernetes/common/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..76d3bbc805ce35bc9476a0342572c16a14c50e8f
--- /dev/null
+++ b/roles/kubernetes/common/tasks/main.yml
@@ -0,0 +1,29 @@
+---
+- name: define 'kball' alias for cluster-wide kubectl get
+  lineinfile:
+    dest=/etc/bash.bashrc
+    line="alias kball='{{ bin_dir }}/kubectl get --all-namespaces -o wide'"
+    regexp='^alias kball=.*$'
+    state=present
+    insertafter=EOF
+    create=True
+
+- name: create kubernetes config directory
+  file: path={{ kube_config_dir }} state=directory
+
+- name: create kubernetes script directory
+  file: path={{ kube_script_dir }} state=directory
+
+- name: Make sure manifest directory exists
+  file: path={{ kube_manifest_dir }} state=directory
+
+- name: write the global config file
+  template:
+    src: config.j2
+    dest: "{{ kube_config_dir }}/config"
+  notify:
+    - restart daemons
+
+- include: secrets.yml
+  tags:
+    - secrets
diff --git a/roles/kubernetes/common/tasks/secrets.yml b/roles/kubernetes/common/tasks/secrets.yml
new file mode 100644
index 0000000000000000000000000000000000000000..65107da0b277c40444cf8412ed29b17363311c34
--- /dev/null
+++ b/roles/kubernetes/common/tasks/secrets.yml
@@ -0,0 +1,50 @@
+---
+- name: certs | create system kube-cert group
+  group: name={{ kube_cert_group }} state=present system=yes
+
+- name: create system kube user
+  user:
+    name=kube
+    comment="Kubernetes user"
+    shell=/sbin/nologin
+    state=present
+    system=yes
+    groups={{ kube_cert_group }}
+
+- name: certs | make sure the certificate directory exists
+  file:
+    path={{ kube_cert_dir }}
+    state=directory
+    mode=o-rwx
+    group={{ kube_cert_group }}
+
+- name: tokens | make sure the tokens directory exists
+  file:
+    path={{ kube_token_dir }}
+    state=directory
+    mode=o-rwx
+    group={{ kube_cert_group }}
+
+- include: gen_certs.yml
+  run_once: true
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Read back the CA certificate
+  slurp:
+    src: "{{ kube_cert_dir }}/ca.crt"
+  register: ca_cert
+  run_once: true
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: certs | register the CA certificate as a fact for later use
+  set_fact:
+    kube_ca_cert: "{{ ca_cert.content|b64decode }}"
+
+- name: certs | write CA certificate everywhere
+  copy: content="{{ kube_ca_cert }}" dest="{{ kube_cert_dir }}/ca.crt"
+  notify:
+    - restart daemons
+
+- include: gen_tokens.yml
+  run_once: true
+  when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes/common/templates/config.j2 b/roles/kubernetes/common/templates/config.j2
new file mode 100644
index 0000000000000000000000000000000000000000..526160a7bd3d096c5a77b1251e13ab33b4c9ed97
--- /dev/null
+++ b/roles/kubernetes/common/templates/config.j2
@@ -0,0 +1,26 @@
+###
+# kubernetes system config
+#
+# The following values are used to configure various aspects of all
+# kubernetes services, including
+#
+#   kube-apiserver.service
+#   kube-controller-manager.service
+#   kube-scheduler.service
+#   kubelet.service
+#   kube-proxy.service
+
+# Comma separated list of nodes in the etcd cluster
+# KUBE_ETCD_SERVERS="--etcd_servers="
+
+# logging to stderr means we get it in the systemd journal
+KUBE_LOGTOSTDERR="--logtostderr=true"
+
+# journal message level, 0 is debug
+KUBE_LOG_LEVEL="--v=5"
+
+# Should this cluster be allowed to run privileged docker containers
+KUBE_ALLOW_PRIV="--allow_privileged=true"
+
+# How the replication controller, scheduler, and proxy find the apiserver
+KUBE_MASTER="--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}"
diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..90cd7d5e88a038c2b04b331e348aa4b87996d8da
--- /dev/null
+++ b/roles/kubernetes/master/handlers/main.yml
@@ -0,0 +1,32 @@
+---
+- name: restart daemons
+  command: /bin/true
+  notify:
+    - reload systemd
+    - restart apiserver
+    - restart controller-manager
+    - restart scheduler
+    - restart proxy
+
+- name: reload systemd
+  command: systemctl daemon-reload
+
+- name: restart apiserver
+  service:
+    name: kube-apiserver
+    state: restarted
+
+- name: restart controller-manager
+  service:
+    name: kube-controller-manager
+    state: restarted
+
+- name: restart scheduler
+  service:
+    name: kube-scheduler
+    state: restarted
+
+- name: restart proxy
+  service:
+    name: kube-proxy
+    state: restarted
diff --git a/roles/kubernetes/master/meta/main.yml b/roles/kubernetes/master/meta/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..31675692c658f3e11bc3b05d06503744c6a47cd1
--- /dev/null
+++ b/roles/kubernetes/master/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+  - { role: kubernetes/common }
diff --git a/roles/kubernetes/master/tasks/config.yml b/roles/kubernetes/master/tasks/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..70c0f2d3fe542341476983903f152ef1eb1bbe9e
--- /dev/null
+++ b/roles/kubernetes/master/tasks/config.yml
@@ -0,0 +1,87 @@
+---
+- name: get the node token values from token files
+  slurp:
+    src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
+  with_items:
+    - "system:controller_manager"
+    - "system:scheduler"
+    - "system:kubectl"
+    - "system:proxy"
+  register: tokens
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Set token facts
+  set_fact:
+    controller_manager_token: "{{ tokens.results[0].content|b64decode }}"
+    scheduler_token: "{{ tokens.results[1].content|b64decode }}"
+    kubectl_token: "{{ tokens.results[2].content|b64decode }}"
+    proxy_token: "{{ tokens.results[3].content|b64decode }}"
+
+- name: write the config files for api server
+  template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver
+  notify:
+    - restart daemons
+
+- name: write config file for controller-manager
+  template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager
+  notify:
+    - restart controller-manager
+
+- name: write the kubecfg (auth) file for controller-manager
+  template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig
+  notify:
+    - restart controller-manager
+
+- name: write the config file for scheduler
+  template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler
+  notify:
+    - restart scheduler
+
+- name: write the kubecfg (auth) file for scheduler
+  template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig
+  notify:
+    - restart scheduler
+
+- name: write the kubecfg (auth) file for kubectl
+  template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig
+
+- name: write the config files for proxy
+  template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
+  notify:
+    - restart daemons
+
+- name: write the kubecfg (auth) file for proxy
+  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
+
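+# Each entry in kube_users renders one CSV line: <pass>,<user>,<role>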
+- name: populate users for basic auth in API
+  lineinfile:
+    dest: "{{ kube_users_dir }}/known_users.csv"
+    create: yes
+    line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
+  with_dict: "{{ kube_users }}"
+  notify:
+    - restart apiserver
+
+- name: Enable apiserver
+  service:
+    name: kube-apiserver
+    enabled: yes
+    state: started
+
+- name: Enable controller-manager
+  service:
+    name: kube-controller-manager
+    enabled: yes
+    state: started
+
+- name: Enable scheduler
+  service:
+    name: kube-scheduler
+    enabled: yes
+    state: started
+
+- name: Enable kube-proxy
+  service:
+    name: kube-proxy
+    enabled: yes
+    state: started
diff --git a/roles/kubernetes/master/tasks/install.yml b/roles/kubernetes/master/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..da6c4b7e104993dc85b2fb6a1b932315b2a6485f
--- /dev/null
+++ b/roles/kubernetes/master/tasks/install.yml
@@ -0,0 +1,34 @@
+---
+- name: Write kube-apiserver systemd init file
+  template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service
+  notify: restart daemons
+
+- name: Write kube-controller-manager systemd init file
+  template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service
+  notify: restart daemons
+
+- name: Write kube-scheduler systemd init file
+  template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service
+  notify: restart daemons
+
+- name: Write kube-proxy systemd init file
+  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
+  notify: restart daemons
+
+- name: Install kubernetes binaries
+  copy: 
+     src={{ local_release_dir }}/kubernetes/bin/{{ item }}
+     dest={{ bin_dir }}
+     owner=kube
+     mode=u+x
+  with_items:
+    - kube-apiserver
+    - kube-controller-manager 
+    - kube-scheduler
+    - kube-proxy
+    - kubectl
+  notify:
+    - restart daemons
+
+- name: Allow the apiserver to bind the privileged secure port as the kube user
+  command: setcap cap_net_bind_service+ep {{ bin_dir }}/kube-apiserver
diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8570db68c389b54cc241c45737f459cb88b8ee7d
--- /dev/null
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- include: install.yml
+- include: config.yml
diff --git a/roles/kubernetes/master/templates/apiserver.j2 b/roles/kubernetes/master/templates/apiserver.j2
new file mode 100644
index 0000000000000000000000000000000000000000..3ec15970c39f345ff26b482a6f5398e1bc215482
--- /dev/null
+++ b/roles/kubernetes/master/templates/apiserver.j2
@@ -0,0 +1,25 @@
+###
+# kubernetes system config
+#
+# The following values are used to configure the kube-apiserver
+#
+
+# The address on the local server to listen to.
+KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
+
+# The port on the local server to listen on.
+KUBE_API_PORT="--insecure-port=8080 --secure-port={{ kube_master_port }}"
+
+# KUBELET_PORT="--kubelet_port=10250"
+
+# Address range to use for services
+KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
+
+# Location of the etcd cluster
+KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
+
+# default admission control policies
+KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
+
+# Add your own!
+KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt"
diff --git a/roles/kubernetes/master/templates/controller-manager.j2 b/roles/kubernetes/master/templates/controller-manager.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c7a932900f55ba44c46a823f8ce9a9b858e7c684
--- /dev/null
+++ b/roles/kubernetes/master/templates/controller-manager.j2
@@ -0,0 +1,6 @@
+###
+# The following values are used to configure the kubernetes controller-manager
+
+# defaults from config and apiserver should be adequate
+
+KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service_account_private_key_file={{ kube_cert_dir }}/server.key --root_ca_file={{ kube_cert_dir }}/ca.crt"
diff --git a/roles/kubernetes/master/templates/controller-manager.kubeconfig.j2 b/roles/kubernetes/master/templates/controller-manager.kubeconfig.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c71ac50f367e88da7cc6ced851a39a6b1e875561
--- /dev/null
+++ b/roles/kubernetes/master/templates/controller-manager.kubeconfig.j2
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Config
+current-context: controller-manager-to-{{ cluster_name }}
+preferences: {}
+clusters:
+- cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.crt
+    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
+  name: {{ cluster_name }}
+contexts:
+- context:
+    cluster: {{ cluster_name }}
+    user: controller-manager
+  name: controller-manager-to-{{ cluster_name }}
+users:
+- name: controller-manager
+  user:
+    token: {{ controller_manager_token }}
diff --git a/roles/kubernetes/master/templates/kubectl.kubeconfig.j2 b/roles/kubernetes/master/templates/kubectl.kubeconfig.j2
new file mode 100644
index 0000000000000000000000000000000000000000..dd8f0eabe10d4cfc111009c433855e5814748f7b
--- /dev/null
+++ b/roles/kubernetes/master/templates/kubectl.kubeconfig.j2
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Config
+current-context: kubectl-to-{{ cluster_name }}
+preferences: {}
+clusters:
+- cluster:
+    certificate-authority-data: {{ kube_ca_cert|b64encode }}
+    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
+  name: {{ cluster_name }}
+contexts:
+- context:
+    cluster: {{ cluster_name }}
+    user: kubectl
+  name: kubectl-to-{{ cluster_name }}
+users:
+- name: kubectl
+  user:
+    token: {{ kubectl_token }}
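+
+# Usage sketch (hypothetical): {{ bin_dir }}/kubectl --kubeconfig={{ kube_config_dir }}/kubectl.kubeconfig get nodes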
diff --git a/roles/kubernetes/master/templates/proxy.j2 b/roles/kubernetes/master/templates/proxy.j2
new file mode 100644
index 0000000000000000000000000000000000000000..1a1f7b19d9a884a53323df12480e8f9c122c0861
--- /dev/null
+++ b/roles/kubernetes/master/templates/proxy.j2
@@ -0,0 +1,7 @@
+###
+# kubernetes proxy config
+
+# default config should be adequate
+
+# Add your own!
+KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
diff --git a/roles/kubernetes/master/templates/proxy.kubeconfig.j2 b/roles/kubernetes/master/templates/proxy.kubeconfig.j2
new file mode 100644
index 0000000000000000000000000000000000000000..3618c4b7c6ea24c702bfae5ccbaac840304b827e
--- /dev/null
+++ b/roles/kubernetes/master/templates/proxy.kubeconfig.j2
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Config
+current-context: proxy-to-{{ cluster_name }}
+preferences: {}
+contexts:
+- context:
+    cluster: {{ cluster_name }}
+    user: proxy
+  name: proxy-to-{{ cluster_name }}
+clusters:
+- cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.crt
+    server: http://{{ groups['kube-master'][0] }}:8080
+  name: {{ cluster_name }}
+users:
+- name: proxy
+  user:
+    token: {{ proxy_token }}
diff --git a/roles/kubernetes/master/templates/scheduler.j2 b/roles/kubernetes/master/templates/scheduler.j2
new file mode 100644
index 0000000000000000000000000000000000000000..8af898d0bde32acb77624ee58329cd240683218a
--- /dev/null
+++ b/roles/kubernetes/master/templates/scheduler.j2
@@ -0,0 +1,7 @@
+###
+# kubernetes scheduler config
+
+# default config should be adequate
+
+# Add your own!
+KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig"
diff --git a/roles/kubernetes/master/templates/scheduler.kubeconfig.j2 b/roles/kubernetes/master/templates/scheduler.kubeconfig.j2
new file mode 100644
index 0000000000000000000000000000000000000000..bc6203745eaef96833f9645e561ae7ab914fd4f2
--- /dev/null
+++ b/roles/kubernetes/master/templates/scheduler.kubeconfig.j2
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Config
+current-context: scheduler-to-{{ cluster_name }}
+preferences: {}
+clusters:
+- cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.crt
+    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
+  name: {{ cluster_name }}
+contexts:
+- context:
+    cluster: {{ cluster_name }}
+    user: scheduler
+  name: scheduler-to-{{ cluster_name }}
+users:
+- name: scheduler
+  user:
+    token: {{ scheduler_token }}
diff --git a/roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..1c478c41fe591c1905060fcb7d4ea66b5c32b125
--- /dev/null
+++ b/roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2
@@ -0,0 +1,28 @@
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+Requires=etcd2.service
+After=etcd2.service
+
+[Service]
+EnvironmentFile=/etc/network-environment
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/kubernetes/apiserver
+User=kube
+ExecStart={{ bin_dir }}/kube-apiserver \
+	    $KUBE_LOGTOSTDERR \
+	    $KUBE_LOG_LEVEL \
+	    $KUBE_ETCD_SERVERS \
+	    $KUBE_API_ADDRESS \
+	    $KUBE_API_PORT \
+	    $KUBELET_PORT \
+	    $KUBE_ALLOW_PRIV \
+	    $KUBE_SERVICE_ADDRESSES \
+	    $KUBE_ADMISSION_CONTROL \
+	    $KUBE_API_ARGS
+Restart=on-failure
+Type=notify
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/kubernetes/master/templates/systemd-init/kube-controller-manager.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-controller-manager.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..a308630eb7bd6eeb0b9777424a42b9657464e076
--- /dev/null
+++ b/roles/kubernetes/master/templates/systemd-init/kube-controller-manager.service.j2
@@ -0,0 +1,20 @@
+[Unit]
+Description=Kubernetes Controller Manager
+Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+Requires=etcd2.service
+After=etcd2.service
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/kubernetes/controller-manager
+User=kube
+ExecStart={{ bin_dir }}/kube-controller-manager \
+	    $KUBE_LOGTOSTDERR \
+	    $KUBE_LOG_LEVEL \
+	    $KUBE_MASTER \
+	    $KUBE_CONTROLLER_MANAGER_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/kubernetes/master/templates/systemd-init/kube-proxy.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-proxy.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..55e3e6195bc3ce508e4abed2e958c12df63f6969
--- /dev/null
+++ b/roles/kubernetes/master/templates/systemd-init/kube-proxy.service.j2
@@ -0,0 +1,21 @@
+[Unit]
+Description=Kubernetes Kube-Proxy Server
+Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+{% if overlay_network_plugin|default('') %}
+After=docker.service calico-node.service
+{% else %}
+After=docker.service
+{% endif %}
+
+[Service]
+EnvironmentFile=/etc/network-environment
+ExecStart={{ bin_dir }}/kube-proxy \
+	    $KUBE_LOGTOSTDERR \
+	    $KUBE_LOG_LEVEL \
+	    $KUBE_MASTER \
+	    $KUBE_PROXY_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/kubernetes/master/templates/systemd-init/kube-scheduler.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-scheduler.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c5d93111f149e93ff4b61cca2bc9da4c52a313fd
--- /dev/null
+++ b/roles/kubernetes/master/templates/systemd-init/kube-scheduler.service.j2
@@ -0,0 +1,20 @@
+[Unit]
+Description=Kubernetes Scheduler Plugin
+Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+Requires=etcd2.service
+After=etcd2.service
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/kubernetes/scheduler
+User=kube
+ExecStart={{ bin_dir }}/kube-scheduler \
+	    $KUBE_LOGTOSTDERR \
+	    $KUBE_LOG_LEVEL \
+	    $KUBE_MASTER \
+	    $KUBE_SCHEDULER_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/kubernetes/node/files/fluentd-es.yaml b/roles/kubernetes/node/files/fluentd-es.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..caf90526c26a32d93b2f3570538900d8bd528136
--- /dev/null
+++ b/roles/kubernetes/node/files/fluentd-es.yaml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: fluentd-elasticsearch
+  namespace: kube-system
+spec:
+  containers:
+  - name: fluentd-elasticsearch
+    image: gcr.io/google_containers/fluentd-elasticsearch:1.11
+    resources:
+      limits:
+        cpu: 100m
+    args:
+    - -qq
+    volumeMounts:
+    - name: varlog
+      mountPath: /var/log
+    - name: varlibdockercontainers
+      mountPath: /var/lib/docker/containers
+      readOnly: true
+  terminationGracePeriodSeconds: 30
+  volumes:
+  - name: varlog
+    hostPath:
+      path: /var/log
+  - name: varlibdockercontainers
+    hostPath:
+      path: /var/lib/docker/containers
+
diff --git a/roles/kubernetes/node/handlers/main.yml b/roles/kubernetes/node/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b2327a3460ce7faf1f4b5c27b5f712ef480ae355
--- /dev/null
+++ b/roles/kubernetes/node/handlers/main.yml
@@ -0,0 +1,19 @@
+---
+- name: restart daemons
+  command: /bin/true
+  notify:
+    - reload systemd
+    - restart kubelet
+    - restart proxy
+
+- name: reload systemd
+  command: systemctl daemon-reload
+
+- name: restart kubelet
+  service:
+    name: kubelet
+    state: restarted
+
+- name: restart proxy
+  service:
+    name: kube-proxy
+    state: restarted
diff --git a/roles/kubernetes/node/meta/main.yml b/roles/kubernetes/node/meta/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..31675692c658f3e11bc3b05d06503744c6a47cd1
--- /dev/null
+++ b/roles/kubernetes/node/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+  - { role: kubernetes/common }
diff --git a/roles/kubernetes/node/tasks/config.yml b/roles/kubernetes/node/tasks/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..91658329da805bd701f8177ee727dc33717e0f31
--- /dev/null
+++ b/roles/kubernetes/node/tasks/config.yml
@@ -0,0 +1,61 @@
+---
+- name: Get the node token values
+  slurp:
+    src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
+  with_items:
+    - "system:kubelet"
+    - "system:proxy"
+  register: tokens
+  run_once: true
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Set token facts
+  set_fact:
+    kubelet_token: "{{ tokens.results[0].content|b64decode }}"
+    proxy_token: "{{ tokens.results[1].content|b64decode }}"
+
+- name: Create kubelet environment vars dir
+  file: path=/etc/systemd/system/kubelet.service.d state=directory
+
+- name: Write kubelet config file
+  template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf
+  notify:
+    - reload systemd
+    - restart kubelet
+
+- name: write the kubecfg (auth) file for kubelet
+  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
+  notify:
+    - restart kubelet
+
+- name: Create proxy environment vars dir
+  file: path=/etc/systemd/system/kube-proxy.service.d state=directory
+
+- name: Write proxy config file
+  template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf
+  notify:
+    - reload systemd
+    - restart proxy
+
+- name: write the kubecfg (auth) file for kube-proxy
+  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
+  notify:
+    - restart proxy
+
+- name: Enable kubelet
+  service:
+    name: kubelet
+    enabled: yes
+    state: started
+
+- name: Enable proxy
+  service:
+    name: kube-proxy
+    enabled: yes
+    state: started
+
+- name: addons | Logging | Create Fluentd pod
+  copy:
+    src: fluentd-es.yaml
+    dest: "{{ kube_manifest_dir }}/fluentd-es.yaml"
+  when: enable_logging|default(false)
diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..eaacca05905df7faf2448981760f02bcdc5f86d0
--- /dev/null
+++ b/roles/kubernetes/node/tasks/install.yml
@@ -0,0 +1,20 @@
+---
+- name: Write kube-proxy systemd init file
+  template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
+  notify: restart daemons
+
+- name: Write kubelet systemd init file
+  template: src=systemd-init/kubelet.service.j2 dest=/etc/systemd/system/kubelet.service
+  notify: restart daemons
+
+- name: Install kubernetes binaries
+  copy: 
+     src={{ local_release_dir }}/kubernetes/bin/{{ item }}
+     dest={{ bin_dir }}
+     owner=kube
+     mode=u+x
+  with_items:
+    - kube-proxy
+    - kubelet
+  notify:
+    - restart daemons
diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e0efbaf73cf7676ab3a1b4e1ba5b67d37c050ab7
--- /dev/null
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- include: install.yml
+- include: config.yml
+- include: temp_workaround.yml
diff --git a/roles/kubernetes/node/tasks/temp_workaround.yml b/roles/kubernetes/node/tasks/temp_workaround.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8dcefe5e8733462f25fa990d930c14ed7c48f449
--- /dev/null
+++ b/roles/kubernetes/node/tasks/temp_workaround.yml
@@ -0,0 +1,5 @@
+---
+- name: WARNING temporary workaround! Disable kubelet and kube-proxy on node startup
+  service: name={{ item }} enabled=no
+  with_items:
+    - kubelet
+    - kube-proxy
diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2
new file mode 100644
index 0000000000000000000000000000000000000000..418f6a3c46d522d0eaa7b9f03686b235af90cc76
--- /dev/null
+++ b/roles/kubernetes/node/templates/kubelet.j2
@@ -0,0 +1,21 @@
+[Service]
+Environment="KUBE_LOGTOSTDERR=--logtostderr=true"
+Environment="KUBE_LOG_LEVEL=--v=0"
+Environment="KUBE_ALLOW_PRIV=--allow_privileged=true"
+Environment="KUBE_MASTER=--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}"
+# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
+Environment="KUBELET_ADDRESS=--address=0.0.0.0"
+# The port for the info server to serve on
+# Environment="KUBELET_PORT=--port=10250"
+# You may leave this blank to use the actual hostname
+Environment="KUBELET_HOSTNAME=--hostname_override={{ inventory_hostname }}"
+# location of the api-server
+Environment="KUBELET_API_SERVER=--api_servers=https://{{ groups['kube-master'][0]}}:{{ kube_master_port }}"
+{% if dns_setup %}
+Environment="KUBELET_ARGS=--cluster_dns={{ kube_dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
+{% else %}
+Environment="KUBELET_ARGS=--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
+{% endif %}
+{% if overlay_network_plugin|default('') %}
+Environment="KUBELET_NETWORK_PLUGIN=--network_plugin={{ overlay_network_plugin }}"
+{% endif %}
diff --git a/roles/kubernetes/node/templates/kubelet.kubeconfig.j2 b/roles/kubernetes/node/templates/kubelet.kubeconfig.j2
new file mode 100644
index 0000000000000000000000000000000000000000..79cb17bd5338f78f1f6ba144ae17419ad17cf9a1
--- /dev/null
+++ b/roles/kubernetes/node/templates/kubelet.kubeconfig.j2
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Config
+current-context: kubelet-to-{{ cluster_name }}
+preferences: {}
+clusters:
+- cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.crt
+    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
+  name: {{ cluster_name }}
+contexts:
+- context:
+    cluster: {{ cluster_name }}
+    user: kubelet
+  name: kubelet-to-{{ cluster_name }}
+users:
+- name: kubelet
+  user:
+    token: {{ kubelet_token }}
diff --git a/roles/kubernetes/node/templates/proxy.j2 b/roles/kubernetes/node/templates/proxy.j2
new file mode 100644
index 0000000000000000000000000000000000000000..7c8305a5f82cd2854633ec47add587d2c8ba6daf
--- /dev/null
+++ b/roles/kubernetes/node/templates/proxy.j2
@@ -0,0 +1,6 @@
+###
+# kubernetes proxy config
+
+# default config should be adequate
+[Service]
+Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
diff --git a/roles/kubernetes/node/templates/proxy.kubeconfig.j2 b/roles/kubernetes/node/templates/proxy.kubeconfig.j2
new file mode 100644
index 0000000000000000000000000000000000000000..78d181631e5bfa99aa89f96d75a9804c36dea6d2
--- /dev/null
+++ b/roles/kubernetes/node/templates/proxy.kubeconfig.j2
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Config
+current-context: proxy-to-{{ cluster_name }}
+preferences: {}
+contexts:
+- context:
+    cluster: {{ cluster_name }}
+    user: proxy
+  name: proxy-to-{{ cluster_name }}
+clusters:
+- cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.crt
+    server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
+  name: {{ cluster_name }}
+users:
+- name: proxy
+  user:
+    token: {{ proxy_token }}
diff --git a/roles/kubernetes/node/templates/systemd-init/kube-proxy.service.j2 b/roles/kubernetes/node/templates/systemd-init/kube-proxy.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..55e3e6195bc3ce508e4abed2e958c12df63f6969
--- /dev/null
+++ b/roles/kubernetes/node/templates/systemd-init/kube-proxy.service.j2
@@ -0,0 +1,22 @@
+[Unit]
+Description=Kubernetes Kube-Proxy Server
+Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+{% if overlay_network_plugin|default('') == "calico" %}
+After=docker.service calico-node.service
+{% else %}
+After=docker.service
+{% endif %}
+
+[Service]
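+# /etc/network-environment is written by the overlay_network role (calico or flannel)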
+EnvironmentFile=/etc/network-environment
+ExecStart={{ bin_dir }}/kube-proxy \
+	    $KUBE_LOGTOSTDERR \
+	    $KUBE_LOG_LEVEL \
+	    $KUBE_MASTER \
+	    $KUBE_PROXY_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/kubernetes/node/templates/systemd-init/kubelet.service.j2 b/roles/kubernetes/node/templates/systemd-init/kubelet.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..8fb5fc89a0f3d54d3354c540bcdf87e030a0768d
--- /dev/null
+++ b/roles/kubernetes/node/templates/systemd-init/kubelet.service.j2
@@ -0,0 +1,26 @@
+[Unit]
+Description=Kubernetes Kubelet Server
+Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+{% if overlay_network_plugin|default('') == "calico" %}
+After=docker.service calico-node.service
+{% else %}
+After=docker.service
+{% endif %}
+
+[Service]
+#WorkingDirectory=/var/lib/kubelet
+EnvironmentFile=/etc/network-environment
+ExecStart={{ bin_dir }}/kubelet \
+	    $KUBE_LOGTOSTDERR \
+	    $KUBE_LOG_LEVEL \
+	    $KUBELET_API_SERVER \
+	    $KUBELET_ADDRESS \
+	    $KUBELET_PORT \
+	    $KUBELET_HOSTNAME \
+	    $KUBE_ALLOW_PRIV \
+	    $KUBELET_ARGS \
+	    $KUBELET_NETWORK_PLUGIN
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/overlay_network/handlers/main.yml b/roles/overlay_network/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b875863e3c3e0b202bc9ba08bdd2478368a67646
--- /dev/null
+++ b/roles/overlay_network/handlers/main.yml
@@ -0,0 +1,28 @@
+---
+- name: restart calico-node
+  service: name=calico-node state=restarted
+
+- name: restart docker
+  service: name=docker state=restarted
+
+- name: restart flannel
+  service: name=flannel state=restarted
+  notify:
+    - reload systemd
+    - stop docker
+    - delete docker0
+    - start docker
+  when: inventory_hostname in groups['kube-node']
+
+- name: stop docker
+  service: name=docker state=stopped
+
+- name: delete docker0
+  command: ip link delete docker0
+  ignore_errors: yes
+
+- name: start docker
+  service: name=docker state=started
+
+- name: reload systemd
+  shell: systemctl daemon-reload
diff --git a/roles/overlay_network/tasks/calico.yml b/roles/overlay_network/tasks/calico.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2b5e3d0402bc41a41bfe6c6a12f808f56f3d96fc
--- /dev/null
+++ b/roles/overlay_network/tasks/calico.yml
@@ -0,0 +1,38 @@
+---
+- name: Install calicoctl bin
+  copy: 
+     src={{ local_release_dir }}/calico/bin/calicoctl
+     dest={{ bin_dir }}
+     mode=u+x
+  notify: restart calico-node
+
+- name: Create calicoctl symlink (needed by kubelet)
+  file: src={{ bin_dir }}/calicoctl dest=/usr/bin/calicoctl state=link
+
+- name: Write calico-node systemd init file
+  template: src=calico/calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
+  notify: 
+    - reload systemd
+    - restart calico-node
+
+- name: Write network-environment
+  template: src=calico/network-environment.j2 dest=/etc/network-environment mode=u+x
+  notify: 
+    - reload systemd
+    - restart calico-node
+
+- name: Enable calico-node
+  service: name=calico-node enabled=yes state=started
+
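+# calicoctl stores pool config in etcd on the first master, so a single run configures the whole cluster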
+- name: Remove the calico-node default pool
+  shell: calicoctl pool remove 192.168.0.0/16
+  environment: 
+     ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
+  run_once: true
+
+- name: Add the calico-node desired pool
+  shell: calicoctl pool add {{ overlay_network_subnet }}
+  environment: 
+     ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
+  run_once: true
diff --git a/roles/overlay_network/tasks/flannel.yml b/roles/overlay_network/tasks/flannel.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fc06c55ce117e216c2415313b85f4c960c8ff214
--- /dev/null
+++ b/roles/overlay_network/tasks/flannel.yml
@@ -0,0 +1,58 @@
+---
+- name: Create flannel user
+  user: name=flannel shell=/bin/nologin
+
+- name: Install flannel binaries
+  copy: 
+     src={{ local_release_dir }}/flannel/bin/flanneld
+     dest={{ bin_dir }}
+     owner=flannel
+     mode=u+x
+  notify:
+    - restart flannel
+
+- name: Write flannel.service systemd file
+  template:
+    src: flannel/systemd-flannel.service.j2
+    dest: /etc/systemd/system/flannel.service
+  notify: restart flannel
+
+- name: Write docker.service systemd file
+  template:
+    src: flannel/systemd-docker.service.j2
+    dest: /lib/systemd/system/docker.service
+  notify: restart docker
+
+- name: Set fact for the flannel config file location
+  set_fact:
+    conf_file: "/tmp/flannel-conf.json"
+  run_once: true
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Create flannel config file to go in etcd
+  template: src=flannel/flannel-conf.json.j2 dest={{ conf_file }}
+  run_once: true
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
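+# etcdctl stores the flannel config under /<cluster_name>/network/config; flanneld reads it back via FLANNEL_ETCD_PREFIX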
+- name: Load the flannel configuration into etcd
+  shell: "{{ bin_dir }}/etcdctl set /{{ cluster_name }}/network/config < {{ conf_file }}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
+  notify: restart flannel
+
+- name: Clean up the flannel config file
+  file: path={{ conf_file }} state=absent
+  run_once: true
+  delegate_to: "{{ groups['kube-master'][0] }}"
+
+- name: Write network-environment
+  template: src=flannel/network-environment.j2 dest=/etc/network-environment mode=u+x
+  notify: restart flannel
+
+- name: Launch Flannel
+  service: name=flannel state=started enabled=yes
+  notify:
+    - restart flannel
+
+- name: Enable Docker
+  service: name=docker enabled=yes state=started
diff --git a/roles/overlay_network/tasks/main.yml b/roles/overlay_network/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..47a5d8b306586538e09fe57a70310e01619e12bf
--- /dev/null
+++ b/roles/overlay_network/tasks/main.yml
@@ -0,0 +1,14 @@
+---
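+# Fail fast unless a supported overlay network plugin has been chosen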
+- name: "Test if overlay network is defined"
+  fail: msg="ERROR, One overlay_network variable must be defined (Flannel or Calico)"
+  when: ( overlay_network_plugin is defined and overlay_network_plugin == "calico" and overlay_network_plugin == "flannel" ) or
+        overlay_network_plugin is not defined 
+
+- include: flannel.yml
+  when: overlay_network_plugin == "flannel"
+- include: calico.yml
+  when: overlay_network_plugin == "calico"
+
+- meta: flush_handlers
+
diff --git a/roles/overlay_network/templates/calico/calico-node.service.j2 b/roles/overlay_network/templates/calico/calico-node.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..4f51407b0ea13ed00ac8fd3da219e1d9e098e9e1
--- /dev/null
+++ b/roles/overlay_network/templates/calico/calico-node.service.j2
@@ -0,0 +1,19 @@
+[Unit]
+Description=calicoctl node
+After=etcd2.service
+
+[Service]
+EnvironmentFile=/etc/network-environment
+User=root
+PermissionsStartOnly=true
+ExecStartPre={{ bin_dir }}/calicoctl checksystem --fix
+{% if inventory_hostname in groups['kube-node'] %}
+ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --kubernetes
+{% else %}
+ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4}
+{% endif %}
+RemainAfterExit=yes
+Type=oneshot
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/overlay_network/templates/calico/network-environment.j2 b/roles/overlay_network/templates/calico/network-environment.j2
new file mode 100755
index 0000000000000000000000000000000000000000..d03013f1a70d9c7ff189469a493f08a0919d7d1f
--- /dev/null
+++ b/roles/overlay_network/templates/calico/network-environment.j2
@@ -0,0 +1,19 @@
+#!/bin/bash
+CALICO_IPAM=true
+# This node's IPv4 address
+DEFAULT_IPV4={{ ansible_default_ipv4.address }}
+
+{% if inventory_hostname in groups['kube-node'] %}
+# The kubernetes master IP
+KUBERNETES_MASTER={{ groups['kube-master'][0] }}
+
+# Location of etcd cluster used by Calico.  By default, this uses the etcd
+# instance running on the Kubernetes Master
+ETCD_AUTHORITY={{ groups['kube-master'][0] }}:4001
+
+# The kubernetes-apiserver location - used by the calico plugin
+KUBE_API_ROOT=http://{{ groups['kube-master'][0] }}:8080/api/v1/
+
+# Location of the calicoctl binary - used by the calico plugin
+CALICOCTL_PATH="{{ bin_dir }}/calicoctl"
+{% endif %}
diff --git a/roles/overlay_network/templates/flannel/flannel-conf.json.j2 b/roles/overlay_network/templates/flannel/flannel-conf.json.j2
new file mode 100644
index 0000000000000000000000000000000000000000..e14c4a94561415813540a423d0000ea8c427308c
--- /dev/null
+++ b/roles/overlay_network/templates/flannel/flannel-conf.json.j2
@@ -0,0 +1 @@
+{ "Network": "{{ kube_service_addresses }}", "SubnetLen": {{ overlay_network_host_prefix }}, "Backend": { "Type": "vxlan" } }
diff --git a/roles/overlay_network/templates/flannel/network-environment.j2 b/roles/overlay_network/templates/flannel/network-environment.j2
new file mode 100644
index 0000000000000000000000000000000000000000..ac0b171d49d64b87277e501b57682d43cfcbe3b8
--- /dev/null
+++ b/roles/overlay_network/templates/flannel/network-environment.j2
@@ -0,0 +1 @@
+FLANNEL_ETCD_PREFIX="--etcd-prefix=/{{ cluster_name }}/network"
diff --git a/roles/overlay_network/templates/flannel/systemd-docker.service.j2 b/roles/overlay_network/templates/flannel/systemd-docker.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c38a25e62fd4fd8ac4738b2bdd321a44e7e6b8c3
--- /dev/null
+++ b/roles/overlay_network/templates/flannel/systemd-docker.service.j2
@@ -0,0 +1,18 @@
+[Unit]
+Description=Docker Application Container Engine
+Documentation=http://docs.docker.com
+After=network.target docker.socket flannel.service
+Requires=docker.socket
+
+[Service]
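+# subnet.env is generated by flanneld and provides FLANNEL_SUBNET and FLANNEL_MTU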
+EnvironmentFile=/run/flannel/subnet.env
+EnvironmentFile=-/etc/default/docker
+ExecStart=/usr/bin/docker -d -H fd:// --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU} $DOCKER_OPTS
+MountFlags=slave
+LimitNOFILE=1048576
+LimitNPROC=1048576
+LimitCORE=infinity
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/overlay_network/templates/flannel/systemd-flannel.service.j2 b/roles/overlay_network/templates/flannel/systemd-flannel.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..38ac1c40a6166988bdd1e9324e684473b9649760
--- /dev/null
+++ b/roles/overlay_network/templates/flannel/systemd-flannel.service.j2
@@ -0,0 +1,12 @@
+[Unit]
+Description=Flannel Network Overlay
+Documentation=https://coreos.com/flannel/docs/latest
+
+[Service]
+EnvironmentFile=/etc/network-environment
+ExecStart={{ bin_dir }}/flanneld \
+       $FLANNEL_ETCD_PREFIX
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target