Commit a3f892c7 authored by Pablo Moreno, committed by GitHub

Merge pull request #1 from kubespray/master

stays up to date with main project
parents 192136df 7fe255e5
Showing with 654 additions and 28 deletions
[Unit]
Description=etcd-proxy docker wrapper
Wants=docker.socket
After=docker.service
[Service]
User=root
PermissionsStartOnly=true
ExecStart={{ docker_bin_dir | default("/usr/bin") }}/docker run --restart=always \
--env-file=/etc/etcd-proxy.env \
{# TODO(mattymo): Allow docker IP binding and disable in envfile
-p 2380:2380 -p 2379:2379 #}
--net=host \
--stop-signal=SIGKILL \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
--name={{ etcd_proxy_member_name | default("etcd-proxy") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd
{% endif %}
ExecStartPre=-{{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_proxy_member_name | default("etcd-proxy") }}
ExecReload={{ docker_bin_dir | default("/usr/bin") }}/docker restart {{ etcd_proxy_member_name | default("etcd-proxy") }}
ExecStop={{ docker_bin_dir | default("/usr/bin") }}/docker stop {{ etcd_proxy_member_name | default("etcd-proxy") }}
Restart=always
RestartSec=15s
[Install]
WantedBy=multi-user.target
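A unit rendered from the template above still has to be installed and enabled on the host. A minimal sketch of the surrounding plumbing, assuming task names and paths that are illustrative rather than part of this commit:

- name: Install etcd-proxy unit file (illustrative)
  template:
    src: etcd-proxy.service.j2
    dest: /etc/systemd/system/etcd-proxy.service

- name: Reload systemd so the new unit is picked up (illustrative)
  command: systemctl daemon-reload

- name: Ensure etcd-proxy is enabled and running (illustrative)
  service:
    name: etcd-proxy
    enabled: yes
    state: started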
[Unit]
Description=etcd-proxy
After=network.target
[Service]
Type=notify
User=etcd
PermissionsStartOnly=true
EnvironmentFile=/etc/etcd-proxy.env
ExecStartPre=/bin/mkdir -p /var/lib/etcd-proxy
ExecStartPre=/bin/chown -R etcd: /var/lib/etcd-proxy
ExecStart={{ bin_dir }}/etcd
NotifyAccess=all
Restart=always
RestartSec=10s
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target
ETCD_DATA_DIR=/var/lib/etcd-proxy
ETCD_PROXY=on
ETCD_LISTEN_CLIENT_URLS={{ etcd_access_endpoint }}
ETCD_NAME={{ etcd_proxy_member_name | default("etcd-proxy") }}
ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
ETCD_DATA_DIR=/var/lib/etcd
ETCD_ADVERTISE_CLIENT_URLS={{ etcd_client_url }}
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_peer_url }}
ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc != 0 %}new{% else %}existing{% endif %}
{% if not is_etcd_proxy %}
ETCD_LISTEN_CLIENT_URLS=http://{{ etcd_address }}:2379,http://127.0.0.1:2379
{% else %}
ETCD_LISTEN_CLIENT_URLS=http://{{ etcd_address }}:2379
{% endif %}
ETCD_ELECTION_TIMEOUT=10000
ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
ETCD_LISTEN_PEER_URLS=http://{{ etcd_address }}:2380
ETCD_NAME={{ etcd_member_name }}
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
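As a sanity check, with purely hypothetical values (etcd_address=10.233.1.11, etcd_member_name=etcd1, etcd_client_url=http://10.233.1.11:2379, etcd_peer_url=http://10.233.1.11:2380, etcd_peer_addresses=etcd1=http://10.233.1.11:2380, on a first bootstrap of a non-proxy member) the template should render roughly as:

ETCD_DATA_DIR=/var/lib/etcd
ETCD_ADVERTISE_CLIENT_URLS=http://10.233.1.11:2379
ETCD_INITIAL_ADVERTISE_PEER_URLS=http://10.233.1.11:2380
ETCD_INITIAL_CLUSTER_STATE=new
ETCD_LISTEN_CLIENT_URLS=http://10.233.1.11:2379,http://127.0.0.1:2379
ETCD_ELECTION_TIMEOUT=10000
ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
ETCD_LISTEN_PEER_URLS=http://10.233.1.11:2380
ETCD_NAME=etcd1
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER=etcd1=http://10.233.1.11:2380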
# Versions
kubedns_version: 1.7
kubednsmasq_version: 1.3
exechealthz_version: 1.1
# Images
kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
kubedns_image_tag: "{{ kubedns_version }}"
kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64"
kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
exechealthz_image_tag: "{{ exechealthz_version }}"
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
- Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
name:
required: false
default: null
description:
- The name associated with the resource
filename:
required: false
default: null
description:
- The path and filename of the resource(s) definition file.
kubectl:
required: false
default: null
description:
- The path to the kubectl bin
namespace:
required: false
default: null
description:
- The namespace associated with the resource(s)
resource:
required: false
default: null
description:
- The resource to perform an action on, e.g. pods (po), replicationControllers (rc), services (svc)
label:
required: false
default: null
description:
- The labels used to filter specific resources.
server:
required: false
default: null
description:
- The url for the API server that commands are executed against.
force:
required: false
default: false
description:
- A flag to indicate to force delete, replace, or stop.
all:
required: false
default: false
description:
- A flag to indicate delete all, stop all, or all namespaces when checking exists.
log_level:
required: false
default: 0
description:
- Indicates the level of verbosity of logging by kubectl.
state:
required: false
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
default: present
description:
- present handles checking existence or creating if a definition file is provided,
absent handles deleting resource(s) based on other options,
latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options.
requirements:
- kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""
EXAMPLES = """
- name: test nginx is present
kube: name=nginx resource=rc state=present
- name: test nginx is stopped
kube: name=nginx resource=rc state=stopped
- name: test nginx is absent
kube: name=nginx resource=rc state=absent
- name: test nginx is present
kube: filename=/tmp/nginx.yml
"""
class KubeManager(object):
def __init__(self, module):
self.module = module
self.kubectl = module.params.get('kubectl')
if self.kubectl is None:
self.kubectl = module.get_bin_path('kubectl', True)
self.base_cmd = [self.kubectl]
if module.params.get('server'):
self.base_cmd.append('--server=' + module.params.get('server'))
if module.params.get('log_level'):
self.base_cmd.append('--v=' + str(module.params.get('log_level')))
if module.params.get('namespace'):
self.base_cmd.append('--namespace=' + module.params.get('namespace'))
self.all = module.params.get('all')
self.force = module.params.get('force')
self.name = module.params.get('name')
self.filename = module.params.get('filename')
self.resource = module.params.get('resource')
self.label = module.params.get('label')
def _execute(self, cmd):
args = self.base_cmd + cmd
try:
rc, out, err = self.module.run_command(args)
if rc != 0:
self.module.fail_json(
msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
except Exception as exc:
self.module.fail_json(
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
return out.splitlines()
def _execute_nofail(self, cmd):
args = self.base_cmd + cmd
rc, out, err = self.module.run_command(args)
if rc != 0:
return None
return out.splitlines()
def create(self, check=True):
if check and self.exists():
return []
cmd = ['create']
if not self.filename:
self.module.fail_json(msg='filename required to create')
cmd.append('--filename=' + self.filename)
return self._execute(cmd)
def replace(self):
if not self.force and not self.exists():
return []
cmd = ['replace']
if self.force:
cmd.append('--force')
if not self.filename:
self.module.fail_json(msg='filename required to reload')
cmd.append('--filename=' + self.filename)
return self._execute(cmd)
def delete(self):
if not self.force and not self.exists():
return []
cmd = ['delete']
if self.filename:
cmd.append('--filename=' + self.filename)
else:
if not self.resource:
self.module.fail_json(msg='resource required to delete without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def exists(self):
cmd = ['get']
if not self.resource:
return False
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
cmd.append('--no-headers')
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all-namespaces')
result = self._execute_nofail(cmd)
if not result:
return False
return True
def stop(self):
if not self.force and not self.exists():
return []
cmd = ['stop']
if self.filename:
cmd.append('--filename=' + self.filename)
else:
if not self.resource:
self.module.fail_json(msg='resource required to stop without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
filename=dict(),
namespace=dict(),
resource=dict(),
label=dict(),
server=dict(),
kubectl=dict(),
force=dict(default=False, type='bool'),
all=dict(default=False, type='bool'),
log_level=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
)
)
changed = False
manager = KubeManager(module)
state = module.params.get('state')
if state == 'present':
result = manager.create()
elif state == 'absent':
result = manager.delete()
elif state == 'reloaded':
result = manager.replace()
elif state == 'stopped':
result = manager.stop()
elif state == 'latest':
if manager.exists():
manager.force = True
result = manager.replace()
else:
result = manager.create(check=False)
else:
module.fail_json(msg='Unrecognized state %s.' % state)
if result:
changed = True
module.exit_json(changed=changed,
msg='success: %s' % (' '.join(result))
)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()
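Beyond the basic EXAMPLES above, the state=latest branch in main() is the one the roles below lean on: if the resource already exists it force-replaces it from the definition file, otherwise it creates it. A minimal playbook sketch (paths and names illustrative, mirroring the kubedns task later in this commit):

- name: deploy or update a manifest (illustrative)
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    name: kubedns
    namespace: kube-system
    resource: rc
    filename: /etc/kubernetes/kubedns-rc.yml
    state: latest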
- name: Write calico-policy-controller yaml
template: src=calico-policy-controller.yml.j2 dest=/etc/kubernetes/calico-policy-controller.yml
when: inventory_hostname == groups['kube-master'][0]
- name: Start Calico policy controller
kube:
kubectl: "{{bin_dir}}/kubectl"
filename: /etc/kubernetes/calico-policy-controller.yml
when: inventory_hostname == groups['kube-master'][0]
---
- name: Kubernetes Apps | Lay Down KubeDNS Template
template: src={{item.file}} dest=/etc/kubernetes/{{item.file}}
with_items:
- {file: kubedns-rc.yml, type: rc}
- {file: kubedns-svc.yml, type: svc}
register: manifests
when: inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Apps | Start Resources
kube:
name: kubedns
namespace: kube-system
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: /etc/kubernetes/{{item.item.file}}
state: "{{item.changed | ternary('latest','present') }}"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
- include: tasks/calico-policy-controller.yml
when: enable_network_policy is defined and enable_network_policy == True
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
matchLabels:
kubernetes.io/cluster-service: "true"
k8s-app: calico-policy
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
k8s-app: calico-policy
spec:
hostNetwork: true
containers:
- name: calico-policy-controller
image: calico/kube-policy-controller:latest
env:
- name: ETCD_ENDPOINTS
value: "{{ etcd_endpoint }}"
# Location of the Kubernetes API - this shouldn't need to be
# changed so long as it is used in conjunction with
# CONFIGURE_ETC_HOSTS="true".
- name: K8S_API
value: "https://kubernetes.default:443"
# Configure /etc/hosts within the container to resolve
# the kubernetes.default Service to the correct clusterIP
# using the environment provided by the kubelet.
# This removes the need for KubeDNS to resolve the Service.
- name: CONFIGURE_ETC_HOSTS
value: "true"
apiVersion: v1
kind: ReplicationController
metadata:
name: kubedns
namespace: kube-system
labels:
k8s-app: kubedns
version: v19
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kubedns
version: v19
template:
metadata:
labels:
k8s-app: kubedns
version: v19
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: kubedns
image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 30
timeoutSeconds: 5
args:
# command = "/kube-dns"
- --domain={{ dns_domain }}.
- --dns-port=10053
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- name: dnsmasq
image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
args:
- --log-facility=-
- --cache-size=1000
- --no-resolv
- --server=127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- name: healthz
image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 10m
memory: 50Mi
requests:
cpu: 10m
# Note that this container shouldn't really need 50Mi of memory. The
# limits are set higher than expected pending investigation on #29688.
# The extra memory was stolen from the kubedns container to keep the
# net memory requested by the pod constant.
memory: 50Mi
args:
- -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null
- -port=8080
- -quiet
ports:
- containerPort: 8080
protocol: TCP
dnsPolicy: Default # Don't use cluster DNS.
apiVersion: v1
kind: Service
metadata:
name: kubedns
namespace: kube-system
labels:
k8s-app: kubedns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "kubedns"
spec:
selector:
k8s-app: kubedns
clusterIP: {{ skydns_server }}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
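Once the service is up, resolution through the advertised clusterIP can be spot-checked from any pod, mirroring the probe the exechealthz sidecar runs above (10.233.0.3 is a hypothetical stand-in for whatever {{ skydns_server }} renders to):

nslookup kubernetes.default.svc.{{ dns_domain }} 10.233.0.3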
---
kpm_registry: "https://api.kpm.sh"
kpm_namespace: "default"
kpm_packages: []
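kpm_packages defaults to an empty list, which short-circuits the kpm tasks below. A hypothetical inventory override (the entry shape is assumed from kpm's namespace/package conventions, not taken from this commit):

kpm_packages:
  - name: kube-system/grafana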
---
- debug: msg="No helm charts"
File moved
---
- name: Install pip
  action:
    module: "{{ ansible_pkg_mgr }}"
    name: "python-pip"
    state: latest
  when: ansible_os_family != "CoreOS" and kpm_packages | length > 0

- name: install kpm
  pip:
    name: "kpm"
    state: "present"
    version: "0.16.1"
  when: kpm_packages | length > 0

- name: manage kubernetes applications
  ...
dependencies:
- {role: kubernetes-apps/ansible, tags: apps}
- {role: kubernetes-apps/kpm, tags: [apps, kpm]}
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
# An experimental, dev/test-only dynamic volume provisioner
# for PetSets. Works for kube >= v1.3 only.
kube_hostpath_dynamic_provisioner: "false"
# This is where you can drop yaml/json files and the kubelet will run those
# pods on startup
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This directory is where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location.
# Editing this value will almost surely break something. Don't
# change it. Things like the systemd scripts are hard coded to
# look in here. Don't do it.
kube_config_dir: /etc/kubernetes
# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
kube_apiserver_insecure_bind_address: 127.0.0.1
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
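Apart from kube_config_dir, which the comments above warn against touching, these are ordinary role defaults meant to be overridden from inventory rather than by editing the role. A minimal, illustrative group_vars snippet:

kube_log_dir: /var/log/kube
kube_hostpath_dynamic_provisioner: "true"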
---
- name: Master | restart kubelet
  command: /bin/true
  notify:
    - Master | reload systemd
    - Master | reload kubelet
    - Master | wait for master static pods

- name: Master | wait for master static pods
  command: /bin/true
  notify:
    - Master | wait for the apiserver to be running
    - Master | wait for kube-scheduler
    - Master | wait for kube-controller-manager

- name: Master | reload systemd
  command: systemctl daemon-reload
  when: ansible_service_mgr == "systemd"

- name: Master | reload kubelet
  service:
    name: kubelet
    state: restarted

- name: Master | wait for kube-scheduler
  uri: url=http://localhost:10251/healthz
  register: scheduler_result
  until: scheduler_result.status == 200
  retries: 15
  delay: 5

- name: Master | wait for kube-controller-manager
  uri: url=http://localhost:10252/healthz
  register: controller_manager_result
  until: controller_manager_result.status == 200
  retries: 15
  delay: 5

- name: Master | wait for the apiserver to be running
  uri: url=http://localhost:8080/healthz
  register: result
  until: result.status == 200
  retries: 10
  delay: 6
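The /bin/true handlers exist only to fan out notifications: notifying "Master | restart kubelet" from any task walks the whole chain (reload systemd, restart the kubelet, then wait on the static pods' health endpoints). A hedged example of a task that would trigger it (template name illustrative):

- name: Write kubelet config (illustrative)
  template:
    src: kubelet.j2
    dest: /etc/kubernetes/kubelet.env
  notify: Master | restart kubelet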
---
dependencies:
  - role: download
    file: "{{ downloads.hyperkube }}"
  - { role: etcd }
  - { role: kubernetes/node }