bin_dir: /usr/local/bin
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Uncomment this line for CoreOS only.
# Directory where the python binary is installed
# ansible_python_interpreter: "/opt/bin/python"
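# A sketch for limiting this to CoreOS hosts via inventory group vars instead
# (hypothetical group name):
# [coreos:vars]
# ansible_python_interpreter=/opt/bin/python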
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
  root:
    pass: "changeme"
    role: admin
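# A minimal usage sketch (hypothetical apiserver address; -k skips cert
# verification for a self-signed cert):
#   curl -k -u root:changeme https://10.99.0.26:443/api/v1/nodes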
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# For some environments, each node has a publicly accessible
# address and an address it should bind services to. These are
# really inventory-level variables, but described here for consistency.
#
# When advertising access, the access_ip will be used, but will defer to
# ip and then the default ansible ip when unspecified.
#
# When binding to restrict access, the ip variable will be used, but will
# defer to the default ansible ip when unspecified.
#
# The ip variable is used for specific address binding, e.g. the listen address
# for etcd. This is used to help with environments like Vagrant or multi-NIC
# systems where one address should be preferred over another.
# ip: 10.2.2.2
#
# The access_ip variable is used to define how other nodes should access
# the node. This is used in flannel to allow other flannel nodes to see
# this node, for example. The access_ip is especially useful in AWS and Google
# Cloud environments where nodes are accessed remotely by their "public" IP,
# but don't know about that address themselves.
# access_ip: 1.1.1.1
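# A minimal inventory sketch (hypothetical hosts and addresses) combining both:
#   node1 ansible_ssh_host=1.1.1.1 ip=10.2.2.2 access_ip=1.1.1.1
#   node2 ansible_ssh_host=1.1.1.2 ip=10.2.2.3 access_ip=1.1.1.2
# Services bind to the private 10.2.2.x addresses, while other nodes are told
# to reach each node via its public 1.1.1.x address.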
# Service endpoints. May be a VIP or a load-balanced frontend IP, like one
# that HAProxy or Nginx provides, or just a local service endpoint.
# Etcd endpoints use local etcd proxies to reach the etcd cluster via
# auto-evaluated endpoints. Those reuse the access_ip for the etcd cluster,
# if specified, or fall back to localhost:2379.
# Etcd access modes:
# Enable multiaccess to configure clients to access all of the etcd members directly
# as "http://hostX:port, http://hostY:port, ..." and ignore the proxy load balancers.
# This may be the case if clients support and load-balance multiple etcd servers natively.
etcd_multiaccess: false
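# For illustration (hypothetical hosts): with three etcd members and
# etcd_multiaccess enabled, clients would be configured with an endpoint list
# like "http://etcd1:2379, http://etcd2:2379, http://etcd3:2379"; with it
# disabled, each client talks to its local etcd proxy on localhost:2379.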
# The port the API Server will be listening on.
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# For a multi-master architecture:
# kube-proxy doesn't support multiple apiservers for the time being, so you'll
# need to configure your own load balancer.
# This domain name will be inserted into the /etc/hosts file of all servers.
# configuration example with HAProxy:
# listen kubernetes-apiserver-https
#   bind 10.99.0.21:8383
#   option ssl-hello-chk
#   mode tcp
#   timeout client 3h
#   timeout server 3h
#   server master1 10.99.0.26:443
#   server master2 10.99.0.27:443
#   balance roundrobin
# And the corresponding example config vars:
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
# loadbalancer_apiserver:
#   address: 10.99.0.21
#   port: 8383
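# With the example values above, every server's /etc/hosts would then contain:
#   10.99.0.21 lb-apiserver.kubernetes.local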
# For an internal-only multi-master architecture:
# Assume no load balancer is configured for internal access. Enable this flag
# if a load balancer listens on localhost, which may be the case when neither
# loadbalancer_apiserver nor access_ip is configured, or when you want to override both.
loadbalancer_apiserver_localhost: false
# The internal kube-apiserver endpoints:
# The secure endpoint is auto-evaluated. If loadbalancer_apiserver_localhost=true,
# it uses localhost:kube_apiserver_port. Otherwise, it uses the external
# apiserver_loadbalancer_domain_name:loadbalancer_apiserver.port, or defers to
# access_ip:kube_apiserver_port, then ip, then the default ansible ip.
# The insecure endpoint ignores apiserver_loadbalancer_domain_name and access_ip.
# By default, it is localhost:kube_apiserver_insecure_port.
# If loadbalancer_apiserver_localhost is set, it defers to ip, then the default ansible ip.
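# For illustration (hypothetical values): with access_ip: 10.99.0.26 and no
# load balancer configured, the secure endpoint evaluates to
# https://10.99.0.26:443 and the insecure endpoint to http://localhost:8080.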
# Apiserver access modes:
# Configure clients to access all of the apiservers directly as
# "http(s)://hostX:port, http(s)://hostY:port, ..." and ignore the load balancers, if any.
# This may be the case if clients support and load-balance multiple apiservers,
# or when no load balancer is configured.
kube_apiserver_multiaccess: true
# Define connection security for kube controllers, schedulers and proxies
kube_proxy_insecure: true
kube_controller_insecure: true
kube_scheduler_insecure: true
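# When one of these is true, that component reaches the apiserver via the
# insecure local endpoint (localhost:kube_apiserver_insecure_port) instead of
# the secure one.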
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
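# With the /18 service network above, this evaluates to 10.233.0.1 (the first
# usable address of the range).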
# Kubernetes internal network for pods. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network total size (optional). This is the prefix of the
# entire network. Must be unused in your environment.
# kube_network_prefix: 18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With a /18 pod network and /24 allocations per
# node (2^(24-18) = 64 subnets), you have room for 64 nodes with 254 pods per node.
# kube_network_node_prefix: 24
# With Calico it is possible to distribute routes to the border routers of the datacenter.
# Warning: enabling router peering will disable Calico's default behavior ('node mesh').
# The subnets of each node will be distributed by the datacenter router.
# peer_with_router: false
# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such a DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server;
# Kubernetes won't do this for you (yet).
# Upstream dns servers used by dnsmasq (the commented example below uses
# Google's public resolvers; substitute your own):
# upstream_dns_servers:
#   - 8.8.8.8
#   - 8.8.4.4
#
# Use dns server: https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true
dns_domain: "{{ cluster_name }}"
# IP address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
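# With the /18 service network above, skydns_server evaluates to 10.233.0.3
# and dns_server to 10.233.0.2.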
# There are some changes specific to the cloud providers,
# for instance we need to encapsulate packets with some network plugins.
# If set, the possible values are either 'gce', 'aws' or 'openstack'.
# When openstack is used, make sure to source in the openstack credentials,
# like you would do when using nova-client, before starting the playbook.
# cloud_provider:
## Set these proxy values in order to update docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
# no_proxy: ""
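# For example (hypothetical proxy host):
# http_proxy: "http://proxy.example.com:3128"
# https_proxy: "http://proxy.example.com:3128"
# no_proxy: "localhost,127.0.0.1,{{ kube_service_addresses }}"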
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self-hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }}"
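# Additional daemon flags can be appended to the same string; for example,
# adding a registry mirror (hypothetical URL):
# docker_options: "--insecure-registry={{ kube_service_addresses }} --registry-mirror=https://mirror.example.com"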