Skip to content
Snippets Groups Projects
Commit d821448e authored by Matthew Mosesohn's avatar Matthew Mosesohn Committed by GitHub
Browse files

Merge branch 'master' into synthscale

parents 2ba66f0b 0afadb91
No related branches found
No related tags found
No related merge requests found
Showing
with 159 additions and 61 deletions
...@@ -8,23 +8,33 @@ ...@@ -8,23 +8,33 @@
shell: nohup bash -c "sleep 5 && shutdown -r now 'Reboot required for updated kernel'" & shell: nohup bash -c "sleep 5 && shutdown -r now 'Reboot required for updated kernel'" &
- name: Wait for some seconds - name: Wait for some seconds
pause: seconds=10 pause:
seconds: 10
- set_fact: - set_fact:
is_bastion: "{{ inventory_hostname == 'bastion' }}" is_bastion: "{{ inventory_hostname == 'bastion' }}"
wait_for_delegate: "localhost" wait_for_delegate: "localhost"
- set_fact: - set_fact:
wait_for_delegate: "{{hostvars['bastion']['ansible_ssh_host']}}" wait_for_delegate: "{{hostvars['bastion']['ansible_ssh_host']}}"
when: "{{ 'bastion' in groups['all'] }}" when: "{{ 'bastion' in groups['all'] }}"
- name: wait for bastion to come back - name: wait for bastion to come back
wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300 wait_for:
host: "{{ ansible_ssh_host }}"
port: 22
delay: 10
timeout: 300
become: false become: false
delegate_to: localhost delegate_to: localhost
when: "is_bastion" when: "is_bastion"
- name: waiting for server to come back (using bastion if necessary) - name: waiting for server to come back (using bastion if necessary)
wait_for: host={{ ansible_ssh_host }} port=22 delay=10 timeout=300 wait_for:
host: "{{ ansible_ssh_host }}"
port: 22
delay: 10
timeout: 300
become: false become: false
delegate_to: "{{ wait_for_delegate }}" delegate_to: "{{ wait_for_delegate }}"
when: "not is_bastion" when: "not is_bastion"
...@@ -5,7 +5,9 @@ ...@@ -5,7 +5,9 @@
tags: facts tags: facts
- name: Write calico-policy-controller yaml - name: Write calico-policy-controller yaml
template: src=calico-policy-controller.yml.j2 dest={{kube_config_dir}}/calico-policy-controller.yml template:
src: calico-policy-controller.yml.j2
dest: "{{kube_config_dir}}/calico-policy-controller.yml"
when: inventory_hostname == groups['kube-master'][0] when: inventory_hostname == groups['kube-master'][0]
- name: Start of Calico policy controller - name: Start of Calico policy controller
......
--- ---
- name: Kubernetes Apps | Wait for kube-apiserver - name: Kubernetes Apps | Wait for kube-apiserver
uri: url=http://localhost:8080/healthz uri:
url: http://localhost:8080/healthz
register: result register: result
until: result.status == 200 until: result.status == 200
retries: 10 retries: 10
...@@ -8,7 +9,9 @@ ...@@ -8,7 +9,9 @@
when: inventory_hostname == groups['kube-master'][0] when: inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Apps | Lay Down KubeDNS Template - name: Kubernetes Apps | Lay Down KubeDNS Template
template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}} template:
src: "{{item.file}}"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items: with_items:
- {file: kubedns-rc.yml, type: rc} - {file: kubedns-rc.yml, type: rc}
- {file: kubedns-svc.yml, type: svc} - {file: kubedns-svc.yml, type: svc}
......
- name: Kubernetes Apps | Lay Down Netchecker Template - name: Kubernetes Apps | Lay Down Netchecker Template
template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}} template:
src: "{{item.file}}"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items: with_items:
- {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent} - {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent}
- {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet} - {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet}
......
--- ---
- debug: msg="No helm charts" - debug:
msg: "No helm charts"
...@@ -22,21 +22,24 @@ ...@@ -22,21 +22,24 @@
state: restarted state: restarted
- name: Master | wait for kube-scheduler - name: Master | wait for kube-scheduler
uri: url=http://localhost:10251/healthz uri:
url: http://localhost:10251/healthz
register: scheduler_result register: scheduler_result
until: scheduler_result.status == 200 until: scheduler_result.status == 200
retries: 15 retries: 15
delay: 5 delay: 5
- name: Master | wait for kube-controller-manager - name: Master | wait for kube-controller-manager
uri: url=http://localhost:10252/healthz uri:
url: http://localhost:10252/healthz
register: controller_manager_result register: controller_manager_result
until: controller_manager_result.status == 200 until: controller_manager_result.status == 200
retries: 15 retries: 15
delay: 5 delay: 5
- name: Master | wait for the apiserver to be running - name: Master | wait for the apiserver to be running
uri: url=http://localhost:8080/healthz uri:
url: http://localhost:8080/healthz
register: result register: result
until: result.status == 200 until: result.status == 200
retries: 10 retries: 10
......
...@@ -36,7 +36,9 @@ ...@@ -36,7 +36,9 @@
- meta: flush_handlers - meta: flush_handlers
- name: copy kube system namespace manifest - name: copy kube system namespace manifest
copy: src=namespace.yml dest={{kube_config_dir}}/{{system_namespace}}-ns.yml copy:
src: namespace.yml
dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
run_once: yes run_once: yes
when: inventory_hostname == groups['kube-master'][0] when: inventory_hostname == groups['kube-master'][0]
tags: apps tags: apps
......
...@@ -43,7 +43,8 @@ ...@@ -43,7 +43,8 @@
when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists
- name: "Pre-upgrade | Pause while waiting for kubelet to delete kube-apiserver pod" - name: "Pre-upgrade | Pause while waiting for kubelet to delete kube-apiserver pod"
pause: seconds=20 pause:
seconds: 20
when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists
tags: kube-apiserver tags: kube-apiserver
...@@ -12,12 +12,18 @@ ...@@ -12,12 +12,18 @@
tags: nginx tags: nginx
- name: Write kubelet config file - name: Write kubelet config file
template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet.env backup=yes template:
src: kubelet.j2
dest: "{{ kube_config_dir }}/kubelet.env"
backup: yes
notify: restart kubelet notify: restart kubelet
tags: kubelet tags: kubelet
- name: write the kubecfg (auth) file for kubelet - name: write the kubecfg (auth) file for kubelet
template: src=node-kubeconfig.yaml.j2 dest={{ kube_config_dir }}/node-kubeconfig.yaml backup=yes template:
src: node-kubeconfig.yaml.j2
dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
backup: yes
notify: restart kubelet notify: restart kubelet
tags: kubelet tags: kubelet
......
--- ---
- name: nginx-proxy | Write static pod - name: nginx-proxy | Write static pod
template: src=manifests/nginx-proxy.manifest.j2 dest={{kube_manifest_dir}}/nginx-proxy.yml template:
src: manifests/nginx-proxy.manifest.j2
dest: "{{kube_manifest_dir}}/nginx-proxy.yml"
- name: nginx-proxy | Make nginx directory - name: nginx-proxy | Make nginx directory
file: path=/etc/nginx state=directory mode=0700 owner=root file:
path: /etc/nginx
state: directory
mode: 0700
owner: root
- name: nginx-proxy | Write nginx-proxy configuration - name: nginx-proxy | Write nginx-proxy configuration
template: src=nginx.conf.j2 dest="/etc/nginx/nginx.conf" owner=root mode=0755 backup=yes template:
src: nginx.conf.j2
dest: "/etc/nginx/nginx.conf"
owner: root
mode: 0755
backup: yes
...@@ -14,7 +14,9 @@ ...@@ -14,7 +14,9 @@
notify: Preinstall | restart network notify: Preinstall | restart network
- name: Remove kargo specific dhclient hook - name: Remove kargo specific dhclient hook
file: path="{{ dhclienthookfile }}" state=absent file:
path: "{{ dhclienthookfile }}"
state: absent
when: dhclienthookfile is defined when: dhclienthookfile is defined
notify: Preinstall | restart network notify: Preinstall | restart network
......
...@@ -3,7 +3,9 @@ ...@@ -3,7 +3,9 @@
# Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time # Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time
- name: install growpart - name: install growpart
package: name=cloud-utils-growpart state=latest package:
name: cloud-utils-growpart
state: latest
- name: check if growpart needs to be run - name: check if growpart needs to be run
command: growpart -N /dev/sda 1 command: growpart -N /dev/sda 1
......
...@@ -88,12 +88,18 @@ ...@@ -88,12 +88,18 @@
tags: [network, calico, weave, canal, bootstrap-os] tags: [network, calico, weave, canal, bootstrap-os]
- name: Update package management cache (YUM) - name: Update package management cache (YUM)
yum: update_cache=yes name='*' yum:
update_cache: yes
name: '*'
when: ansible_pkg_mgr == 'yum' when: ansible_pkg_mgr == 'yum'
tags: bootstrap-os tags: bootstrap-os
- name: Install latest version of python-apt for Debian distribs - name: Install latest version of python-apt for Debian distribs
apt: name=python-apt state=latest update_cache=yes cache_valid_time=3600 apt:
name: python-apt
state: latest
update_cache: yes
cache_valid_time: 3600
when: ansible_os_family == "Debian" when: ansible_os_family == "Debian"
tags: bootstrap-os tags: bootstrap-os
...@@ -125,9 +131,17 @@ ...@@ -125,9 +131,17 @@
tags: bootstrap-os tags: bootstrap-os
# Todo : selinux configuration # Todo : selinux configuration
- name: Set selinux policy to permissive - name: Confirm selinux deployed
selinux: policy=targeted state=permissive stat:
path: /etc/selinux/config
when: ansible_os_family == "RedHat" when: ansible_os_family == "RedHat"
register: slc
- name: Set selinux policy to permissive
selinux:
policy: targeted
state: permissive
when: ansible_os_family == "RedHat" and slc.stat.exists == True
changed_when: False changed_when: False
tags: bootstrap-os tags: bootstrap-os
...@@ -146,7 +160,8 @@ ...@@ -146,7 +160,8 @@
tags: bootstrap-os tags: bootstrap-os
- name: Stat sysctl file configuration - name: Stat sysctl file configuration
stat: path={{sysctl_file_path}} stat:
path: "{{sysctl_file_path}}"
register: sysctl_file_stat register: sysctl_file_stat
tags: bootstrap-os tags: bootstrap-os
...@@ -198,7 +213,8 @@ ...@@ -198,7 +213,8 @@
tags: [bootstrap-os, resolvconf] tags: [bootstrap-os, resolvconf]
- name: Check if we are running inside a Azure VM - name: Check if we are running inside a Azure VM
stat: path=/var/lib/waagent/ stat:
path: /var/lib/waagent/
register: azure_check register: azure_check
tags: bootstrap-os tags: bootstrap-os
......
--- ---
- set_fact: kube_apiserver_count="{{ groups['kube-master'] | length }}" - set_fact:
- set_fact: kube_apiserver_address="{{ ip | default(ansible_default_ipv4['address']) }}" kube_apiserver_count: "{{ groups['kube-master'] | length }}"
- set_fact: kube_apiserver_access_address="{{ access_ip | default(kube_apiserver_address) }}"
- set_fact: is_kube_master="{{ inventory_hostname in groups['kube-master'] }}" - set_fact:
- set_fact: first_kube_master="{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}" kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
- set_fact:
kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
- set_fact:
is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
- set_fact:
first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
- set_fact: - set_fact:
loadbalancer_apiserver_localhost: false loadbalancer_apiserver_localhost: false
when: loadbalancer_apiserver is defined when: loadbalancer_apiserver is defined
- set_fact: - set_fact:
kube_apiserver_endpoint: |- kube_apiserver_endpoint: |-
{% if not is_kube_master and loadbalancer_apiserver_localhost -%} {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
...@@ -21,34 +32,54 @@ ...@@ -21,34 +32,54 @@
{%- endif -%} {%- endif -%}
{%- endif %} {%- endif %}
- set_fact: etcd_address="{{ ip | default(ansible_default_ipv4['address']) }}" - set_fact:
- set_fact: etcd_access_address="{{ access_ip | default(etcd_address) }}" etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
- set_fact: etcd_peer_url="https://{{ etcd_access_address }}:2380"
- set_fact: etcd_client_url="https://{{ etcd_access_address }}:2379" - set_fact:
- set_fact: etcd_authority="127.0.0.1:2379" etcd_access_address: "{{ access_ip | default(etcd_address) }}"
- set_fact: etcd_endpoint="https://{{ etcd_authority }}"
- set_fact:
etcd_peer_url: "https://{{ etcd_access_address }}:2380"
- set_fact:
etcd_client_url: "https://{{ etcd_access_address }}:2379"
- set_fact:
etcd_authority: "127.0.0.1:2379"
- set_fact:
etcd_endpoint: "https://{{ etcd_authority }}"
- set_fact: - set_fact:
etcd_access_addresses: |- etcd_access_addresses: |-
{% for item in groups['etcd'] -%} {% for item in groups['etcd'] -%}
https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %} https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
{%- endfor %} {%- endfor %}
- set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
- set_fact:
etcd_access_endpoint: "{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
- set_fact: - set_fact:
etcd_member_name: |- etcd_member_name: |-
{% for host in groups['etcd'] %} {% for host in groups['etcd'] %}
{% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %} {% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
{% endfor %} {% endfor %}
- set_fact: - set_fact:
etcd_peer_addresses: |- etcd_peer_addresses: |-
{% for item in groups['etcd'] -%} {% for item in groups['etcd'] -%}
{{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %} {{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
{%- endfor %} {%- endfor %}
- set_fact: - set_fact:
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}" is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
- set_fact: - set_fact:
etcd_after_v3: etcd_version | version_compare("v3.0.0", ">=") etcd_after_v3: etcd_version | version_compare("v3.0.0", ">=")
- set_fact: - set_fact:
etcd_container_bin_dir: "{% if etcd_after_v3 %}/usr/local/bin/{% else %}/{% endif %}" etcd_container_bin_dir: "{% if etcd_after_v3 %}/usr/local/bin/{% else %}/{% endif %}"
- set_fact: - set_fact:
peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}" peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
......
...@@ -39,11 +39,13 @@ ...@@ -39,11 +39,13 @@
when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
- name: target temporary resolvconf cloud init file (Container Linux by CoreOS) - name: target temporary resolvconf cloud init file (Container Linux by CoreOS)
set_fact: resolvconffile=/tmp/resolveconf_cloud_init_conf set_fact:
resolvconffile: /tmp/resolveconf_cloud_init_conf
when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
- name: check if /etc/dhclient.conf exists - name: check if /etc/dhclient.conf exists
stat: path=/etc/dhclient.conf stat:
path: /etc/dhclient.conf
register: dhclient_stat register: dhclient_stat
- name: target dhclient conf file for /etc/dhclient.conf - name: target dhclient conf file for /etc/dhclient.conf
...@@ -52,7 +54,8 @@ ...@@ -52,7 +54,8 @@
when: dhclient_stat.stat.exists when: dhclient_stat.stat.exists
- name: check if /etc/dhcp/dhclient.conf exists - name: check if /etc/dhcp/dhclient.conf exists
stat: path=/etc/dhcp/dhclient.conf stat:
path: /etc/dhcp/dhclient.conf
register: dhcp_dhclient_stat register: dhcp_dhclient_stat
- name: target dhclient conf file for /etc/dhcp/dhclient.conf - name: target dhclient conf file for /etc/dhcp/dhclient.conf
......
...@@ -146,10 +146,10 @@ ...@@ -146,10 +146,10 @@
- name: Gen_certs | check certificate permissions - name: Gen_certs | check certificate permissions
file: file:
path={{ kube_cert_dir }} path: "{{ kube_cert_dir }}"
group={{ kube_cert_group }} group: "{{ kube_cert_group }}"
owner=kube owner: kube
recurse=yes recurse: yes
- name: Gen_certs | set permissions on keys - name: Gen_certs | set permissions on keys
shell: chmod 0600 {{ kube_cert_dir}}/*key.pem shell: chmod 0600 {{ kube_cert_dir}}/*key.pem
......
--- ---
- include: check-certs.yml - include: check-certs.yml
tags: [k8s-secrets, facts] tags: [k8s-secrets, facts]
- include: check-tokens.yml - include: check-tokens.yml
tags: [k8s-secrets, facts] tags: [k8s-secrets, facts]
- name: Make sure the certificate directory exists - name: Make sure the certificate directory exists
file: file:
path={{ kube_cert_dir }} path: "{{ kube_cert_dir }}"
state=directory state: directory
mode=o-rwx mode: o-rwx
group={{ kube_cert_group }} group: "{{ kube_cert_group }}"
- name: Make sure the tokens directory exists - name: Make sure the tokens directory exists
file: file:
path={{ kube_token_dir }} path: "{{ kube_token_dir }}"
state=directory state: directory
mode=o-rwx mode: o-rwx
group={{ kube_cert_group }} group: "{{ kube_cert_group }}"
- name: Make sure the users directory exists - name: Make sure the users directory exists
file: file:
path={{ kube_users_dir }} path: "{{ kube_users_dir }}"
state=directory state: directory
mode=o-rwx mode: o-rwx
group={{ kube_cert_group }} group: "{{ kube_cert_group }}"
- name: Populate users for basic auth in API - name: Populate users for basic auth in API
lineinfile: lineinfile:
...@@ -62,10 +63,10 @@ ...@@ -62,10 +63,10 @@
- name: "Get_tokens | Make sure the tokens directory exists (on {{groups['kube-master'][0]}})" - name: "Get_tokens | Make sure the tokens directory exists (on {{groups['kube-master'][0]}})"
file: file:
path={{ kube_token_dir }} path: "{{ kube_token_dir }}"
state=directory state: directory
mode=o-rwx mode: o-rwx
group={{ kube_cert_group }} group: "{{ kube_cert_group }}"
run_once: yes run_once: yes
delegate_to: "{{groups['kube-master'][0]}}" delegate_to: "{{groups['kube-master'][0]}}"
when: gen_tokens|default(false) when: gen_tokens|default(false)
...@@ -77,9 +78,11 @@ ...@@ -77,9 +78,11 @@
- include: sync_kube_master_certs.yml - include: sync_kube_master_certs.yml
when: cert_management == "vault" and inventory_hostname in groups['kube-master'] when: cert_management == "vault" and inventory_hostname in groups['kube-master']
tags: k8s-secrets tags: k8s-secrets
- include: sync_kube_node_certs.yml - include: sync_kube_node_certs.yml
when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster'] when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster']
tags: k8s-secrets tags: k8s-secrets
- include: gen_certs_vault.yml - include: gen_certs_vault.yml
when: cert_management == "vault" when: cert_management == "vault"
tags: k8s-secrets tags: k8s-secrets
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment