Commit 0afadb91 authored by Matthew Mosesohn, committed by GitHub

Merge pull request #1046 from skyscooby/pedantic-syntax-cleanup

Cleanup legacy syntax, spacing, files all to yml
Parents: d4f15ab4, 3cc14918
Showing 158 additions and 63 deletions
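
Every hunk below applies the same mechanical change: Ansible's legacy inline key=value module arguments are rewritten as native YAML dictionaries, with spacing cleaned up and files renamed to .yml along the way. A minimal before/after sketch of the pattern, using an illustrative task rather than one taken from the commit:

# Legacy inline syntax (the style being removed):
- name: Example | write a config file
  template: src=example.conf.j2 dest=/etc/example/example.conf backup=yes

# Native YAML dictionary syntax (the style being introduced):
- name: Example | write a config file
  template:
    src: example.conf.j2
    dest: /etc/example/example.conf
    backup: yes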
 ---
-- debug: msg="No helm charts"
+- debug:
+    msg: "No helm charts"
@@ -22,21 +22,24 @@
     state: restarted

 - name: Master | wait for kube-scheduler
-  uri: url=http://localhost:10251/healthz
+  uri:
+    url: http://localhost:10251/healthz
   register: scheduler_result
   until: scheduler_result.status == 200
   retries: 15
   delay: 5

 - name: Master | wait for kube-controller-manager
-  uri: url=http://localhost:10252/healthz
+  uri:
+    url: http://localhost:10252/healthz
   register: controller_manager_result
   until: controller_manager_result.status == 200
   retries: 15
   delay: 5

 - name: Master | wait for the apiserver to be running
-  uri: url=http://localhost:8080/healthz
+  uri:
+    url: http://localhost:8080/healthz
   register: result
   until: result.status == 200
   retries: 10
......
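
The three wait tasks above all rely on Ansible's standard polling idiom: register the module result, then re-run the task with until/retries/delay until the condition holds (here up to 15 tries, 5 seconds apart, so roughly 75 seconds). A self-contained sketch of the same idiom, with an illustrative endpoint and variable names not taken from this commit:

- name: Example | wait for a local health endpoint
  uri:
    url: http://localhost:8080/healthz
  register: health_result
  until: health_result.status == 200   # keep retrying until the endpoint returns 200
  retries: 10                          # give up after 10 attempts
  delay: 5                             # wait 5 seconds between attempts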
@@ -36,7 +36,9 @@
 - meta: flush_handlers

 - name: copy kube system namespace manifest
-  copy: src=namespace.yml dest={{kube_config_dir}}/{{system_namespace}}-ns.yml
+  copy:
+    src: namespace.yml
+    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
   run_once: yes
   when: inventory_hostname == groups['kube-master'][0]
   tags: apps
......
@@ -43,7 +43,8 @@
   when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists

 - name: "Pre-upgrade | Pause while waiting for kubelet to delete kube-apiserver pod"
-  pause: seconds=20
+  pause:
+    seconds: 20
   when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists
   tags: kube-apiserver
@@ -12,12 +12,18 @@
   tags: nginx

 - name: Write kubelet config file
-  template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet.env backup=yes
+  template:
+    src: kubelet.j2
+    dest: "{{ kube_config_dir }}/kubelet.env"
+    backup: yes
   notify: restart kubelet
   tags: kubelet

 - name: write the kubecfg (auth) file for kubelet
-  template: src=node-kubeconfig.yaml.j2 dest={{ kube_config_dir }}/node-kubeconfig.yaml backup=yes
+  template:
+    src: node-kubeconfig.yaml.j2
+    dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
+    backup: yes
   notify: restart kubelet
   tags: kubelet
......
 ---
 - name: nginx-proxy | Write static pod
-  template: src=manifests/nginx-proxy.manifest.j2 dest={{kube_manifest_dir}}/nginx-proxy.yml
+  template:
+    src: manifests/nginx-proxy.manifest.j2
+    dest: "{{kube_manifest_dir}}/nginx-proxy.yml"

 - name: nginx-proxy | Make nginx directory
-  file: path=/etc/nginx state=directory mode=0700 owner=root
+  file:
+    path: /etc/nginx
+    state: directory
+    mode: 0700
+    owner: root

 - name: nginx-proxy | Write nginx-proxy configuration
-  template: src=nginx.conf.j2 dest="/etc/nginx/nginx.conf" owner=root mode=0755 backup=yes
+  template:
+    src: nginx.conf.j2
+    dest: "/etc/nginx/nginx.conf"
+    owner: root
+    mode: 0755
+    backup: yes
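
One detail worth watching in conversions like mode=0700 above: YAML reads a leading-zero scalar as an octal number, so an unquoted mode: 0700 still yields the intended permissions, but a mode written without the leading zero (for example mode: 700) would be read as decimal and mean something quite different. Quoting the mode sidesteps the ambiguity, which is what the Ansible documentation recommends; a sketch with an illustrative path, not a change from this commit:

- name: Example | make a directory with unambiguous permissions
  file:
    path: /etc/example
    state: directory
    mode: '0700'   # quoted string, so YAML cannot reinterpret the leading zero
    owner: root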
@@ -14,7 +14,9 @@
   notify: Preinstall | restart network

 - name: Remove kargo specific dhclient hook
-  file: path="{{ dhclienthookfile }}" state=absent
+  file:
+    path: "{{ dhclienthookfile }}"
+    state: absent
   when: dhclienthookfile is defined
   notify: Preinstall | restart network
......
@@ -3,7 +3,9 @@
 # Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time
 - name: install growpart
-  package: name=cloud-utils-growpart state=latest
+  package:
+    name: cloud-utils-growpart
+    state: latest

 - name: check if growpart needs to be run
   command: growpart -N /dev/sda 1
......
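
For context on the check above: growpart -N is a dry run, so the task only reports whether partition 1 of /dev/sda could be grown, without resizing anything. A plausible way such a check gates the actual resize, sketched with assumed register and when names rather than lines from this commit:

- name: check if growpart needs to be run
  command: growpart -N /dev/sda 1
  register: growpart_check
  failed_when: false    # growpart exits non-zero when there is nothing to resize
  changed_when: false   # a dry run never changes the system

- name: resize the partition only when the dry run says it would grow
  command: growpart /dev/sda 1
  when: "'CHANGE' in growpart_check.stdout"   # dry-run output starts with CHANGE: when a resize is possible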
@@ -88,12 +88,18 @@
   tags: [network, calico, weave, canal, bootstrap-os]

 - name: Update package management cache (YUM)
-  yum: update_cache=yes name='*'
+  yum:
+    update_cache: yes
+    name: '*'
   when: ansible_pkg_mgr == 'yum'
   tags: bootstrap-os

 - name: Install latest version of python-apt for Debian distribs
-  apt: name=python-apt state=latest update_cache=yes cache_valid_time=3600
+  apt:
+    name: python-apt
+    state: latest
+    update_cache: yes
+    cache_valid_time: 3600
   when: ansible_os_family == "Debian"
   tags: bootstrap-os
@@ -132,7 +138,9 @@
   register: slc

 - name: Set selinux policy to permissive
-  selinux: policy=targeted state=permissive
+  selinux:
+    policy: targeted
+    state: permissive
   when: ansible_os_family == "RedHat" and slc.stat.exists == True
   changed_when: False
   tags: bootstrap-os
@@ -152,7 +160,8 @@
   tags: bootstrap-os

 - name: Stat sysctl file configuration
-  stat: path={{sysctl_file_path}}
+  stat:
+    path: "{{sysctl_file_path}}"
   register: sysctl_file_stat
   tags: bootstrap-os
@@ -204,7 +213,8 @@
   tags: [bootstrap-os, resolvconf]

 - name: Check if we are running inside an Azure VM
-  stat: path=/var/lib/waagent/
+  stat:
+    path: /var/lib/waagent/
   register: azure_check
   tags: bootstrap-os
......
 ---
-- set_fact: kube_apiserver_count="{{ groups['kube-master'] | length }}"
-- set_fact: kube_apiserver_address="{{ ip | default(ansible_default_ipv4['address']) }}"
-- set_fact: kube_apiserver_access_address="{{ access_ip | default(kube_apiserver_address) }}"
-- set_fact: is_kube_master="{{ inventory_hostname in groups['kube-master'] }}"
-- set_fact: first_kube_master="{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
+- set_fact:
+    kube_apiserver_count: "{{ groups['kube-master'] | length }}"
+- set_fact:
+    kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+- set_fact:
+    kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
+- set_fact:
+    is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
+- set_fact:
+    first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
 - set_fact:
     loadbalancer_apiserver_localhost: false
   when: loadbalancer_apiserver is defined
 - set_fact:
     kube_apiserver_endpoint: |-
       {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
@@ -21,34 +32,54 @@
       {%- endif -%}
       {%- endif %}

-- set_fact: etcd_address="{{ ip | default(ansible_default_ipv4['address']) }}"
-- set_fact: etcd_access_address="{{ access_ip | default(etcd_address) }}"
-- set_fact: etcd_peer_url="https://{{ etcd_access_address }}:2380"
-- set_fact: etcd_client_url="https://{{ etcd_access_address }}:2379"
-- set_fact: etcd_authority="127.0.0.1:2379"
-- set_fact: etcd_endpoint="https://{{ etcd_authority }}"
+- set_fact:
+    etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
+- set_fact:
+    etcd_access_address: "{{ access_ip | default(etcd_address) }}"
+- set_fact:
+    etcd_peer_url: "https://{{ etcd_access_address }}:2380"
+- set_fact:
+    etcd_client_url: "https://{{ etcd_access_address }}:2379"
+- set_fact:
+    etcd_authority: "127.0.0.1:2379"
+- set_fact:
+    etcd_endpoint: "https://{{ etcd_authority }}"
 - set_fact:
     etcd_access_addresses: |-
       {% for item in groups['etcd'] -%}
         https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
       {%- endfor %}
-- set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
+- set_fact:
+    etcd_access_endpoint: "{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
 - set_fact:
     etcd_member_name: |-
       {% for host in groups['etcd'] %}
       {% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
       {% endfor %}
 - set_fact:
     etcd_peer_addresses: |-
       {% for item in groups['etcd'] -%}
         {{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
       {%- endfor %}
 - set_fact:
     is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
 - set_fact:
     etcd_after_v3: etcd_version | version_compare("v3.0.0", ">=")
 - set_fact:
     etcd_container_bin_dir: "{% if etcd_after_v3 %}/usr/local/bin/{% else %}/{% endif %}"
 - set_fact:
     peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
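
All of the conversions in this file produce single-key set_fact tasks. The module does accept several key/value pairs per task, but facts that reference one another must stay in separate tasks, because set_fact evaluates all of its values before any of the new facts exist; that constraint explains why, for example, etcd_access_address cannot be folded into the same task as etcd_peer_url. A hedged sketch of a valid consolidation, not a change made in this commit:

- set_fact:
    # Both values depend only on etcd_access_address, which is set in an
    # earlier task, so they can safely share one set_fact call.
    etcd_peer_url: "https://{{ etcd_access_address }}:2380"
    etcd_client_url: "https://{{ etcd_access_address }}:2379"

Keeping one fact per task, as the commit does, stays closest to the original inline lines and avoids that ordering pitfall entirely.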
......
@@ -39,11 +39,13 @@
   when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

 - name: target temporary resolvconf cloud init file (Container Linux by CoreOS)
-  set_fact: resolvconffile=/tmp/resolveconf_cloud_init_conf
+  set_fact:
+    resolvconffile: /tmp/resolveconf_cloud_init_conf
   when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

 - name: check if /etc/dhclient.conf exists
-  stat: path=/etc/dhclient.conf
+  stat:
+    path: /etc/dhclient.conf
   register: dhclient_stat

 - name: target dhclient conf file for /etc/dhclient.conf
@@ -52,7 +54,8 @@
   when: dhclient_stat.stat.exists

 - name: check if /etc/dhcp/dhclient.conf exists
-  stat: path=/etc/dhcp/dhclient.conf
+  stat:
+    path: /etc/dhcp/dhclient.conf
   register: dhcp_dhclient_stat

 - name: target dhclient conf file for /etc/dhcp/dhclient.conf
......
@@ -142,10 +142,10 @@
 - name: Gen_certs | check certificate permissions
   file:
-    path={{ kube_cert_dir }}
-    group={{ kube_cert_group }}
-    owner=kube
-    recurse=yes
+    path: "{{ kube_cert_dir }}"
+    group: "{{ kube_cert_group }}"
+    owner: kube
+    recurse: yes

 - name: Gen_certs | set permissions on keys
   shell: chmod 0600 {{ kube_cert_dir}}/*key.pem
......
 ---
 - include: check-certs.yml
   tags: [k8s-secrets, facts]

 - include: check-tokens.yml
   tags: [k8s-secrets, facts]

 - name: Make sure the certificate directory exists
   file:
-    path={{ kube_cert_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_cert_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"

 - name: Make sure the tokens directory exists
   file:
-    path={{ kube_token_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_token_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"

 - name: Make sure the users directory exists
   file:
-    path={{ kube_users_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_users_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"

 - name: Populate users for basic auth in API
   lineinfile:
@@ -62,10 +63,10 @@
 - name: "Get_tokens | Make sure the tokens directory exists (on {{groups['kube-master'][0]}})"
   file:
-    path={{ kube_token_dir }}
-    state=directory
-    mode=o-rwx
-    group={{ kube_cert_group }}
+    path: "{{ kube_token_dir }}"
+    state: directory
+    mode: o-rwx
+    group: "{{ kube_cert_group }}"
   run_once: yes
   delegate_to: "{{groups['kube-master'][0]}}"
   when: gen_tokens|default(false)
@@ -77,9 +78,11 @@
 - include: sync_kube_master_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups['kube-master']
   tags: k8s-secrets
+
 - include: sync_kube_node_certs.yml
   when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster']
   tags: k8s-secrets
+
 - include: gen_certs_vault.yml
   when: cert_management == "vault"
   tags: k8s-secrets
......
@@ -35,11 +35,15 @@
     group: root

 - name: Calico-rr | Write calico-rr.env for systemd init file
-  template: src=calico-rr.env.j2 dest=/etc/calico/calico-rr.env
+  template:
+    src: calico-rr.env.j2
+    dest: /etc/calico/calico-rr.env
   notify: restart calico-rr

 - name: Calico-rr | Write calico-rr systemd init file
-  template: src=calico-rr.service.j2 dest=/etc/systemd/system/calico-rr.service
+  template:
+    src: calico-rr.service.j2
+    dest: /etc/systemd/system/calico-rr.service
   notify: restart calico-rr

 - name: Calico-rr | Configure route reflector
......
@@ -60,7 +60,9 @@
   tags: [hyperkube, upgrade]

 - name: Calico | wait for etcd
-  uri: url=https://localhost:2379/health validate_certs=no
+  uri:
+    url: https://localhost:2379/health
+    validate_certs: no
   register: result
   until: result.status == 200 or result.status == 401
   retries: 10
@@ -160,17 +162,23 @@
   when: legacy_calicoctl

 - name: Calico (old) | Write calico-node systemd init file
-  template: src=calico-node.service.legacy.j2 dest=/etc/systemd/system/calico-node.service
+  template:
+    src: calico-node.service.legacy.j2
+    dest: /etc/systemd/system/calico-node.service
   when: legacy_calicoctl
   notify: restart calico-node

 - name: Calico | Write calico.env for systemd init file
-  template: src=calico.env.j2 dest=/etc/calico/calico.env
+  template:
+    src: calico.env.j2
+    dest: /etc/calico/calico.env
   when: not legacy_calicoctl
   notify: restart calico-node

 - name: Calico | Write calico-node systemd init file
-  template: src=calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
+  template:
+    src: calico-node.service.j2
+    dest: /etc/systemd/system/calico-node.service
   when: not legacy_calicoctl
   notify: restart calico-node
......
@@ -28,7 +28,9 @@
     state: restarted

 - name: Flannel | pause while Docker restarts
-  pause: seconds=10 prompt="Waiting for docker restart"
+  pause:
+    seconds: 10
+    prompt: "Waiting for docker restart"

 - name: Flannel | wait for docker
   command: "{{ docker_bin_dir }}/docker images"
......
 ---
 - name: reset | stop services
-  service: name={{ item }} state=stopped
+  service:
+    name: "{{ item }}"
+    state: stopped
   with_items:
     - kubelet
     - etcd
@@ -33,7 +35,9 @@
   shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"

 - name: reset | restart docker if needed
-  service: name=docker state=restarted
+  service:
+    name: docker
+    state: restarted
   when: docker_dropins_removed.changed

 - name: reset | gather mounted kubelet dirs
@@ -46,7 +50,9 @@
   with_items: '{{ mounted_dirs.stdout_lines }}'

 - name: reset | delete some files and directories
-  file: path={{ item }} state=absent
+  file:
+    path: "{{ item }}"
+    state: absent
   with_items:
     - "{{kube_config_dir}}"
     - /var/lib/kubelet
......