diff --git a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml index 9705401105fcf799865aefd21818f47a8005fa71..a1c5d7f8a03eae8102c27a4fc9a5fc870889dd40 100644 --- a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml +++ b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml @@ -1,6 +1,6 @@ --- - name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV - template: src={{item.file}} dest=/etc/kubernetes/{{item.dest}} + template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}} with_items: - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json} - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml} @@ -13,7 +13,7 @@ namespace: default kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" - filename: "/etc/kubernetes/{{item.item.dest}}" + filename: "{{kube_config_dir}}/{{item.item.dest}}" state: "{{item.changed | ternary('latest','present') }}" with_items: "{{ gluster_pv.results }}" when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml index 65b65fe3968efe7457ad7861460a12e0fc28efdb..c90e173e72e45ae7fa24fd498155cea8a319033c 100644 --- a/inventory/group_vars/all.yml +++ b/inventory/group_vars/all.yml @@ -4,6 +4,28 @@ bootstrap_os: none # Directory where the binaries will be installed bin_dir: /usr/local/bin +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" +system_namespace: kube-system + +# Logging directory (sysvinit systems) +kube_log_dir: "/var/log/kubernetes" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +# This is where to save basic auth file +kube_users_dir: "{{ kube_config_dir }}/users" + # Where the binaries will be downloaded. # Note: ensure that you've enough disk space (about 1G) local_release_dir: "/tmp/releases" diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index fa89d6c6ab53a10ce65c1617ff90f6d713ae1588..468b23779e0fac104d852311c1cd880642c5aa1f 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -34,7 +34,7 @@ state: link - name: Create dnsmasq manifests - template: src={{item.file}} dest=/etc/kubernetes/{{item.file}} + template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}} with_items: - {file: dnsmasq-ds.yml, type: ds} - {file: dnsmasq-svc.yml, type: svc} @@ -44,10 +44,10 @@ - name: Start Resources kube: name: dnsmasq - namespace: kube-system + namespace: "{{system_namespace}}" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" - filename: /etc/kubernetes/{{item.item.file}} + filename: "{{kube_config_dir}}/{{item.item.file}}" state: "{{item.changed | ternary('latest','present') }}" with_items: "{{ manifests.results }}" when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/dnsmasq/templates/dnsmasq-ds.yml b/roles/dnsmasq/templates/dnsmasq-ds.yml index 2f4a1cdd7884fa549d4ccb894c6919c879d25958..08ff70bff099ecb22b4063b4c7b04ae5c33f4fbd 100644 --- a/roles/dnsmasq/templates/dnsmasq-ds.yml +++ b/roles/dnsmasq/templates/dnsmasq-ds.yml @@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: 
name: dnsmasq - namespace: kube-system + namespace: "{{system_namespace}}" labels: k8s-app: dnsmasq spec: diff --git a/roles/dnsmasq/templates/dnsmasq-svc.yml b/roles/dnsmasq/templates/dnsmasq-svc.yml index 52be6fd8374aa05fb4c0287f7bcf1ec344939739..1606aa93253ba2f3a0f918cedbc63ca942dd1710 100644 --- a/roles/dnsmasq/templates/dnsmasq-svc.yml +++ b/roles/dnsmasq/templates/dnsmasq-svc.yml @@ -6,7 +6,7 @@ metadata: kubernetes.io/cluster-service: 'true' k8s-app: dnsmasq name: dnsmasq - namespace: kube-system + namespace: {{system_namespace}} spec: ports: - port: 53 diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 1b9e9c3f647bacd255097ffc75efb30f26c1aee0..dd2bd2d8ac5281e028a96c8a35aab8d665548f75 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -1,6 +1,3 @@ -kube_config_dir: /etc/kubernetes -kube_namespace: kube-system - # Versions kubedns_version: 1.9 kubednsmasq_version: 1.3 diff --git a/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml index 02a49f211a349fe084661f21b331789871672ebe..a3915f9ba53987d4f3d04987e61d593e2dd35284 100644 --- a/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml +++ b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml @@ -8,6 +8,6 @@ name: "calico-policy-controller" kubectl: "{{bin_dir}}/kubectl" filename: "{{kube_config_dir}}/calico-policy-controller.yml" - namespace: "{{kube_namespace}}" + namespace: "{{system_namespace}}" resource: "rs" when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ansible/tasks/main.yaml b/roles/kubernetes-apps/ansible/tasks/main.yaml index 2977772c3020bd9fc67566bb748c6b5b419a5b25..1b4c77eff23563a66269e8f671c81c268673cff9 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yaml +++ b/roles/kubernetes-apps/ansible/tasks/main.yaml @@ -11,7 
+11,7 @@ - name: Kubernetes Apps | Start Resources kube: name: kubedns - namespace: "{{ kube_namespace }}" + namespace: "{{ system_namespace }}" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/kubernetes-apps/ansible/templates/calico-policy-controller.yml.j2 b/roles/kubernetes-apps/ansible/templates/calico-policy-controller.yml.j2 index a522c80ade9e196e41cbb8d86bf9dd9d74163a13..1bc55331674c45852f2cddab5652cd1a57661ce0 100644 --- a/roles/kubernetes-apps/ansible/templates/calico-policy-controller.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/calico-policy-controller.yml.j2 @@ -2,7 +2,7 @@ apiVersion: extensions/v1beta1 kind: ReplicaSet metadata: name: calico-policy-controller - namespace: {{ kube_namespace }} + namespace: {{ system_namespace }} labels: k8s-app: calico-policy kubernetes.io/cluster-service: "true" @@ -15,7 +15,7 @@ spec: template: metadata: name: calico-policy-controller - namespace: kube-system + namespace: {{system_namespace}} labels: kubernetes.io/cluster-service: "true" k8s-app: calico-policy diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml b/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml index 0fe4d2f58d5e80d9dfa870f58ae3e75a7b97b110..a7392cc87f76550a1a94870b3a08e6138d2472f7 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: kubedns - namespace: {{ kube_namespace }} + namespace: {{ system_namespace }} labels: k8s-app: kubedns version: v19 diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml index 7f88d06668094d8cc1c9e1b444761650863d20ef..ce87793265701a2701f5a151ad3cfc5974d3f85c 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml @@ 
-2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: kubedns - namespace: {{ kube_namespace }} + namespace: {{ system_namespace }} labels: k8s-app: kubedns kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml index c6bcd6992dbcb28bd170324b9abc8413a0498e2d..1b8de999a3edebc5cc5f20d4b098773126fcef88 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml @@ -3,15 +3,15 @@ kube: name: "canal-config" kubectl: "{{bin_dir}}/kubectl" - filename: "/etc/kubernetes/canal-config.yaml" + filename: "{{kube_config_dir}}/canal-config.yaml" resource: "configmap" - namespace: "kube-system" + namespace: "{{system_namespace}}" - name: Start flannel and calico-node run_once: true kube: name: "canal-node" kubectl: "{{bin_dir}}/kubectl" - filename: "/etc/kubernetes/canal-node.yaml" + filename: "{{kube_config_dir}}/canal-node.yaml" resource: "ds" - namespace: "kube-system" + namespace: "{{system_namespace}}" diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index c33fa788f4676f0a6b1338acfef3b8ce08c2d4ad..c1fbbb583f23dac9a5aa9a6efb9a72a58012a1bc 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -1,28 +1,7 @@ -# This is where all the cert scripts and certs will be located -kube_cert_dir: "{{ kube_config_dir }}/ssl" - -# This is where all of the bearer tokens will be stored -kube_token_dir: "{{ kube_config_dir }}/tokens" - -# This is where to save basic auth file -kube_users_dir: "{{ kube_config_dir }}/users" - # An experimental dev/test only dynamic volumes provisioner, # for PetSets. Works for kube>=v1.3 only. 
kube_hostpath_dynamic_provisioner: "false" -# This is where you can drop yaml/json files and the kubelet will run those -# pods on startup -kube_manifest_dir: "{{ kube_config_dir }}/manifests" - -# This directory is where all the additional config stuff goes -# the kubernetes normally puts in /srv/kubernets. -# This puts them in a sane location. -# Editting this value will almost surely break something. Don't -# change it. Things like the systemd scripts are hard coded to -# look in here. Don't do it. -kube_config_dir: /etc/kubernetes - # change to 0.0.0.0 to enable insecure access from anywhere (not recommended) kube_apiserver_insecure_bind_address: 127.0.0.1 @@ -30,9 +9,6 @@ kube_apiserver_insecure_bind_address: 127.0.0.1 # Inclusive at both ends of the range. kube_apiserver_node_port_range: "30000-32767" -# Logging directory (sysvinit systems) -kube_log_dir: "/var/log/kubernetes" - # ETCD cert dir for connecting apiserver to etcd etcd_config_dir: /etc/ssl/etcd etcd_cert_dir: "{{ etcd_config_dir }}/ssl" diff --git a/roles/kubernetes/master/files/namespace.yml b/roles/kubernetes/master/files/namespace.yml index 986f4b482217e2147911f8a323236788e810acaf..9bdf201a21a84c558b75a5763480bb6407ca5388 100644 --- a/roles/kubernetes/master/files/namespace.yml +++ b/roles/kubernetes/master/files/namespace.yml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: kube-system + name: "{{system_namespace}}" diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index 8e3353a21611efc7c5818e82cfdb81f327b61c87..e1b5cc5d25273d04aeec08776937a9002b20fcbb 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -36,28 +36,27 @@ tags: kube-apiserver - meta: flush_handlers -# Create kube-system namespace -- name: copy 'kube-system' namespace manifest - copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml + +- name: copy kube system namespace manifest + copy: src=namespace.yml 
dest={{kube_config_dir}}/{{system_namespace}}-ns.yml run_once: yes when: inventory_hostname == groups['kube-master'][0] tags: apps -- name: Check if kube-system exists - command: "{{ bin_dir }}/kubectl get ns kube-system" +- name: Check if kube system namespace exists + command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}" register: 'kubesystem' changed_when: False failed_when: False run_once: yes tags: apps -- name: Create 'kube-system' namespace - command: "{{ bin_dir }}/kubectl create -f /etc/kubernetes/kube-system-ns.yml" +- name: Create kube system namespace + command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml" changed_when: False when: kubesystem|failed and inventory_hostname == groups['kube-master'][0] tags: apps -# Write other manifests - name: Write kube-controller-manager manifest template: src: manifests/kube-controller-manager.manifest.j2 diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 530b009c65fe9b62068da2b5347b4acfbcf6496c..b292e5106756ba4e470f83fea0204525440d5add 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-apiserver - namespace: kube-system + namespace: {{system_namespace}} labels: k8s-app: kube-apiserver spec: diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index cdfbef06473e5795d92620b6df8850307ee46f24..1385b3cf468aa7f2a6a2e1d299240c00493c4f69 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: 
kube-controller-manager - namespace: kube-system + namespace: {{system_namespace}} labels: k8s-app: kube-controller spec: diff --git a/roles/kubernetes/master/vars/main.yml b/roles/kubernetes/master/vars/main.yml index 2eeb525fe6ff8fb7a7f20ebf80adf2d559a5d4ff..a5eba4f2beb8e2ffd29feb1d06b0564fe438dd3f 100644 --- a/roles/kubernetes/master/vars/main.yml +++ b/roles/kubernetes/master/vars/main.yml @@ -3,4 +3,4 @@ namespace_kubesystem: apiVersion: v1 kind: Namespace metadata: - name: kube-system \ No newline at end of file + name: "{{system_namespace}}" diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 8c4ce38a51e82700a78ff6e95db899c5210681e3..b0f73e50d6fe4ad81b409ac2bb113d60c97fd5de 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -1,15 +1,6 @@ -# This is where all the cert scripts and certs will be located -kube_cert_dir: "{{ kube_config_dir }}/ssl" - # change to 0.0.0.0 to enable insecure access from anywhere (not recommended) kube_apiserver_insecure_bind_address: 127.0.0.1 -# This is where you can drop yaml/json files and the kubelet will run those -# pods on startup -kube_manifest_dir: "{{ kube_config_dir }}/manifests" - -dns_domain: "{{ cluster_name }}" - # resolv.conf to base dns config kube_resolv_conf: "/etc/resolv.conf" @@ -22,16 +13,5 @@ kube_proxy_masquerade_all: true # - extensions/v1beta1/daemonsets=true # - extensions/v1beta1/deployments=true -# Logging directory (sysvinit systems) -kube_log_dir: "/var/log/kubernetes" - -# This directory is where all the additional config stuff goes -# the kubernetes normally puts in /srv/kubernets. -# This puts them in a sane location. -# Editting this value will almost surely break something. Don't -# change it. Things like the systemd scripts are hard coded to -# look in here. Don't do it. 
-kube_config_dir: /etc/kubernetes - nginx_image_repo: nginx nginx_image_tag: 1.11.4-alpine diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 67cc4ca863797bb34f8b2f5dd22e6949868c156f..3e0c095e18e822504ad9cd2cb3dcb67e4c84daad 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -1,4 +1,9 @@ --- +- set_fact: + standalone_kubelet: >- + {%- if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] -%}true{%- else -%}false{%- endif -%} + tags: facts + - include: install.yml tags: kubelet diff --git a/roles/kubernetes/node/tasks/nginx-proxy.yml b/roles/kubernetes/node/tasks/nginx-proxy.yml index 056c55a93659e723e381858285702bcfba638844..885b84f8f000a23c10217c68c3436b385f1394df 100644 --- a/roles/kubernetes/node/tasks/nginx-proxy.yml +++ b/roles/kubernetes/node/tasks/nginx-proxy.yml @@ -1,6 +1,6 @@ --- - name: nginx-proxy | Write static pod - template: src=manifests/nginx-proxy.manifest.j2 dest=/etc/kubernetes/manifests/nginx-proxy.yml + template: src=manifests/nginx-proxy.manifest.j2 dest={{kube_manifest_dir}}/nginx-proxy.yml - name: nginx-proxy | Make nginx directory file: path=/etc/nginx state=directory mode=0700 owner=root diff --git a/roles/kubernetes/node/templates/deb-kubelet.initd.j2 b/roles/kubernetes/node/templates/deb-kubelet.initd.j2 index 5d5184efe370dbc1cb795f26a630995ffe36c005..6f349b8f2b0920e8b68883e8b29f5044fb7d4304 100644 --- a/roles/kubernetes/node/templates/deb-kubelet.initd.j2 +++ b/roles/kubernetes/node/templates/deb-kubelet.initd.j2 @@ -27,7 +27,7 @@ DAEMON_USER=root [ -x "$DAEMON" ] || exit 0 # Read configuration variable file if it is present -[ -r /etc/kubernetes/$NAME.env ] && . /etc/kubernetes/$NAME.env +[ -r {{kube_config_dir}}/$NAME.env ] && . {{kube_config_dir}}/$NAME.env # Define LSB log_* functions. 
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2 index 45a76accc9816e0fa778b0de780cc6f2b2456c9e..7d4f536ab2e4bf3914cafcbba7a4458077f29df7 100644 --- a/roles/kubernetes/node/templates/kubelet-container.j2 +++ b/roles/kubernetes/node/templates/kubelet-container.j2 @@ -3,7 +3,7 @@ --net=host --pid=host --name=kubelet --restart=on-failure:5 \ -v /etc/cni:/etc/cni:ro \ -v /opt/cni:/opt/cni:ro \ --v /etc/kubernetes:/etc/kubernetes \ +-v {{kube_config_dir}}:{{kube_config_dir}} \ -v /sys:/sys \ -v /dev:/dev \ -v {{ docker_daemon_graph }}:/var/lib/docker \ diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index 3c1f31ab2268d5d839a19675419759730b9ba081..a9ecce448a422085af9294f86ebad37c91264d10 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -12,17 +12,21 @@ KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}" # KUBELET_PORT="--port=10250" # You may leave this blank to use the actual hostname KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}" -{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %} -KUBELET_REGISTER_NODE="--register-node=false" -{% endif %} # location of the api-server +{% set kubelet_args_base %}--pod-manifest-path={{ kube_manifest_dir }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}{% endset %} {% if dns_setup|bool and skip_dnsmasq|bool %} -KUBELET_ARGS="--cluster_dns={{ skydns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig --pod-manifest-path={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}" +{% set kubelet_args_dns %}--cluster_dns={{ skydns_server 
}} --cluster_domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }} {{ kubelet_args_base }}{% endset %} {% elif dns_setup|bool %} -KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig --pod-manifest-path={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}" +{% set kubelet_args_dns %}--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }} {{ kubelet_args_base }}{% endset %} +{% else %} +{% set kubelet_args_dns = kubelet_args_base %} +{% endif %} +{% if not standalone_kubelet|bool %} +{% set kubelet_args %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig {{ kubelet_args_dns }}{% endset %} {% else %} -KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --require-kubeconfig --pod-manifest-path={{ kube_manifest_dir }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}" +{% set kubelet_args = kubelet_args_dns %} {% endif %} +KUBELET_ARGS="{{ kubelet_args }}" {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave", "canal"] %} KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d" {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %} diff --git a/roles/kubernetes/node/templates/kubelet.service.j2 b/roles/kubernetes/node/templates/kubelet.service.j2 index ad62d856244132bbc200c220732072c43294fcc4..b3113d5dad79bb014d7a27f5cda3122ce947c11c 100644 --- a/roles/kubernetes/node/templates/kubelet.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.service.j2 @@ -10,7 +10,7 @@ Wants=docker.socket {% endif %} [Service] -EnvironmentFile=/etc/kubernetes/kubelet.env +EnvironmentFile={{kube_config_dir}}/kubelet.env ExecStart={{ bin_dir }}/kubelet \ $KUBE_LOGTOSTDERR \ $KUBE_LOG_LEVEL \ diff --git 
a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 12a1a76634b785011dcfc16cd508acf4b3eec092..694ee1e3603a847b29eb88498cb0d4e9a663eae7 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-proxy - namespace: kube-system + namespace: {{system_namespace}} labels: k8s-app: kube-proxy spec: @@ -17,7 +17,7 @@ spec: - --v={{ kube_log_level }} - --master={{ kube_apiserver_endpoint }} {% if not is_kube_master %} - - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml + - --kubeconfig={{kube_config_dir}}/node-kubeconfig.yaml {% endif %} - --bind-address={{ ip | default(ansible_default_ipv4.address) }} - --cluster-cidr={{ kube_pods_subnet }} @@ -31,10 +31,10 @@ spec: - mountPath: /etc/ssl/certs name: ssl-certs-host readOnly: true - - mountPath: /etc/kubernetes/node-kubeconfig.yaml + - mountPath: {{kube_config_dir}}/node-kubeconfig.yaml name: "kubeconfig" readOnly: true - - mountPath: /etc/kubernetes/ssl + - mountPath: {{kube_config_dir}}/ssl name: "etc-kube-ssl" readOnly: true - mountPath: /var/run/dbus @@ -46,10 +46,10 @@ spec: path: /usr/share/ca-certificates - name: "kubeconfig" hostPath: - path: "/etc/kubernetes/node-kubeconfig.yaml" + path: "{{kube_config_dir}}/node-kubeconfig.yaml" - name: "etc-kube-ssl" hostPath: - path: "/etc/kubernetes/ssl" + path: "{{kube_config_dir}}/ssl" - name: "var-run-dbus" hostPath: path: "/var/run/dbus" diff --git a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 index 0930ee61ec6177d2de3fe95e321983d8974c0b42..db15bd2b9680b7b4070766d4794fcb5aaf6155c0 100644 --- a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 @@ -2,7 +2,7 @@ 
apiVersion: v1 kind: Pod metadata: name: nginx-proxy - namespace: kube-system + namespace: {{system_namespace}} labels: k8s-app: kube-nginx spec: diff --git a/roles/kubernetes/node/templates/rh-kubelet.initd.j2 b/roles/kubernetes/node/templates/rh-kubelet.initd.j2 index 5a709e118f89f2821b7e3702dd7cb9d2fa03a1c8..faae10d1aaadb9041918a96faa6cd2ebe35fe289 100644 --- a/roles/kubernetes/node/templates/rh-kubelet.initd.j2 +++ b/roles/kubernetes/node/templates/rh-kubelet.initd.j2 @@ -27,7 +27,7 @@ pidfile="/var/run/$prog.pid" lockfile="/var/lock/subsys/$prog" logfile="/var/log/$prog" -[ -e /etc/kubernetes/$prog.env ] && . /etc/kubernetes/$prog.env +[ -e {{kube_config_dir}}/$prog.env ] && . {{kube_config_dir}}/$prog.env start() { if [ ! -x $exec ]; then @@ -35,7 +35,7 @@ start() { echo "Docker executable $exec not found" else echo "You do not have permission to execute the Docker executable $exec" - fi + fi exit 5 fi diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml index 64f0ff24bdbacf06e3d739aa4f02db6d15eea419..35ad8abeaf9da9fe277aec37f87831fadc3a807d 100644 --- a/roles/kubernetes/preinstall/defaults/main.yml +++ b/roles/kubernetes/preinstall/defaults/main.yml @@ -1,26 +1,6 @@ --- run_gitinfos: false -# This directory is where all the additional scripts go -# that Kubernetes normally puts in /srv/kubernetes. -# This puts them in a sane location -kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" - -# This directory is where all the additional config stuff goes -# the kubernetes normally puts in /srv/kubernets. -# This puts them in a sane location. -# Editting this value will almost surely break something. Don't -# change it. Things like the systemd scripts are hard coded to -# look in here. Don't do it. 
-kube_config_dir: /etc/kubernetes - -# Logging directory (sysvinit systems) -kube_log_dir: "/var/log/kubernetes" - -# This is where you can drop yaml/json files and the kubelet will run those -# pods on startup -kube_manifest_dir: "{{ kube_config_dir }}/manifests" - epel_rpm_download_url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm" common_required_pkgs: diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml deleted file mode 100644 index c6011a9bf331e546512bb1ee0d63533bf5020930..0000000000000000000000000000000000000000 --- a/roles/kubernetes/secrets/defaults/main.yml +++ /dev/null @@ -1,21 +0,0 @@ -# This is where all the cert scripts and certs will be located -kube_cert_dir: "{{ kube_config_dir }}/ssl" - -# This is where all of the bearer tokens will be stored -kube_token_dir: "{{ kube_config_dir }}/tokens" - -# This is where to save basic auth file -kube_users_dir: "{{ kube_config_dir }}/users" - -# This directory is where all the additional config stuff goes -# the kubernetes normally puts in /srv/kubernets. -# This puts them in a sane location. -# Editting this value will almost surely break something. Don't -# change it. Things like the systemd scripts are hard coded to -# look in here. Don't do it. -kube_config_dir: /etc/kubernetes - -# This directory is where all the additional scripts go -# that Kubernetes normally puts in /srv/kubernetes. 
-# This puts them in a sane location -kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml index 15ce2f657ff731628812f894a8a7335ca43788ff..d968e9e46b0b6579821c00c21935a933f2aef024 100644 --- a/roles/network_plugin/canal/tasks/main.yml +++ b/roles/network_plugin/canal/tasks/main.yml @@ -35,12 +35,12 @@ - name: Canal | Write canal configmap template: src: canal-config.yml.j2 - dest: /etc/kubernetes/canal-config.yaml + dest: "{{kube_config_dir}}/canal-config.yaml" - name: Canal | Write canal node configuration template: src: canal-node.yml.j2 - dest: /etc/kubernetes/canal-node.yaml + dest: "{{kube_config_dir}}/canal-node.yaml" - name: Canal | Copy cni plugins from hyperkube command: "/usr/bin/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/rsync -a /opt/cni/bin/ /cnibindir/" diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml index f4ca65d12517b0c7fe6d6170aa8b0ee971afc172..4dde123aeacf88d764ad57194dd94145bd9423bd 100644 --- a/roles/network_plugin/flannel/tasks/main.yml +++ b/roles/network_plugin/flannel/tasks/main.yml @@ -10,7 +10,7 @@ - name: Flannel | Create flannel pod manifest template: src: flannel-pod.yml - dest: /etc/kubernetes/manifests/flannel-pod.manifest + dest: "{{kube_manifest_dir}}/flannel-pod.manifest" notify: Flannel | delete default docker bridge - name: Flannel | Wait for flannel subnet.env file presence diff --git a/roles/network_plugin/flannel/templates/flannel-pod.yml b/roles/network_plugin/flannel/templates/flannel-pod.yml index 70b62e9ac5e791ffcac688319eaea1e5d36c2c90..1af2152ea6c1dec8a3532e9daadc208e3039ea55 100644 --- a/roles/network_plugin/flannel/templates/flannel-pod.yml +++ b/roles/network_plugin/flannel/templates/flannel-pod.yml @@ -3,7 +3,7 @@ apiVersion: "v1" metadata: name: "flannel" - namespace: "kube-system" + namespace: 
"{{system_namespace}}" labels: app: "flannel" version: "v0.1" diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index bdacbbfc48d978f17f440425dbd5c1f1c9c7b378..74a92abd5fc855cac218bedb14478ee076ec3908 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -34,7 +34,7 @@ - name: reset | delete some files and directories file: path={{ item }} state=absent with_items: - - /etc/kubernetes/ + - "{{kube_config_dir}}" - /var/lib/kubelet - /var/lib/etcd - /etc/ssl/etcd diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml index 03447842f16e6d066ec57c5082a8ee488aa5c27d..d0f3b9df0b3a0dfb7299ec514e254e290eadc3f7 100644 --- a/scripts/collect-info.yaml +++ b/scripts/collect-info.yaml @@ -41,31 +41,31 @@ cmd: journalctl -u kubelet --no-pager - name: kubedns_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kubedns -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system kubedns; done" + do kubectl logs ${i} --namespace {{system_namespace}} kubedns; done" - name: apiserver_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-apiserver -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system; done" + do kubectl logs ${i} --namespace {{system_namespace}}; done" - name: controller_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-controller -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system; done" + do kubectl logs ${i} --namespace {{system_namespace}}; done" - name: scheduler_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-scheduler -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system; done" + do kubectl logs ${i} --namespace {{system_namespace}}; done" - name: proxy_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-proxy -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system; done" 
+ do kubectl logs ${i} --namespace {{system_namespace}}; done" - name: nginx_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-nginx -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system; done" + do kubectl logs ${i} --namespace {{system_namespace}}; done" - name: flannel_logs cmd: "for i in `kubectl get pods --all-namespaces -l app=flannel -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system flannel-container; done" + do kubectl logs ${i} --namespace {{system_namespace}} flannel-container; done" - name: canal_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=canal-node -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system flannel; done" + do kubectl logs ${i} --namespace {{system_namespace}} flannel; done" - name: calico_policy_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=calico-policy -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system calico-policy-controller; done" + do kubectl logs ${i} --namespace {{system_namespace}} calico-policy-controller; done" logs: - /var/log/syslog