diff --git a/cluster.yml b/cluster.yml index 4fc852d973df5e0000987ef56d03fd9e8ce4fe1e..6a9de14da1acedfa754498dccd73b2ad38bcc48d 100644 --- a/cluster.yml +++ b/cluster.yml @@ -94,6 +94,7 @@ roles: - { role: kubespray-defaults} - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" } + - { role: win_nodes/kubernetes_patch, tags: win_nodes, when: "kubeadm_enabled" } - hosts: kube-master any_errors_fatal: "{{ any_errors_fatal | default(true) }}" diff --git a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 index a6d1df9348b8734fde08fea33f3f5bf1b2293ede..4489e241869b5302f3a024a936cd3f08279cb762 100644 --- a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 +++ b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 @@ -52,3 +52,6 @@ spec: - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}} - --logtostderr=true - --v={{ kube_log_level }} + # When having win nodes in cluster without this patch, this pod could try to be created in windows + nodeSelector: + beta.kubernetes.io/os: linux diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml.j2 b/roles/dnsmasq/templates/dnsmasq-deploy.yml.j2 index 0fb6045e8269ad4061a8d435412553e25a78c53d..c3a32f02e68989809739cd871220cd0963fe3b63 100644 --- a/roles/dnsmasq/templates/dnsmasq-deploy.yml.j2 +++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml.j2 @@ -24,6 +24,9 @@ spec: tolerations: - effect: NoSchedule operator: Exists + # When having win nodes in cluster without this patch, this pod could try to be created in windows + nodeSelector: + beta.kubernetes.io/os: linux containers: - name: dnsmasq image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}" diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index d2426769f03d3d3fd5fe384a7436c559a56e8be9..5835dff10bb8da1b12173316e0c06db9f79f26d2 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml 
@@ -138,6 +138,15 @@ msg: "{{available_packages}}" when: docker_task_result|failed +# This is required to ensure any apt upgrade will not break kubernetes +- name: Set docker pin priority to apt_preferences on Debian family + template: + src: "apt_preferences.d/debian_docker.j2" + dest: "/etc/apt/preferences.d/docker" + owner: "root" + mode: 0644 + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) + - name: ensure service is started if docker packages are already present service: name: docker diff --git a/roles/docker/templates/apt_preferences.d/debian_docker.j2 b/roles/docker/templates/apt_preferences.d/debian_docker.j2 new file mode 100644 index 0000000000000000000000000000000000000000..f21008b6c14b42e620d07c11907700458940812d --- /dev/null +++ b/roles/docker/templates/apt_preferences.d/debian_docker.j2 @@ -0,0 +1,3 @@ +Package: docker-ce +Pin: version {{ docker_version }}.* +Pin-Priority: 1001 \ No newline at end of file diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 38df04d731a363161a76993561cd4706ce34e8e5..db59a983fa7eae5d6b605aac6e33007bb25a1a4e 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -6,7 +6,6 @@ - facts - include_tasks: "gen_certs_{{ cert_management }}.yml" - when: tags: - etcd-secrets diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 index 11c8d37f0bd2089085461e4b514f86bf844ef3ed..e726e8d2a7f78311919eadcbaa9a8e92b35e4c63 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 @@ -28,6 +28,9 @@ spec: labels: k8s-app: kubedns-autoscaler spec: + # When having win nodes in cluster without this patch, this pod could try to be created in windows + nodeSelector: + beta.kubernetes.io/os: linux tolerations: - effect: NoSchedule operator: Equal diff --git 
a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 index 549d93c1420aa5d5025084a41d74386710a5ea9a..96ef72283effeb0922ecbe1d0ab74374538104fb 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 @@ -27,6 +27,9 @@ spec: annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: + # When having win nodes in cluster without this patch, this pod could try to be created in windows + nodeSelector: + beta.kubernetes.io/os: linux tolerations: - key: "CriticalAddonsOnly" operator: "Exists" diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 index 4314482314aaf13cd7d549b8147d82f811e9a344..a2c4850c442600eb747b4e930bd1d0c9faeefb10 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 @@ -15,6 +15,9 @@ spec: tolerations: - effect: NoSchedule operator: Exists + # When having win nodes in cluster without this patch, this pod could try to be created in windows + nodeSelector: + beta.kubernetes.io/os: linux containers: - name: netchecker-agent image: "{{ agent_img }}" diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 index ad32d509a13b6a0791f51e71d363d4de9ebecbf4..f046e8f4b58385428f5d2180113ac5ecfe0df65f 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 @@ -13,6 +13,9 @@ spec: app: netchecker-agent-hostnet spec: hostNetwork: True + # When having win nodes in cluster without this patch, this pod could try to be created in windows + nodeSelector: + beta.kubernetes.io/os: linux {% if 
kube_version | version_compare('v1.6', '>=') %} dnsPolicy: ClusterFirstWithHostNet {% endif %} diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 index 6e9ad30c03d0aeb1cef0eac8412d743ee192bd6b..03b118f8d70720409e1f4d44ddef57d94c6afdc8 100644 --- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 +++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 @@ -29,6 +29,9 @@ spec: spec: priorityClassName: system-node-critical serviceAccountName: efk + # When having win nodes in cluster without this patch, this pod could try to be created in windows + nodeSelector: + beta.kubernetes.io/os: linux containers: - name: fluentd-es image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}" diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2 index 884b6d79b827ab79df7786cb0a3e3c0c4723af29..0578844f9aacf5c4bf87d9ca42d55d24900497cb 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2 @@ -42,3 +42,6 @@ spec: requests: cpu: 10m memory: 20Mi + # When having win nodes in cluster without this patch, this pod could try to be created in windows + nodeSelector: + beta.kubernetes.io/os: linux diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml index d6ef52a619ffc87922ac8d15abf259bb65b76cf5..827154612d5460ad52cd13ac9c256af02d5009d5 100644 --- a/roles/kubernetes/master/tasks/kubeadm-setup.yml +++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml @@ -121,6 +121,7 @@ --ignore-preflight-errors=all --allow-experimental-upgrades --allow-release-candidate-upgrades + --force register: kubeadm_upgrade # Retry is because upload 
config sometimes fails retries: 3 diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index d1292887aad1a22fd6d272a9899ad40e4a351913..ece9be10cde5e92b324430c6bf9926a7adb89d0f 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -12,6 +12,9 @@ spec: {% if kube_version | version_compare('v1.6', '>=') %} dnsPolicy: ClusterFirst {% endif %} + # When having win nodes in cluster without this patch, this pod could try to be created in windows + nodeSelector: + beta.kubernetes.io/os: linux containers: - name: kube-proxy image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} diff --git a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 index a1e9a78156ade8abafc085be0d1ab9967d6aaedb..756eba7ee9c5e83550e05e61c16df3e1fd316d5f 100644 --- a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 @@ -7,6 +7,9 @@ metadata: k8s-app: kube-nginx spec: hostNetwork: true + # When having win nodes in cluster without this patch, this pod could try to be created in windows + nodeSelector: + beta.kubernetes.io/os: linux containers: - name: nginx-proxy image: {{ nginx_image_repo }}:{{ nginx_image_tag }} diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 index b201e8e7f01818ad5528d5115b3ef5a79656cc83..de9be8d9e173e2bdfa5ae72e5739a91346e61a6a 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -53,6 +53,9 @@ spec: k8s-app: flannel spec: serviceAccountName: flannel + # When having win nodes in cluster without this patch, this pod could try to be created in windows + 
nodeSelector: + beta.kubernetes.io/os: linux containers: - name: kube-flannel image: {{ flannel_image_repo }}:{{ flannel_image_tag }} diff --git a/roles/win_nodes/kubernetes_patch/defaults/main.yml b/roles/win_nodes/kubernetes_patch/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..587f73ab42b091af8a5ee54542040db31f0ad5e6 --- /dev/null +++ b/roles/win_nodes/kubernetes_patch/defaults/main.yml @@ -0,0 +1,3 @@ +--- + +kubernetes_user_manifests_path: "{{ ansible_env.HOME }}/kube-manifests" diff --git a/roles/win_nodes/kubernetes_patch/files/nodeselector-os-linux-patch.json b/roles/win_nodes/kubernetes_patch/files/nodeselector-os-linux-patch.json new file mode 100644 index 0000000000000000000000000000000000000000..d718ff4465e0ca7cfb082dcdfa711790731b4a8b --- /dev/null +++ b/roles/win_nodes/kubernetes_patch/files/nodeselector-os-linux-patch.json @@ -0,0 +1 @@ +{"spec":{"template":{"spec":{"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}} \ No newline at end of file diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..8d88818a5131cf6810e05d8524fda48514a017ad --- /dev/null +++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml @@ -0,0 +1,34 @@ +--- + +- name: Ensure that user manifests directory exists + file: + path: "{{ kubernetes_user_manifests_path }}/kubernetes" + state: directory + recurse: yes + tags: [init, cni] + +- name: Apply kube-proxy nodeselector + block: + - name: Copy kube-proxy daemonset nodeselector patch + copy: + src: nodeselector-os-linux-patch.json + dest: "{{ kubernetes_user_manifests_path }}/nodeselector-os-linux-patch.json" + + # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch" + - name: Check current nodeselector for kube-proxy daemonset + shell: kubectl get ds kube-proxy --namespace=kube-system -o 
jsonpath='{.spec.template.spec.nodeSelector.beta\.kubernetes\.io/os}' + register: current_kube_proxy_state + + - name: Apply nodeselector patch for kube-proxy daemonset + shell: kubectl patch ds kube-proxy --namespace=kube-system --type=strategic -p "$(cat nodeselector-os-linux-patch.json)" + args: + chdir: "{{ kubernetes_user_manifests_path }}" + register: patch_kube_proxy_state + when: current_kube_proxy_state.stdout | trim | lower != "linux" + + - debug: msg={{ patch_kube_proxy_state.stdout_lines }} + when: patch_kube_proxy_state is not skipped + + - debug: msg={{ patch_kube_proxy_state.stderr_lines }} + when: patch_kube_proxy_state is not skipped + tags: init