Commit 127969d6 authored by Matthew Mosesohn, committed by k8s-ci-robot

Align node-role value for kubeadm compatibility (#3558)

kubeadm sets the node label node-role.kubernetes.io/master=''
and this value is not configurable, so use the same empty-string
value everywhere instead of "true".
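
For illustration only (the node name below is hypothetical, not part of this change): on a kubeadm-provisioned cluster the control-plane node already carries the label with an empty value, which is why selectors have to match "" rather than "true":

    # Show the labels kubeadm applied to a control-plane node (node name is hypothetical)
    kubectl get node master-0 --show-labels

    # List all nodes that carry the master role label; on kubeadm clusters its value is the empty string
    kubectl get nodes -l node-role.kubernetes.io/master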
parent 4b711e29
@@ -44,7 +44,7 @@ cephfs_provisioner_enabled: false
 ingress_nginx_enabled: false
 # ingress_nginx_host_network: false
 # ingress_nginx_nodeselector:
-#   node-role.kubernetes.io/master: "true"
+#   node-role.kubernetes.io/master: ""
 # ingress_nginx_namespace: "ingress-nginx"
 # ingress_nginx_insecure_port: 80
 # ingress_nginx_secure_port: 443
@@ -2,7 +2,7 @@
 ingress_nginx_namespace: "ingress-nginx"
 ingress_nginx_host_network: false
 ingress_nginx_nodeselector:
-  node-role.kubernetes.io/master: "true"
+  node-role.kubernetes.io/master: ""
 ingress_nginx_insecure_port: 80
 ingress_nginx_secure_port: 443
 ingress_nginx_configmap: {}
@@ -81,12 +81,12 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {# Kubelet node labels #}
 {% set role_node_labels = [] %}
 {% if inventory_hostname in groups['kube-master'] %}
-{% set dummy = role_node_labels.append('node-role.kubernetes.io/master=true') %}
+{% set dummy = role_node_labels.append("node-role.kubernetes.io/master=''") %}
 {% if not standalone_kubelet|bool %}
-{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
+{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
 {% endif %}
 {% else %}
-{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
+{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
 {% endif %}
 {% set inventory_node_labels = [] %}
 {% if node_labels is defined %}
@@ -95,12 +95,12 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {# Kubelet node labels #}
 {% set role_node_labels = [] %}
 {% if inventory_hostname in groups['kube-master'] %}
-{% set dummy = role_node_labels.append('node-role.kubernetes.io/master=true') %}
+{% set dummy = role_node_labels.append("node-role.kubernetes.io/master=''") %}
 {% if not standalone_kubelet|bool %}
-{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
+{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
 {% endif %}
 {% else %}
-{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
+{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
 {% endif %}
 {% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
 {% if inventory_hostname in nvidia_gpu_nodes %}
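
As a rough sketch of the effect (assuming, as the KUBELET_HOSTNAME context above suggests, that role_node_labels is later joined into the kubelet's --node-labels argument), a master that also runs workloads would be registered roughly as:

    # Hypothetical rendering of the label list for a non-standalone master; the exact
    # surrounding option string depends on the rest of the template.
    --node-labels=node-role.kubernetes.io/master='',node-role.kubernetes.io/node=''
    # The intent is that '' collapses to an empty value when the argument is evaluated,
    # so the registered labels match what kubeadm itself sets.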
@@ -27,7 +27,7 @@ spec:
   hostNetwork: true
   hostPID: true
   nodeSelector:
-    node-role.kubernetes.io/master: "true"
+    node-role.kubernetes.io/master: ""
   tolerations:
     - operator: Exists
   # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
@@ -23,7 +23,7 @@ spec:
   hostNetwork: true
   hostPID: true
   nodeSelector:
-    node-role.kubernetes.io/master: "true"
+    node-role.kubernetes.io/master: ""
   tolerations:
     - operator: Exists
   # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
@@ -27,7 +27,7 @@ spec:
   hostNetwork: true
   hostPID: true
   nodeSelector:
-    node-role.kubernetes.io/master: "true"
+    node-role.kubernetes.io/master: ""
   tolerations:
     - operator: Exists
   # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
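
A quick, informal way to confirm the change behaves as intended after deployment (commands are illustrative, not part of this commit):

    # kubectl derives the ROLES column from node-role.kubernetes.io/* labels, so
    # control-plane nodes should still report the master role
    kubectl get nodes

    # Pods whose nodeSelector is node-role.kubernetes.io/master: "" should only be
    # scheduled onto those nodes
    kubectl get pods --all-namespaces -o wide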