Commit 383d582b authored by Brad Beam, committed by GitHub

Merge pull request #1382 from jwfang/rbac

basic rbac support
parents 6eacedc4 805d9f22
Showing with 241 additions and 52 deletions
......@@ -62,6 +62,7 @@ before_script:
KUBELET_DEPLOYMENT: "docker"
VAULT_DEPLOYMENT: "docker"
WEAVE_CPU_LIMIT: "100m"
AUTHORIZATION_MODES: "{ 'authorization_modes': [] }"
MAGIC: "ci check this"
.gce: &gce
......@@ -132,6 +133,7 @@ before_script:
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml
......@@ -160,6 +162,7 @@ before_script:
-e resolvconf_mode=${RESOLVCONF_MODE}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
$PLAYBOOK;
fi
......@@ -190,6 +193,7 @@ before_script:
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml;
fi
......@@ -232,6 +236,7 @@ before_script:
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml;
fi
......@@ -373,6 +378,15 @@ before_script:
CLUSTER_MODE: separate
STARTUP_SCRIPT: ""
.ubuntu_flannel_rbac_variables: &ubuntu_flannel_rbac_variables
# stage: deploy-gce-special
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
KUBE_NETWORK_PLUGIN: flannel
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: separate
STARTUP_SCRIPT: ""
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
coreos-calico-sep:
stage: deploy-gce-part1
......@@ -598,6 +612,17 @@ ubuntu-vault-sep:
except: ['triggers']
only: ['master', /^pr-.*$/]
ubuntu-flannel-rbac-sep:
stage: deploy-gce-special
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_flannel_rbac_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
# Premoderated with manual actions
ci-authorized:
<<: *job
......
......@@ -67,6 +67,11 @@ following default cluster parameters:
OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
Kubernetes
* *authorization_modes* - A list of [authorization modes](
https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
that the cluster should be configured with. Defaults to `[]` (i.e. no authorization).
Note: `RBAC` support is currently experimental and does not work with either calico or
vault. Upgrading from non-RBAC to RBAC is not tested.
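A minimal sketch of enabling RBAC, assuming cluster variables are kept in an inventory group_vars file (the path below is illustrative; the CI jobs in this change pass the same value on the command line via `-e "{ 'authorization_modes': [ 'RBAC' ] }"` instead):

```yaml
# e.g. inventory/group_vars/k8s-cluster.yml (hypothetical path)
authorization_modes:
  - RBAC
```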
Note: if the cloud provider makes any use of ``10.233.0.0/16``, e.g. for instances'
private addresses, make sure to pick other values for ``kube_service_addresses``
......
......@@ -41,3 +41,7 @@ netchecker_server_memory_requests: 64M
etcd_cert_dir: "/etc/ssl/etcd/ssl"
canal_cert_dir: "/etc/canal/certs"
rbac_resources:
- sa
- clusterrole
- clusterrolebinding
......@@ -13,11 +13,35 @@
src: "{{item.file}}"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {name: kube-dns, file: kubedns-deploy.yml, type: deployment}
- {name: kube-dns, file: kubedns-svc.yml, type: svc}
- {name: kubedns, file: kubedns-sa.yml, type: sa}
- {name: kubedns, file: kubedns-deploy.yml, type: deployment}
- {name: kubedns, file: kubedns-svc.yml, type: svc}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding}
- {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
register: manifests
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
when:
- dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
- rbac_enabled or item.type not in rbac_resources
tags: dnsmasq
# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
command: >
{{bin_dir}}/kubectl patch clusterrole system:kube-dns
--patch='{
"rules": [
{
"apiGroups" : [""],
"resources" : ["endpoints", "services"],
"verbs": ["list", "watch", "get"]
}
]
}'
when:
- dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
- rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
tags: dnsmasq
- name: Kubernetes Apps | Start Resources
......@@ -29,6 +53,7 @@
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "{{item.changed | ternary('latest','present') }}"
with_items: "{{ manifests.results }}"
failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
tags: dnsmasq
......
......@@ -5,10 +5,15 @@
with_items:
- {file: netchecker-agent-ds.yml.j2, type: ds, name: netchecker-agent}
- {file: netchecker-agent-hostnet-ds.yml.j2, type: ds, name: netchecker-agent-hostnet}
- {file: netchecker-server-sa.yml.j2, type: sa, name: netchecker-server}
- {file: netchecker-server-clusterrole.yml.j2, type: clusterrole, name: netchecker-server}
- {file: netchecker-server-clusterrolebinding.yml.j2, type: clusterrolebinding, name: netchecker-server}
- {file: netchecker-server-deployment.yml.j2, type: po, name: netchecker-server}
- {file: netchecker-server-svc.yml.j2, type: svc, name: netchecker-service}
register: manifests
when: inventory_hostname == groups['kube-master'][0]
when:
- inventory_hostname == groups['kube-master'][0]
- rbac_enabled or item.type not in rbac_resources
#FIXME: remove if kubernetes/features#124 is implemented
- name: Kubernetes Apps | Purge old Netchecker daemonsets
......@@ -31,4 +36,5 @@
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "{{item.changed | ternary('latest','present') }}"
with_items: "{{ manifests.results }}"
failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
when: inventory_hostname == groups['kube-master'][0]
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: cluster-proportional-autoscaler
namespace: {{ system_namespace }}
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: cluster-proportional-autoscaler
namespace: {{ system_namespace }}
subjects:
- kind: ServiceAccount
name: cluster-proportional-autoscaler
namespace: {{ system_namespace }}
roleRef:
kind: ClusterRole
name: cluster-proportional-autoscaler
apiGroup: rbac.authorization.k8s.io
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ServiceAccount
apiVersion: v1
metadata:
name: cluster-proportional-autoscaler
namespace: {{ system_namespace }}
......@@ -16,7 +16,7 @@ apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kubedns-autoscaler
namespace: kube-system
namespace: {{ system_namespace }}
labels:
k8s-app: kubedns-autoscaler
kubernetes.io/cluster-service: "true"
......@@ -39,11 +39,13 @@ spec:
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --namespace={{ system_namespace }}
- --configmap=kubedns-autoscaler
# Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
- --target=Deployment/kube-dns
- --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
- --logtostderr=true
- --v=2
{% if rbac_enabled %}
serviceAccountName: cluster-proportional-autoscaler
{% endif %}
......@@ -151,4 +151,6 @@ spec:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
{% if rbac_enabled %}
serviceAccountName: kube-dns
{% endif %}
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: {{ system_namespace }}
labels:
kubernetes.io/cluster-service: "true"
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels:
app: netchecker-agent-hostnet
name: netchecker-agent-hostnet
namespace: {{ netcheck_namespace }}
spec:
template:
metadata:
name: netchecker-agent-hostnet
labels:
app: netchecker-agent-hostnet
spec:
hostNetwork: True
{% if kube_version | version_compare('v1.6', '>=') %}
dnsPolicy: ClusterFirstWithHostNet
{% endif %}
containers:
- name: netchecker-agent
image: "{{ agent_img }}"
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
args:
- "-v=5"
- "-alsologtostderr=true"
- "-serverendpoint=netchecker-service:8081"
- "-reportinterval={{ agent_report_interval }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ netchecker_agent_cpu_limit }}
memory: {{ netchecker_agent_memory_limit }}
requests:
cpu: {{ netchecker_agent_cpu_requests }}
memory: {{ netchecker_agent_memory_requests }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: netchecker-server
namespace: {{ netcheck_namespace }}
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list"]
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: netchecker-server
namespace: {{ netcheck_namespace }}
subjects:
- kind: ServiceAccount
name: netchecker-server
namespace: {{ netcheck_namespace }}
roleRef:
kind: ClusterRole
name: netchecker-server
apiGroup: rbac.authorization.k8s.io
......@@ -31,3 +31,6 @@ spec:
- "-logtostderr"
- "-kubeproxyinit"
- "-endpoint=0.0.0.0:8081"
{% if rbac_enabled %}
serviceAccountName: netchecker-server
{% endif %}
apiVersion: v1
kind: ServiceAccount
metadata:
name: netchecker-server
namespace: {{ netcheck_namespace }}
labels:
kubernetes.io/cluster-service: "true"
......@@ -10,10 +10,36 @@
mode: 0755
register: helm_container
- name: Helm | Lay Down Helm Manifests (RBAC)
template:
src: "{{item.file}}"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {name: tiller, file: tiller-sa.yml, type: sa}
- {name: tiller, file: tiller-clusterrolebinding.yml, type: clusterrolebinding}
register: manifests
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled
- name: Helm | Apply Helm Manifests (RBAC)
kube:
name: "{{item.item.name}}"
namespace: "{{ system_namespace }}"
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "{{item.changed | ternary('latest','present') }}"
with_items: "{{ manifests.results }}"
failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled
- name: Helm | Install/upgrade helm
command: "{{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}"
when: helm_container.changed
- name: Helm | Patch tiller deployment for RBAC
command: kubectl patch deployment tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' -n {{ system_namespace }}
when: rbac_enabled
- name: Helm | Set up bash completion
shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh"
when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] )
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: tiller
namespace: {{ system_namespace }}
subjects:
- kind: ServiceAccount
name: tiller
namespace: {{ system_namespace }}
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
apiVersion: v1
kind: ServiceAccount
metadata:
name: tiller
namespace: {{ system_namespace }}
labels:
kubernetes.io/cluster-service: "true"
......@@ -64,4 +64,4 @@ apiserver_custom_flags: []
controller_mgr_custom_flags: []
scheduler_custom_flags: []
\ No newline at end of file
scheduler_custom_flags: []