Add Node authorization mode based on graph of node-related objects

pull/6/head
Jordan Liggitt 2017-05-30 15:15:38 -04:00
parent fed30040c9
commit fc8e915a4b
14 changed files with 1457 additions and 17 deletions

View File

@@ -76,6 +76,7 @@ ENABLE_APISERVER_BASIC_AUDIT=${ENABLE_APISERVER_BASIC_AUDIT:-false}
# RBAC Mode options
ALLOW_ANY_TOKEN=${ALLOW_ANY_TOKEN:-false}
ENABLE_RBAC=${ENABLE_RBAC:-false}
AUTHORIZATION_MODE=${AUTHORIZATION_MODE:-""}
KUBECONFIG_TOKEN=${KUBECONFIG_TOKEN:-""}
AUTH_ARGS=${AUTH_ARGS:-""}
@@ -201,6 +202,8 @@ KUBELET_HOST=${KUBELET_HOST:-"127.0.0.1"}
API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-/127.0.0.1(:[0-9]+)?$,/localhost(:[0-9]+)?$}
KUBELET_PORT=${KUBELET_PORT:-10250}
LOG_LEVEL=${LOG_LEVEL:-3}
# Use to increase verbosity on particular files, e.g. LOG_SPEC=token_controller*=5,other_controller*=4
LOG_SPEC=${LOG_SPEC:-""}
LOG_DIR=${LOG_DIR:-"/tmp"}
CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
CONTAINER_RUNTIME_ENDPOINT=${CONTAINER_RUNTIME_ENDPOINT:-""}
@@ -435,6 +438,12 @@ function start_apiserver {
if [[ "${ENABLE_RBAC}" = true ]]; then
authorizer_arg="--authorization-mode=RBAC "
fi
if [[ -n "${AUTHORIZATION_MODE}" ]]; then
if [[ "${ENABLE_RBAC}" = true ]]; then
warning "AUTHORIZATION_MODE=$AUTHORIZATION_MODE overrode ENABLE_RBAC=true"
fi
authorizer_arg="--authorization-mode=${AUTHORIZATION_MODE} "
fi
priv_arg=""
if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
priv_arg="--allow-privileged "
@@ -487,6 +496,7 @@ function start_apiserver {
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" apiserver ${swagger_arg} ${audit_arg} ${anytoken_arg} ${authorizer_arg} ${priv_arg} ${runtime_config}\
${advertise_address} \
--v=${LOG_LEVEL} \
--vmodule="${LOG_SPEC}" \
--cert-dir="${CERT_DIR}" \
--client-ca-file="${CERT_DIR}/client-ca.crt" \
--service-account-key-file="${SERVICE_ACCOUNT_KEY}" \
@@ -894,7 +904,7 @@ if [[ "${START_MODE}" != "nokubelet" ]]; then
esac
fi
if [[ -n "${PSP_ADMISSION}" && "${ENABLE_RBAC}" = true ]]; then
if [[ -n "${PSP_ADMISSION}" && ("${ENABLE_RBAC}" = true || "${AUTHORIZATION_MODE}" = *RBAC* ) ]]; then
create_psp_policy
fi

View File

@@ -26,10 +26,13 @@ go_library(
deps = [
"//pkg/apis/rbac:go_default_library",
"//pkg/auth/authorizer/abac:go_default_library",
"//pkg/auth/nodeidentifier:go_default_library",
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//pkg/client/listers/rbac/internalversion:go_default_library",
"//pkg/kubeapiserver/authorizer/modes:go_default_library",
"//plugin/pkg/auth/authorizer/node:go_default_library",
"//plugin/pkg/auth/authorizer/rbac:go_default_library",
"//plugin/pkg/auth/authorizer/rbac/bootstrappolicy:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",

View File

@@ -28,10 +28,13 @@ import (
"k8s.io/apiserver/plugin/pkg/authorizer/webhook"
rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/auth/authorizer/abac"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
rbaclisters "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion"
"k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/node"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
)
type AuthorizationConfig struct {
@@ -107,6 +110,19 @@ func (config AuthorizationConfig) New() (authorizer.Authorizer, error) {
}
// Keep cases in sync with constant list above.
switch authorizationMode {
case modes.ModeNode:
graph := node.NewGraph()
node.AddGraphEventHandlers(
graph,
config.InformerFactory.Core().InternalVersion().Pods(),
config.InformerFactory.Core().InternalVersion().PersistentVolumes(),
)
nodeAuthorizer := node.NewAuthorizer(graph, nodeidentifier.NewDefaultNodeIdentifier(), bootstrappolicy.NodeRules())
authorizers = append(authorizers, nodeAuthorizer)
// Don't bind system:nodes to the system:node role
bootstrappolicy.AddClusterRoleBindingFilter(bootstrappolicy.OmitNodesGroupBinding)
case modes.ModeAlwaysAllow:
authorizers = append(authorizers, authorizerfactory.NewAlwaysAllowAuthorizer())
case modes.ModeAlwaysDeny:
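
For orientation, here is a minimal sketch (not part of this commit) of how the combined Node and RBAC authorizer could be constructed through AuthorizationConfig, mirroring the wiring in the integration test later in this diff. The helper name newNodeRBACAuthorizer and the stop-channel plumbing are illustrative assumptions; only the config fields, constructor, and informer calls shown elsewhere in this change are used.

package example

import (
	"time"

	apiserverauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
	"k8s.io/kubernetes/pkg/kubeapiserver/authorizer"
)

// newNodeRBACAuthorizer (hypothetical helper) places the Node authorizer ahead
// of RBAC, so requests from nodes are checked against the object graph first.
func newNodeRBACAuthorizer(client clientset.Interface, stopCh <-chan struct{}) (apiserverauthorizer.Authorizer, error) {
	informerFactory := informers.NewSharedInformerFactory(client, time.Minute)

	config := authorizer.AuthorizationConfig{
		AuthorizationModes: []string{"Node", "RBAC"},
		InformerFactory:    informerFactory,
	}
	authz, err := config.New()
	if err != nil {
		return nil, err
	}

	// The node graph (and the RBAC listers) are fed by shared informers,
	// which must be started before the authorizer sees live data.
	informerFactory.Start(stopCh)
	return authz, nil
}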

View File

@@ -22,9 +22,10 @@ const (
ModeABAC string = "ABAC"
ModeWebhook string = "Webhook"
ModeRBAC string = "RBAC"
ModeNode string = "Node"
)
var AuthorizationModeChoices = []string{ModeAlwaysAllow, ModeAlwaysDeny, ModeABAC, ModeWebhook, ModeRBAC}
var AuthorizationModeChoices = []string{ModeAlwaysAllow, ModeAlwaysDeny, ModeABAC, ModeWebhook, ModeRBAC, ModeNode}
// IsValidAuthorizationMode returns true if the given authorization mode is a valid one for the apiserver
func IsValidAuthorizationMode(authzMode string) bool {

View File

@@ -24,6 +24,7 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//plugin/pkg/auth/authorizer/node:all-srcs",
"//plugin/pkg/auth/authorizer/rbac:all-srcs",
],
tags = ["automanaged"],

View File

@@ -0,0 +1,63 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["node_authorizer_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/auth/nodeidentifier:go_default_library",
"//plugin/pkg/auth/authorizer/rbac/bootstrappolicy:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"graph.go",
"graph_populator.go",
"node_authorizer.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/persistentvolume:go_default_library",
"//pkg/api/pod:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/auth/nodeidentifier:go_default_library",
"//pkg/client/informers/informers_generated/internalversion/core/internalversion:go_default_library",
"//plugin/pkg/auth/authorizer/rbac:go_default_library",
"//third_party/forked/gonum/graph:go_default_library",
"//third_party/forked/gonum/graph/simple:go_default_library",
"//third_party/forked/gonum/graph/traverse:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -0,0 +1,9 @@
approvers:
- timstclair
- liggitt
- deads2k
reviewers:
- timstclair
- liggitt
- deads2k
- ericchiang

View File

@@ -0,0 +1,265 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"sync"
"k8s.io/kubernetes/pkg/api"
pvutil "k8s.io/kubernetes/pkg/api/persistentvolume"
podutil "k8s.io/kubernetes/pkg/api/pod"
"k8s.io/kubernetes/third_party/forked/gonum/graph"
"k8s.io/kubernetes/third_party/forked/gonum/graph/simple"
)
// namedVertex implements graph.Node and remembers the type, namespace, and name of its related API object
type namedVertex struct {
name string
namespace string
id int
vertexType vertexType
}
func newNamedVertex(vertexType vertexType, namespace, name string, id int) *namedVertex {
return &namedVertex{
vertexType: vertexType,
name: name,
namespace: namespace,
id: id,
}
}
func (n *namedVertex) ID() int {
return n.id
}
func (n *namedVertex) String() string {
if len(n.namespace) == 0 {
return vertexTypes[n.vertexType] + ":" + n.name
}
return vertexTypes[n.vertexType] + ":" + n.namespace + "/" + n.name
}
// destinationEdge is a graph edge that includes a denormalized reference to the final destination vertex.
// This should only be used when there is a single leaf vertex reachable from T.
type destinationEdge struct {
F graph.Node
T graph.Node
Destination graph.Node
}
func newDestinationEdge(from, to, destination graph.Node) graph.Edge {
return &destinationEdge{F: from, T: to, Destination: destination}
}
func (e *destinationEdge) From() graph.Node { return e.F }
func (e *destinationEdge) To() graph.Node { return e.T }
func (e *destinationEdge) Weight() float64 { return 0 }
func (e *destinationEdge) DestinationID() int { return e.Destination.ID() }
// Graph holds graph vertices and a way to look up a vertex for a particular API type/namespace/name.
// All edges point toward the vertices representing Kubernetes nodes:
//
// node <- pod
// pod <- secret,configmap,pvc
// pvc <- pv
// pv <- secret
type Graph struct {
lock sync.RWMutex
graph *simple.DirectedAcyclicGraph
// vertices is a map of type -> namespace -> name -> vertex
vertices map[vertexType]namespaceVertexMapping
}
// namespaceVertexMapping is a map of namespace -> name -> vertex
type namespaceVertexMapping map[string]nameVertexMapping
// nameVertexMapping is a map of name -> vertex
type nameVertexMapping map[string]*namedVertex
func NewGraph() *Graph {
return &Graph{
vertices: map[vertexType]namespaceVertexMapping{},
graph: simple.NewDirectedAcyclicGraph(0, 0),
}
}
// vertexType indicates the type of the API object the vertex represents.
// represented as a byte to minimize space used in the vertices.
type vertexType byte
const (
configMapVertexType vertexType = iota
nodeVertexType
podVertexType
pvcVertexType
pvVertexType
secretVertexType
)
var vertexTypes = map[vertexType]string{
configMapVertexType: "configmap",
nodeVertexType: "node",
podVertexType: "pod",
pvcVertexType: "pvc",
pvVertexType: "pv",
secretVertexType: "secret",
}
// must be called under a write lock
func (g *Graph) getOrCreateVertex_locked(vertexType vertexType, namespace, name string) *namedVertex {
if vertex, exists := g.getVertex_rlocked(vertexType, namespace, name); exists {
return vertex
}
return g.createVertex_locked(vertexType, namespace, name)
}
// must be called under a read lock
func (g *Graph) getVertex_rlocked(vertexType vertexType, namespace, name string) (*namedVertex, bool) {
vertex, exists := g.vertices[vertexType][namespace][name]
return vertex, exists
}
// must be called under a write lock
func (g *Graph) createVertex_locked(vertexType vertexType, namespace, name string) *namedVertex {
typedVertices, exists := g.vertices[vertexType]
if !exists {
typedVertices = namespaceVertexMapping{}
g.vertices[vertexType] = typedVertices
}
namespacedVertices, exists := typedVertices[namespace]
if !exists {
namespacedVertices = map[string]*namedVertex{}
typedVertices[namespace] = namespacedVertices
}
vertex := newNamedVertex(vertexType, namespace, name, g.graph.NewNodeID())
namespacedVertices[name] = vertex
g.graph.AddNode(vertex)
return vertex
}
// must be called under write lock
func (g *Graph) deleteVertex_locked(vertexType vertexType, namespace, name string) {
vertex, exists := g.getVertex_rlocked(vertexType, namespace, name)
if !exists {
return
}
// find existing neighbors with a single edge (meaning we are their only neighbor)
neighborsToRemove := []graph.Node{}
g.graph.VisitFrom(vertex, func(neighbor graph.Node) bool {
// this downstream neighbor has only one edge (which must be from us), so remove them as well
if g.graph.Degree(neighbor) == 1 {
neighborsToRemove = append(neighborsToRemove, neighbor)
}
return true
})
g.graph.VisitTo(vertex, func(neighbor graph.Node) bool {
// this upstream neighbor has only one edge (which must be to us), so remove them as well
if g.graph.Degree(neighbor) == 1 {
neighborsToRemove = append(neighborsToRemove, neighbor)
}
return true
})
// remove the vertex
g.graph.RemoveNode(vertex)
delete(g.vertices[vertexType][namespace], name)
if len(g.vertices[vertexType][namespace]) == 0 {
delete(g.vertices[vertexType], namespace)
}
// remove neighbors that are now edgeless
for _, neighbor := range neighborsToRemove {
g.graph.RemoveNode(neighbor)
n := neighbor.(*namedVertex)
delete(g.vertices[n.vertexType][n.namespace], n.name)
if len(g.vertices[n.vertexType][n.namespace]) == 0 {
delete(g.vertices[n.vertexType], n.namespace)
}
}
}
// AddPod should only be called once spec.NodeName is populated.
// It sets up edges for the following relationships (which are immutable for a pod once bound to a node):
//
// pod -> node
//
// secret -> pod
// configmap -> pod
// pvc -> pod
func (g *Graph) AddPod(pod *api.Pod) {
g.lock.Lock()
defer g.lock.Unlock()
g.deleteVertex_locked(podVertexType, pod.Namespace, pod.Name)
podVertex := g.getOrCreateVertex_locked(podVertexType, pod.Namespace, pod.Name)
nodeVertex := g.getOrCreateVertex_locked(nodeVertexType, "", pod.Spec.NodeName)
g.graph.SetEdge(newDestinationEdge(podVertex, nodeVertex, nodeVertex))
podutil.VisitPodSecretNames(pod, func(secret string) bool {
g.graph.SetEdge(newDestinationEdge(g.getOrCreateVertex_locked(secretVertexType, pod.Namespace, secret), podVertex, nodeVertex))
return true
})
podutil.VisitPodConfigmapNames(pod, func(configmap string) bool {
g.graph.SetEdge(newDestinationEdge(g.getOrCreateVertex_locked(configMapVertexType, pod.Namespace, configmap), podVertex, nodeVertex))
return true
})
for _, v := range pod.Spec.Volumes {
if v.PersistentVolumeClaim != nil {
g.graph.SetEdge(newDestinationEdge(g.getOrCreateVertex_locked(pvcVertexType, pod.Namespace, v.PersistentVolumeClaim.ClaimName), podVertex, nodeVertex))
}
}
}
func (g *Graph) DeletePod(name, namespace string) {
g.lock.Lock()
defer g.lock.Unlock()
g.deleteVertex_locked(podVertexType, namespace, name)
}
// AddPV sets up edges for the following relationships:
//
// secret -> pv
//
// pv -> pvc
func (g *Graph) AddPV(pv *api.PersistentVolume) {
g.lock.Lock()
defer g.lock.Unlock()
// clear existing edges
g.deleteVertex_locked(pvVertexType, "", pv.Name)
// if we have a pvc, establish new edges
if pv.Spec.ClaimRef != nil {
pvVertex := g.getOrCreateVertex_locked(pvVertexType, "", pv.Name)
// since we don't know the other end of the pvc -> pod -> node chain (or it may not even exist yet), we can't decorate these edges with kubernetes node info
g.graph.SetEdge(simple.Edge{F: pvVertex, T: g.getOrCreateVertex_locked(pvcVertexType, pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)})
pvutil.VisitPVSecretNames(pv, func(secret string) bool {
// This grants access to the named secret in the same namespace as the bound PVC
g.graph.SetEdge(simple.Edge{F: g.getOrCreateVertex_locked(secretVertexType, pv.Spec.ClaimRef.Namespace, secret), T: pvVertex})
return true
})
}
}
func (g *Graph) DeletePV(name string) {
g.lock.Lock()
defer g.lock.Unlock()
g.deleteVertex_locked(pvVertexType, "", name)
}
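
As an illustration (not part of the commit), the following sketch exercises the exported Graph methods to show the edges described above and the pruning that happens when a pod is removed; the object shapes follow the tests later in this diff.

package node

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api"
)

// exampleGraphUsage is illustrative only.
func exampleGraphUsage() {
	g := NewGraph()

	// A pod bound to node1 that mounts secret "s1" adds:
	//   pod1 -> node1
	//   s1 -> pod1 (with node1 recorded as the destination on the edge)
	g.AddPod(&api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1"},
		Spec: api.PodSpec{
			NodeName: "node1",
			Volumes: []api.Volume{
				{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "s1"}}},
			},
		},
	})

	// A PV bound to a claim adds pv1 -> pvc1 (and secret -> pv edges, if any).
	g.AddPV(&api.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pv1"},
		Spec: api.PersistentVolumeSpec{
			ClaimRef: &api.ObjectReference{Namespace: "ns1", Name: "pvc1"},
		},
	})

	// Deleting the pod removes its vertex, plus any neighbors left with no
	// remaining edges (here the secret and node vertices, whose only edges
	// were to/from pod1).
	g.DeletePod("pod1", "ns1")
}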

View File

@@ -0,0 +1,108 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"github.com/golang/glog"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api"
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion"
)
type graphPopulator struct {
graph *Graph
}
func AddGraphEventHandlers(graph *Graph, pods coreinformers.PodInformer, pvs coreinformers.PersistentVolumeInformer) {
g := &graphPopulator{
graph: graph,
}
pods.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: g.addPod,
UpdateFunc: g.updatePod,
DeleteFunc: g.deletePod,
})
pvs.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: g.addPV,
UpdateFunc: g.updatePV,
DeleteFunc: g.deletePV,
})
}
func (g *graphPopulator) addPod(obj interface{}) {
g.updatePod(nil, obj)
}
func (g *graphPopulator) updatePod(oldObj, obj interface{}) {
pod := obj.(*api.Pod)
if len(pod.Spec.NodeName) == 0 {
// No node assigned
glog.V(5).Infof("updatePod %s/%s, no node", pod.Namespace, pod.Name)
return
}
if oldPod, ok := oldObj.(*api.Pod); ok && oldPod != nil {
if (pod.Spec.NodeName == oldPod.Spec.NodeName) && (pod.UID == oldPod.UID) {
// Node and uid are unchanged, all object references in the pod spec are immutable
glog.V(5).Infof("updatePod %s/%s, node unchanged", pod.Namespace, pod.Name)
return
}
}
glog.V(4).Infof("updatePod %s/%s for node %s", pod.Namespace, pod.Name, pod.Spec.NodeName)
g.graph.AddPod(pod)
}
func (g *graphPopulator) deletePod(obj interface{}) {
if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
obj = tombstone.Obj
}
pod, ok := obj.(*api.Pod)
if !ok {
glog.Infof("unexpected type %T", obj)
return
}
if len(pod.Spec.NodeName) == 0 {
glog.V(5).Infof("deletePod %s/%s, no node", pod.Namespace, pod.Name)
return
}
glog.V(4).Infof("deletePod %s/%s for node %s", pod.Namespace, pod.Name, pod.Spec.NodeName)
g.graph.DeletePod(pod.Name, pod.Namespace)
}
func (g *graphPopulator) addPV(obj interface{}) {
g.updatePV(nil, obj)
}
func (g *graphPopulator) updatePV(oldObj, obj interface{}) {
pv := obj.(*api.PersistentVolume)
// TODO: skip add if uid, pvc, and secrets are all identical between old and new
g.graph.AddPV(pv)
}
func (g *graphPopulator) deletePV(obj interface{}) {
if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
obj = tombstone.Obj
}
pv, ok := obj.(*api.PersistentVolume)
if !ok {
glog.Infof("unexpected type %T", obj)
return
}
g.graph.DeletePV(pv.Name)
}

View File

@@ -0,0 +1,164 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/api"
rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
"k8s.io/kubernetes/third_party/forked/gonum/graph"
"k8s.io/kubernetes/third_party/forked/gonum/graph/traverse"
)
// NodeAuthorizer authorizes requests from kubelets, with the following logic:
// 1. If a request is not from a node (IdentifyNode() returns isNode=false), reject
// 2. If a specific node cannot be identified (IdentifyNode() returns nodeName=""), reject
// 3. If a request is for a secret, configmap, persistent volume or persistent volume claim, reject unless the verb is get, and the requested object is related to the requesting node:
// node <- pod
// node <- pod <- secret
// node <- pod <- configmap
// node <- pod <- pvc
// node <- pod <- pvc <- pv
// node <- pod <- pvc <- pv <- secret
// 4. For other resources, authorize all nodes uniformly using statically defined rules
type NodeAuthorizer struct {
graph *Graph
identifier nodeidentifier.NodeIdentifier
nodeRules []rbacapi.PolicyRule
}
// NewAuthorizer returns a new node authorizer
func NewAuthorizer(graph *Graph, identifier nodeidentifier.NodeIdentifier, rules []rbacapi.PolicyRule) authorizer.Authorizer {
return &NodeAuthorizer{
graph: graph,
identifier: identifier,
nodeRules: rules,
}
}
var (
configMapResource = api.Resource("configmaps")
secretResource = api.Resource("secrets")
pvcResource = api.Resource("persistentvolumeclaims")
pvResource = api.Resource("persistentvolumes")
)
func (r *NodeAuthorizer) Authorize(attrs authorizer.Attributes) (bool, string, error) {
nodeName, isNode := r.identifier.NodeIdentity(attrs.GetUser())
if !isNode {
// reject requests from non-nodes
return false, "", nil
}
if len(nodeName) == 0 {
// reject requests from unidentifiable nodes
glog.V(2).Infof("NODE DENY: unknown node for user %q", attrs.GetUser().GetName())
return false, fmt.Sprintf("unknown node for user %q", attrs.GetUser().GetName()), nil
}
// subdivide access to specific resources
if attrs.IsResourceRequest() {
requestResource := schema.GroupResource{Group: attrs.GetAPIGroup(), Resource: attrs.GetResource()}
switch requestResource {
case secretResource:
return r.authorizeGet(nodeName, secretVertexType, attrs)
case configMapResource:
return r.authorizeGet(nodeName, configMapVertexType, attrs)
case pvcResource:
return r.authorizeGet(nodeName, pvcVertexType, attrs)
case pvResource:
return r.authorizeGet(nodeName, pvVertexType, attrs)
}
}
// Access to other resources is not subdivided, so just evaluate against the statically defined node rules
return rbac.RulesAllow(attrs, r.nodeRules...), "", nil
}
// authorizeGet authorizes "get" requests to objects of the specified type if they are related to the specified node
func (r *NodeAuthorizer) authorizeGet(nodeName string, startingType vertexType, attrs authorizer.Attributes) (bool, string, error) {
if attrs.GetVerb() != "get" || len(attrs.GetName()) == 0 {
glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs)
return false, "can only get individual resources of this type", nil
}
if len(attrs.GetSubresource()) > 0 {
glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs)
return false, "cannot get subresource", nil
}
ok, err := r.hasPathFrom(nodeName, startingType, attrs.GetNamespace(), attrs.GetName())
if err != nil {
glog.V(2).Infof("NODE DENY: %v", err)
return false, "no path found to object", nil
}
if !ok {
glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs)
return false, "no path found to object", nil
}
return ok, "", nil
}
// hasPathFrom returns true if there is a directed path from the specified type/namespace/name to the specified Node
func (r *NodeAuthorizer) hasPathFrom(nodeName string, startingType vertexType, startingNamespace, startingName string) (bool, error) {
r.graph.lock.RLock()
defer r.graph.lock.RUnlock()
nodeVertex, exists := r.graph.getVertex_rlocked(nodeVertexType, "", nodeName)
if !exists {
return false, fmt.Errorf("unknown node %s cannot get %s %s/%s", nodeName, vertexTypes[startingType], startingNamespace, startingName)
}
startingVertex, exists := r.graph.getVertex_rlocked(startingType, startingNamespace, startingName)
if !exists {
return false, fmt.Errorf("node %s cannot get unknown %s %s/%s", nodeName, vertexTypes[startingType], startingNamespace, startingName)
}
found := false
traversal := &traverse.VisitingDepthFirst{
EdgeFilter: func(edge graph.Edge) bool {
if destinationEdge, ok := edge.(*destinationEdge); ok {
if destinationEdge.DestinationID() != nodeVertex.ID() {
// Don't follow edges leading to other nodes
return false
}
// We found an edge leading to the node we want
found = true
}
// Visit this edge
return true
},
}
traversal.Walk(r.graph.graph, startingVertex, func(n graph.Node) bool {
if n.ID() == nodeVertex.ID() {
// We found the node we want
found = true
}
// Stop visiting if we've found the node we want
return found
})
if !found {
return false, fmt.Errorf("node %s cannot get %s %s/%s, no path was found", nodeName, vertexTypes[startingType], startingNamespace, startingName)
}
return true, nil
}
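
To make the authorization rules above concrete, here is a small sketch (not part of the commit) that builds a graph, constructs the authorizer, and shows one allowed get and one denied list; it uses only constructors and attribute fields that also appear in the tests later in this diff.

package node

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/auth/nodeidentifier"
	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
)

// exampleAuthorize is illustrative only.
func exampleAuthorize() {
	g := NewGraph()
	g.AddPod(&api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1"},
		Spec: api.PodSpec{
			NodeName: "node1",
			Volumes:  []api.Volume{{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "s1"}}}},
		},
	})

	authz := NewAuthorizer(g, nodeidentifier.NewDefaultNodeIdentifier(), bootstrappolicy.NodeRules())
	node1 := &user.DefaultInfo{Name: "system:node:node1", Groups: []string{"system:nodes"}}

	// Allowed: a get of a secret reachable via s1 -> pod1 -> node1.
	ok, _, _ := authz.Authorize(authorizer.AttributesRecord{
		User: node1, ResourceRequest: true, Verb: "get",
		Resource: "secrets", Namespace: "ns1", Name: "s1",
	})
	fmt.Println(ok) // true

	// Denied: list/watch of secrets never reaches the graph; only named gets do.
	ok, _, _ = authz.Authorize(authorizer.AttributesRecord{
		User: node1, ResourceRequest: true, Verb: "list",
		Resource: "secrets", Namespace: "ns1",
	})
	fmt.Println(ok) // false
}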

View File

@@ -0,0 +1,437 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"runtime"
"runtime/pprof"
"testing"
"os"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
)
func TestAuthorizer(t *testing.T) {
g := NewGraph()
opts := sampleDataOpts{
nodes: 2,
namespaces: 2,
podsPerNode: 2,
sharedConfigMapsPerPod: 0,
uniqueConfigMapsPerPod: 1,
sharedSecretsPerPod: 1,
uniqueSecretsPerPod: 1,
sharedPVCsPerPod: 0,
uniquePVCsPerPod: 1,
}
pods, pvs := generate(opts)
populate(g, pods, pvs)
identifier := nodeidentifier.NewDefaultNodeIdentifier()
authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules())
node0 := &user.DefaultInfo{Name: "system:node:node0", Groups: []string{"system:nodes"}}
tests := []struct {
name string
attrs authorizer.AttributesRecord
expect bool
}{
{
name: "allowed configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node0", Namespace: "ns0"},
expect: true,
},
{
name: "allowed secret via pod",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-pod0-node0", Namespace: "ns0"},
expect: true,
},
{
name: "allowed shared secret via pod",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-shared", Namespace: "ns0"},
expect: true,
},
{
name: "allowed shared secret via pvc",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret-pv0-pod0-node0-ns0", Namespace: "ns0"},
expect: true,
},
{
name: "allowed pvc",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "persistentvolumeclaims", Name: "pvc0-pod0-node0", Namespace: "ns0"},
expect: true,
},
{
name: "allowed pv",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "persistentvolumes", Name: "pv0-pod0-node0-ns0", Namespace: ""},
expect: true,
},
{
name: "disallowed configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node1", Namespace: "ns0"},
expect: false,
},
{
name: "disallowed secret via pod",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-pod0-node1", Namespace: "ns0"},
expect: false,
},
{
name: "disallowed shared secret via pvc",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret-pv0-pod0-node1-ns0", Namespace: "ns0"},
expect: false,
},
{
name: "disallowed pvc",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "persistentvolumeclaims", Name: "pvc0-pod0-node1", Namespace: "ns0"},
expect: false,
},
{
name: "disallowed pv",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "persistentvolumes", Name: "pv0-pod0-node1-ns0", Namespace: ""},
expect: false,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
ok, _, _ := authz.Authorize(tc.attrs)
if ok != tc.expect {
t.Errorf("expected %v, got %v", tc.expect, ok)
}
})
}
}
func TestAuthorizerSharedResources(t *testing.T) {
g := NewGraph()
identifier := nodeidentifier.NewDefaultNodeIdentifier()
authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules())
node1 := &user.DefaultInfo{Name: "system:node:node1", Groups: []string{"system:nodes"}}
node2 := &user.DefaultInfo{Name: "system:node:node2", Groups: []string{"system:nodes"}}
node3 := &user.DefaultInfo{Name: "system:node:node3", Groups: []string{"system:nodes"}}
g.AddPod(&api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod1-node1", Namespace: "ns1"},
Spec: api.PodSpec{
NodeName: "node1",
Volumes: []api.Volume{
{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "node1-only"}}},
{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "node1-node2-only"}}},
{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "shared-all"}}},
},
},
})
g.AddPod(&api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod2-node2", Namespace: "ns1"},
Spec: api.PodSpec{
NodeName: "node2",
Volumes: []api.Volume{
{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "node1-node2-only"}}},
{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "shared-all"}}},
},
},
})
g.AddPod(&api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod3-node3", Namespace: "ns1"},
Spec: api.PodSpec{
NodeName: "node3",
Volumes: []api.Volume{
{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "shared-all"}}},
},
},
})
testcases := []struct {
User user.Info
Secret string
ExpectAllowed bool
}{
{User: node1, ExpectAllowed: true, Secret: "node1-only"},
{User: node1, ExpectAllowed: true, Secret: "node1-node2-only"},
{User: node1, ExpectAllowed: true, Secret: "shared-all"},
{User: node2, ExpectAllowed: false, Secret: "node1-only"},
{User: node2, ExpectAllowed: true, Secret: "node1-node2-only"},
{User: node2, ExpectAllowed: true, Secret: "shared-all"},
{User: node3, ExpectAllowed: false, Secret: "node1-only"},
{User: node3, ExpectAllowed: false, Secret: "node1-node2-only"},
{User: node3, ExpectAllowed: true, Secret: "shared-all"},
}
for i, tc := range testcases {
ok, _, err := authz.Authorize(authorizer.AttributesRecord{User: tc.User, ResourceRequest: true, Verb: "get", Resource: "secrets", Namespace: "ns1", Name: tc.Secret})
if err != nil {
t.Errorf("%d: unexpected error: %v", i, err)
continue
}
if ok != tc.ExpectAllowed {
t.Errorf("%d: expected %v, got %v", i, tc.ExpectAllowed, ok)
}
}
}
type sampleDataOpts struct {
nodes int
namespaces int
podsPerNode int
sharedConfigMapsPerPod int
sharedSecretsPerPod int
sharedPVCsPerPod int
uniqueSecretsPerPod int
uniqueConfigMapsPerPod int
uniquePVCsPerPod int
}
func BenchmarkPopulationAllocation(b *testing.B) {
opts := sampleDataOpts{
nodes: 500,
namespaces: 200,
podsPerNode: 200,
sharedConfigMapsPerPod: 0,
uniqueConfigMapsPerPod: 1,
sharedSecretsPerPod: 1,
uniqueSecretsPerPod: 1,
sharedPVCsPerPod: 0,
uniquePVCsPerPod: 1,
}
pods, pvs := generate(opts)
b.ResetTimer()
for i := 0; i < b.N; i++ {
g := NewGraph()
populate(g, pods, pvs)
}
}
func BenchmarkPopulationRetention(b *testing.B) {
// Run with:
// go test ./plugin/pkg/auth/authorizer/node -benchmem -bench . -run None -v -o node.test -timeout 300m
// Evaluate retained memory with:
// go tool pprof --inuse_space node.test plugin/pkg/auth/authorizer/node/BenchmarkPopulationRetention.profile
// list populate
opts := sampleDataOpts{
nodes: 500,
namespaces: 200,
podsPerNode: 200,
sharedConfigMapsPerPod: 0,
uniqueConfigMapsPerPod: 1,
sharedSecretsPerPod: 1,
uniqueSecretsPerPod: 1,
sharedPVCsPerPod: 0,
uniquePVCsPerPod: 1,
}
pods, pvs := generate(opts)
// Garbage collect before the first iteration
runtime.GC()
b.ResetTimer()
for i := 0; i < b.N; i++ {
g := NewGraph()
populate(g, pods, pvs)
if i == 0 {
f, _ := os.Create("BenchmarkPopulationRetention.profile")
runtime.GC()
pprof.WriteHeapProfile(f)
f.Close()
// reference the graph to keep it from getting garbage collected
_ = fmt.Sprintf("%T\n", g)
}
}
}
func BenchmarkAuthorization(b *testing.B) {
g := NewGraph()
opts := sampleDataOpts{
nodes: 500,
namespaces: 200,
podsPerNode: 200,
sharedConfigMapsPerPod: 0,
uniqueConfigMapsPerPod: 1,
sharedSecretsPerPod: 1,
uniqueSecretsPerPod: 1,
sharedPVCsPerPod: 0,
uniquePVCsPerPod: 1,
}
pods, pvs := generate(opts)
populate(g, pods, pvs)
identifier := nodeidentifier.NewDefaultNodeIdentifier()
authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules())
node0 := &user.DefaultInfo{Name: "system:node:node0", Groups: []string{"system:nodes"}}
tests := []struct {
name string
attrs authorizer.AttributesRecord
expect bool
}{
{
name: "allowed configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node0", Namespace: "ns0"},
expect: true,
},
{
name: "allowed secret via pod",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-pod0-node0", Namespace: "ns0"},
expect: true,
},
{
name: "allowed shared secret via pod",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-shared", Namespace: "ns0"},
expect: true,
},
{
name: "disallowed configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node1", Namespace: "ns0"},
expect: false,
},
{
name: "disallowed secret via pod",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-pod0-node1", Namespace: "ns0"},
expect: false,
},
{
name: "disallowed shared secret via pvc",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret-pv0-pod0-node1-ns0", Namespace: "ns0"},
expect: false,
},
{
name: "disallowed pvc",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "persistentvolumeclaims", Name: "pvc0-pod0-node1", Namespace: "ns0"},
expect: false,
},
{
name: "disallowed pv",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "persistentvolumes", Name: "pv0-pod0-node1-ns0", Namespace: ""},
expect: false,
},
}
b.ResetTimer()
for _, tc := range tests {
b.Run(tc.name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
ok, _, _ := authz.Authorize(tc.attrs)
if ok != tc.expect {
b.Errorf("expected %v, got %v", tc.expect, ok)
}
}
})
}
}
func populate(graph *Graph, pods []*api.Pod, pvs []*api.PersistentVolume) {
p := &graphPopulator{}
p.graph = graph
for _, pod := range pods {
p.addPod(pod)
}
for _, pv := range pvs {
p.addPV(pv)
}
}
// generate creates sample pods and persistent volumes based on the provided options.
// the secret/configmap/pvc/node references in the pod and pv objects are named to indicate the connections between the objects.
// for example, secret0-pod0-node0 is a secret referenced by pod0 which is bound to node0.
// when populated into the graph, the node authorizer should allow node0 to access that secret, but not node1.
func generate(opts sampleDataOpts) ([]*api.Pod, []*api.PersistentVolume) {
pods := make([]*api.Pod, 0, opts.nodes*opts.podsPerNode)
pvs := make([]*api.PersistentVolume, 0, (opts.nodes*opts.podsPerNode*opts.uniquePVCsPerPod)+(opts.sharedPVCsPerPod*opts.namespaces))
for n := 0; n < opts.nodes; n++ {
nodeName := fmt.Sprintf("node%d", n)
for p := 0; p < opts.podsPerNode; p++ {
pod := &api.Pod{}
pod.Namespace = fmt.Sprintf("ns%d", p%opts.namespaces)
pod.Name = fmt.Sprintf("pod%d-%s", p, nodeName)
pod.Spec.NodeName = nodeName
for i := 0; i < opts.uniqueSecretsPerPod; i++ {
pod.Spec.Volumes = append(pod.Spec.Volumes, api.Volume{VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{SecretName: fmt.Sprintf("secret%d-%s", i, pod.Name)},
}})
}
for i := 0; i < opts.sharedSecretsPerPod; i++ {
pod.Spec.Volumes = append(pod.Spec.Volumes, api.Volume{VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{SecretName: fmt.Sprintf("secret%d-shared", i)},
}})
}
for i := 0; i < opts.uniqueConfigMapsPerPod; i++ {
pod.Spec.Volumes = append(pod.Spec.Volumes, api.Volume{VolumeSource: api.VolumeSource{
ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: fmt.Sprintf("configmap%d-%s", i, pod.Name)}},
}})
}
for i := 0; i < opts.sharedConfigMapsPerPod; i++ {
pod.Spec.Volumes = append(pod.Spec.Volumes, api.Volume{VolumeSource: api.VolumeSource{
ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: fmt.Sprintf("configmap%d-shared", i)}},
}})
}
for i := 0; i < opts.uniquePVCsPerPod; i++ {
pv := &api.PersistentVolume{}
pv.Name = fmt.Sprintf("pv%d-%s-%s", i, pod.Name, pod.Namespace)
pv.Spec.FlexVolume = &api.FlexVolumeSource{SecretRef: &api.LocalObjectReference{Name: fmt.Sprintf("secret-%s", pv.Name)}}
pv.Spec.ClaimRef = &api.ObjectReference{Name: fmt.Sprintf("pvc%d-%s", i, pod.Name), Namespace: pod.Namespace}
pvs = append(pvs, pv)
pod.Spec.Volumes = append(pod.Spec.Volumes, api.Volume{VolumeSource: api.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: pv.Spec.ClaimRef.Name},
}})
}
for i := 0; i < opts.sharedPVCsPerPod; i++ {
pv := &api.PersistentVolume{}
pv.Name = fmt.Sprintf("pv%d-shared-%s", i, pod.Namespace)
pv.Spec.FlexVolume = &api.FlexVolumeSource{SecretRef: &api.LocalObjectReference{Name: fmt.Sprintf("secret-%s", pv.Name)}}
pv.Spec.ClaimRef = &api.ObjectReference{Name: fmt.Sprintf("pvc%d-shared", i), Namespace: pod.Namespace}
pvs = append(pvs, pv)
pod.Spec.Volumes = append(pod.Spec.Volumes, api.Volume{VolumeSource: api.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: pv.Spec.ClaimRef.Name},
}})
}
pods = append(pods, pod)
}
}
return pods, pvs
}

View File

@@ -91,37 +91,37 @@ func NodeRules() []rbac.PolicyRule {
// Needed to check API access. These creates are non-mutating
rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
rbac.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(),
// Needed to build serviceLister, to populate env vars for services
rbac.NewRule(Read...).Groups(legacyGroup).Resources("services").RuleOrDie(),
// Nodes can register themselves
// TODO: restrict to creating a node with the same name they announce
// Nodes can register Node API objects and report status.
// Use the NodeRestriction admission plugin to limit a node to creating/updating its own API object.
rbac.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
// TODO: restrict to the bound node once supported
rbac.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
rbac.NewRule("update", "patch", "delete").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
// TODO: restrict to the bound node as creator once supported
// TODO: restrict to the bound node as creator in the NodeRestrictions admission plugin
rbac.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(),
// TODO: restrict to pods scheduled on the bound node once supported
// TODO: restrict to pods scheduled on the bound node once field selectors are supported by list/watch authorization
rbac.NewRule(Read...).Groups(legacyGroup).Resources("pods").RuleOrDie(),
// TODO: remove once mirror pods are removed
// TODO: restrict deletion to mirror pods created by the bound node once supported
// Needed for the node to create/delete mirror pods
// Needed for the node to create/delete mirror pods.
// Use the NodeRestriction admission plugin to limit a node to creating/deleting mirror pods bound to itself.
rbac.NewRule("create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
// TODO: restrict to pods scheduled on the bound node once supported
// Needed for the node to report status of pods it is running.
// Use the NodeRestriction admission plugin to limit a node to updating status of pods bound to itself.
rbac.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
// TODO: restrict to secrets and configmaps used by pods scheduled on bound node once supported
// Needed for imagepullsecrets, rbd/ceph and secret volumes, and secrets in envs
// Needed for configmap volume and envs
// Use the NodeRestriction admission plugin to limit a node to get secrets/configmaps referenced by pods bound to itself.
rbac.NewRule("get").Groups(legacyGroup).Resources("secrets", "configmaps").RuleOrDie(),
// TODO: restrict to claims/volumes used by pods scheduled on bound node once supported
// Needed for persistent volumes
// Use the NodeRestriction admission plugin to limit a node to get pv/pvc objects referenced by pods bound to itself.
rbac.NewRule("get").Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
// TODO: restrict to namespaces of pods scheduled on bound node once supported
// TODO: change glusterfs to use DNS lookup so this isn't needed?
// TODO: add to the Node authorizer and restrict to endpoints referenced by pods or PVs bound to the node
// Needed for glusterfs volumes
rbac.NewRule("get").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
// Used to create a certificatesigningrequest for a node-specific client certificate, and watch
@@ -362,18 +362,70 @@ func ClusterRoles() []rbac.ClusterRole {
return roles
}
// ClusterRoleBindingFilter can modify and return or omit (by returning nil) a role binding
type ClusterRoleBindingFilter func(*rbac.ClusterRoleBinding) *rbac.ClusterRoleBinding
// AddClusterRoleBindingFilter adds the given filter to the list that is invoked when determining bootstrap roles to reconcile.
func AddClusterRoleBindingFilter(filter ClusterRoleBindingFilter) {
clusterRoleBindingFilters = append(clusterRoleBindingFilters, filter)
}
// ClearClusterRoleBindingFilters removes any filters added using AddClusterRoleBindingFilter
func ClearClusterRoleBindingFilters() {
clusterRoleBindingFilters = nil
}
const systemNodeRoleName = "system:node"
var clusterRoleBindingFilters []ClusterRoleBindingFilter
// OmitNodesGroupBinding is a filter that omits the deprecated binding for the system:nodes group to the system:node role.
var OmitNodesGroupBinding = ClusterRoleBindingFilter(func(binding *rbac.ClusterRoleBinding) *rbac.ClusterRoleBinding {
if binding.RoleRef.Name == systemNodeRoleName {
subjects := []rbac.Subject{}
for _, subject := range binding.Subjects {
if subject.Kind == rbac.GroupKind && subject.Name == user.NodesGroup {
continue
}
subjects = append(subjects, subject)
}
binding.Subjects = subjects
}
return binding
})
// ClusterRoleBindings return default rolebindings to the default roles
func ClusterRoleBindings() []rbac.ClusterRoleBinding {
rolebindings := []rbac.ClusterRoleBinding{
rbac.NewClusterBinding("cluster-admin").Groups(user.SystemPrivilegedGroup).BindingOrDie(),
rbac.NewClusterBinding("system:discovery").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(),
rbac.NewClusterBinding("system:basic-user").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(),
rbac.NewClusterBinding("system:node").Groups(user.NodesGroup).BindingOrDie(),
rbac.NewClusterBinding("system:node-proxier").Users(user.KubeProxy).BindingOrDie(),
rbac.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(),
rbac.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(),
rbac.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(),
// This default system:nodes binding is deprecated in 1.7 with the availability of the Node authorizer.
// If an admin wants to grant the system:node role (which cannot partition Node API access), they will need to create their own clusterrolebinding.
// TODO: Remove the subjects from this binding in 1.8 (leave the empty binding for tightening reconciliation), and remove AddClusterRoleBindingFilter()
rbac.NewClusterBinding(systemNodeRoleName).Groups(user.NodesGroup).BindingOrDie(),
}
addClusterRoleBindingLabel(rolebindings)
return rolebindings
retval := []rbac.ClusterRoleBinding{}
for i := range rolebindings {
binding := &rolebindings[i]
for _, filter := range clusterRoleBindingFilters {
binding = filter(binding)
if binding == nil {
break
}
}
if binding != nil {
retval = append(retval, *binding)
}
}
return retval
}
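
As a quick illustration (not part of the commit), the sketch below shows the effect of registering OmitNodesGroupBinding: the deprecated system:nodes group subject disappears from the generated system:node binding. The helper name hasNodesGroupOnSystemNode is hypothetical; everything else comes from APIs added or used in this change.

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/kubernetes/pkg/apis/rbac"
	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
)

// hasNodesGroupOnSystemNode reports whether the generated bootstrap bindings
// still bind the system:nodes group to the system:node role.
func hasNodesGroupOnSystemNode() bool {
	for _, binding := range bootstrappolicy.ClusterRoleBindings() {
		if binding.RoleRef.Name != "system:node" {
			continue
		}
		for _, subject := range binding.Subjects {
			if subject.Kind == rbac.GroupKind && subject.Name == user.NodesGroup {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(hasNodesGroupOnSystemNode()) // true: the deprecated binding is present by default

	// The Node authorizer path in authorizer_config.go registers this filter.
	bootstrappolicy.AddClusterRoleBindingFilter(bootstrappolicy.OmitNodesGroupBinding)
	defer bootstrappolicy.ClearClusterRoleBindingFilters()

	fmt.Println(hasNodesGroupOnSystemNode()) // false: the group subject is filtered out
}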

View File

@@ -12,6 +12,7 @@ go_test(
srcs = [
"accessreview_test.go",
"auth_test.go",
"node_test.go",
"rbac_test.go",
],
tags = [
@@ -27,7 +28,10 @@ go_test(
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/auth/authorizer/abac:go_default_library",
"//pkg/auth/nodeidentifier:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//pkg/kubeapiserver/authorizer:go_default_library",
"//pkg/master:go_default_library",
"//pkg/registry/rbac/clusterrole:go_default_library",
"//pkg/registry/rbac/clusterrole/storage:go_default_library",
@@ -38,10 +42,14 @@ go_test(
"//pkg/registry/rbac/rolebinding:go_default_library",
"//pkg/registry/rbac/rolebinding/storage:go_default_library",
"//plugin/pkg/admission/admit:go_default_library",
"//plugin/pkg/admission/noderestriction:go_default_library",
"//plugin/pkg/auth/authorizer/rbac:go_default_library",
"//plugin/pkg/auth/authorizer/rbac/bootstrappolicy:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",

View File

@@ -0,0 +1,303 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"net/http"
"net/http/httptest"
"path/filepath"
"runtime"
"testing"
"time"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/kubeapiserver/authorizer"
"k8s.io/kubernetes/plugin/pkg/admission/noderestriction"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
"k8s.io/kubernetes/test/integration/framework"
)
func TestNodeAuthorizer(t *testing.T) {
// Start the server so we know the address
h := &framework.MasterHolder{Initialized: make(chan struct{})}
apiServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
<-h.Initialized
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
}))
// Build client config, clientset, and informers
clientConfig := &restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
superuserClient := clientsetForUser("admin/system:masters", clientConfig)
informerFactory := informers.NewSharedInformerFactory(superuserClient, time.Minute)
// Set up Node+RBAC authorizer
authorizerConfig := &authorizer.AuthorizationConfig{
AuthorizationModes: []string{"Node", "RBAC"},
InformerFactory: informerFactory,
}
nodeRBACAuthorizer, err := authorizerConfig.New()
if err != nil {
t.Fatal(err)
}
defer bootstrappolicy.ClearClusterRoleBindingFilters()
// Set up NodeRestriction admission
nodeRestrictionAdmission := noderestriction.NewPlugin(nodeidentifier.NewDefaultNodeIdentifier())
nodeRestrictionAdmission.SetInternalKubeClientSet(superuserClient)
if err := nodeRestrictionAdmission.Validate(); err != nil {
t.Fatal(err)
}
// Start the server
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.GenericConfig.Authenticator = newFakeAuthenticator()
masterConfig.GenericConfig.Authorizer = nodeRBACAuthorizer
masterConfig.GenericConfig.AdmissionControl = nodeRestrictionAdmission
_, _, closeFn := framework.RunAMasterUsingServer(masterConfig, apiServer, h)
defer closeFn()
// Start the informers
stopCh := make(chan struct{})
defer close(stopCh)
informerFactory.Start(stopCh)
// Wait for a healthy server
for {
result := superuserClient.Core().RESTClient().Get().AbsPath("/healthz").Do()
_, err := result.Raw()
if err == nil {
break
}
t.Log(err)
time.Sleep(time.Second)
}
// Create objects
if _, err := superuserClient.Core().Secrets("ns").Create(&api.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}}); err != nil {
t.Fatal(err)
}
if _, err := superuserClient.Core().Secrets("ns").Create(&api.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mypvsecret"}}); err != nil {
t.Fatal(err)
}
if _, err := superuserClient.Core().ConfigMaps("ns").Create(&api.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}); err != nil {
t.Fatal(err)
}
if _, err := superuserClient.Core().PersistentVolumeClaims("ns").Create(&api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{Name: "mypvc"},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany},
Resources: api.ResourceRequirements{Requests: api.ResourceList{api.ResourceStorage: resource.MustParse("1")}},
},
}); err != nil {
t.Fatal(err)
}
if _, err := superuserClient.Core().PersistentVolumes().Create(&api.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{Name: "mypv"},
Spec: api.PersistentVolumeSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany},
Capacity: api.ResourceList{api.ResourceStorage: resource.MustParse("1")},
ClaimRef: &api.ObjectReference{Namespace: "ns", Name: "mypvc"},
PersistentVolumeSource: api.PersistentVolumeSource{AzureFile: &api.AzureFileVolumeSource{ShareName: "default", SecretName: "mypvsecret"}},
},
}); err != nil {
t.Fatal(err)
}
getSecret := func(client clientset.Interface) error {
_, err := client.Core().Secrets("ns").Get("mysecret", metav1.GetOptions{})
return err
}
getPVSecret := func(client clientset.Interface) error {
_, err := client.Core().Secrets("ns").Get("mypvsecret", metav1.GetOptions{})
return err
}
getConfigMap := func(client clientset.Interface) error {
_, err := client.Core().ConfigMaps("ns").Get("myconfigmap", metav1.GetOptions{})
return err
}
getPVC := func(client clientset.Interface) error {
_, err := client.Core().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{})
return err
}
getPV := func(client clientset.Interface) error {
_, err := client.Core().PersistentVolumes().Get("mypv", metav1.GetOptions{})
return err
}
createNode2NormalPod := func(client clientset.Interface) error {
_, err := client.Core().Pods("ns").Create(&api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
Spec: api.PodSpec{
NodeName: "node2",
Containers: []api.Container{{Name: "image", Image: "busybox"}},
Volumes: []api.Volume{
{Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "mysecret"}}},
{Name: "cm", VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: "myconfigmap"}}}},
{Name: "pvc", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}}},
},
},
})
return err
}
updateNode2NormalPodStatus := func(client clientset.Interface) error {
startTime := metav1.NewTime(time.Now())
_, err := client.Core().Pods("ns").UpdateStatus(&api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
Status: api.PodStatus{StartTime: &startTime},
})
return err
}
deleteNode2NormalPod := func(client clientset.Interface) error {
zero := int64(0)
return client.Core().Pods("ns").Delete("node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
}
createNode2MirrorPod := func(client clientset.Interface) error {
_, err := client.Core().Pods("ns").Create(&api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "node2mirrorpod",
Annotations: map[string]string{api.MirrorPodAnnotationKey: "true"},
},
Spec: api.PodSpec{
NodeName: "node2",
Containers: []api.Container{{Name: "image", Image: "busybox"}},
},
})
return err
}
deleteNode2MirrorPod := func(client clientset.Interface) error {
zero := int64(0)
return client.Core().Pods("ns").Delete("node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
}
createNode2 := func(client clientset.Interface) error {
_, err := client.Core().Nodes().Create(&api.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}})
return err
}
updateNode2Status := func(client clientset.Interface) error {
_, err := client.Core().Nodes().UpdateStatus(&api.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node2"},
Status: api.NodeStatus{},
})
return err
}
deleteNode2 := func(client clientset.Interface) error {
return client.Core().Nodes().Delete("node2", nil)
}
nodeanonClient := clientsetForUser("unknown/system:nodes", clientConfig)
node1Client := clientsetForUser("system:node:node1/system:nodes", clientConfig)
node2Client := clientsetForUser("system:node:node2/system:nodes", clientConfig)
// all node requests from node1 and unknown node fail
expectForbidden(t, getSecret(nodeanonClient))
expectForbidden(t, getPVSecret(nodeanonClient))
expectForbidden(t, getConfigMap(nodeanonClient))
expectForbidden(t, getPVC(nodeanonClient))
expectForbidden(t, getPV(nodeanonClient))
expectForbidden(t, createNode2NormalPod(nodeanonClient))
expectForbidden(t, createNode2MirrorPod(nodeanonClient))
expectForbidden(t, deleteNode2MirrorPod(nodeanonClient))
expectForbidden(t, createNode2(nodeanonClient))
expectForbidden(t, updateNode2Status(nodeanonClient))
expectForbidden(t, deleteNode2(nodeanonClient))
expectForbidden(t, getSecret(node1Client))
expectForbidden(t, getPVSecret(node1Client))
expectForbidden(t, getConfigMap(node1Client))
expectForbidden(t, getPVC(node1Client))
expectForbidden(t, getPV(node1Client))
expectForbidden(t, createNode2NormalPod(node1Client))
expectForbidden(t, createNode2MirrorPod(node1Client))
expectForbidden(t, deleteNode2MirrorPod(node1Client))
expectForbidden(t, createNode2(node1Client))
expectForbidden(t, updateNode2Status(node1Client))
expectForbidden(t, deleteNode2(node1Client))
// related object requests from node2 fail
expectForbidden(t, getSecret(node2Client))
expectForbidden(t, getPVSecret(node2Client))
expectForbidden(t, getConfigMap(node2Client))
expectForbidden(t, getPVC(node2Client))
expectForbidden(t, getPV(node2Client))
expectForbidden(t, createNode2NormalPod(node2Client))
// mirror pod and self node lifecycle is allowed
expectAllowed(t, createNode2MirrorPod(node2Client))
expectAllowed(t, deleteNode2MirrorPod(node2Client))
expectAllowed(t, createNode2(node2Client))
expectAllowed(t, updateNode2Status(node2Client))
expectAllowed(t, deleteNode2(node2Client))
// create a pod as an admin to add object references
expectAllowed(t, createNode2NormalPod(superuserClient))
// unidentifiable node and node1 are still forbidden
expectForbidden(t, getSecret(nodeanonClient))
expectForbidden(t, getPVSecret(nodeanonClient))
expectForbidden(t, getConfigMap(nodeanonClient))
expectForbidden(t, getPVC(nodeanonClient))
expectForbidden(t, getPV(nodeanonClient))
expectForbidden(t, createNode2NormalPod(nodeanonClient))
expectForbidden(t, updateNode2NormalPodStatus(nodeanonClient))
expectForbidden(t, deleteNode2NormalPod(nodeanonClient))
expectForbidden(t, createNode2MirrorPod(nodeanonClient))
expectForbidden(t, deleteNode2MirrorPod(nodeanonClient))
expectForbidden(t, getSecret(node1Client))
expectForbidden(t, getPVSecret(node1Client))
expectForbidden(t, getConfigMap(node1Client))
expectForbidden(t, getPVC(node1Client))
expectForbidden(t, getPV(node1Client))
expectForbidden(t, createNode2NormalPod(node1Client))
expectForbidden(t, updateNode2NormalPodStatus(node1Client))
expectForbidden(t, deleteNode2NormalPod(node1Client))
expectForbidden(t, createNode2MirrorPod(node1Client))
expectForbidden(t, deleteNode2MirrorPod(node1Client))
// node2 can get referenced objects now
expectAllowed(t, getSecret(node2Client))
expectAllowed(t, getPVSecret(node2Client))
expectAllowed(t, getConfigMap(node2Client))
expectAllowed(t, getPVC(node2Client))
expectAllowed(t, getPV(node2Client))
expectForbidden(t, createNode2NormalPod(node2Client))
expectAllowed(t, updateNode2NormalPodStatus(node2Client))
expectAllowed(t, deleteNode2NormalPod(node2Client))
expectAllowed(t, createNode2MirrorPod(node2Client))
expectAllowed(t, deleteNode2MirrorPod(node2Client))
}
func expectForbidden(t *testing.T, err error) {
if !errors.IsForbidden(err) {
_, file, line, _ := runtime.Caller(1)
t.Errorf("%s:%d: Expected forbidden error, got %v", filepath.Base(file), line, err)
}
}
func expectAllowed(t *testing.T, err error) {
if err != nil {
_, file, line, _ := runtime.Caller(1)
t.Errorf("%s:%d: Expected no error, got %v", filepath.Base(file), line, err)
}
}