mirror of https://github.com/k3s-io/k3s
Merge pull request #5142 from derekwaynecarr/unify_admission_control_salt
Unify admission control plug-ins across providers
commit 5d31e1ab08
@@ -65,3 +65,6 @@ ENABLE_CLUSTER_DNS=true
 DNS_SERVER_IP="10.0.0.10"
 DNS_DOMAIN="kubernetes.local"
 DNS_REPLICAS=1
+
+# Admission Controllers to invoke prior to persisting objects in cluster
+ADMISSION_CONTROL=NamespaceAutoProvision,LimitRanger,ResourceQuota
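ADMISSION_CONTROL is an ordered, comma-separated list: the API server runs each named plug-in in turn and rejects the request on the first error. A minimal, self-contained Go sketch of that fail-fast chaining pattern (the Interface, chain, and newFromNames names here are illustrative stand-ins, not the actual kubernetes/pkg/admission API):

package main

import (
	"fmt"
	"strings"
)

// Interface is an illustrative stand-in for an admission plug-in:
// Admit returns nil to allow the request or an error to reject it.
type Interface interface {
	Admit(resource, namespace, operation string) error
}

// chain runs every plug-in in order, failing fast on the first rejection.
type chain []Interface

func (c chain) Admit(resource, namespace, operation string) error {
	for _, plugin := range c {
		if err := plugin.Admit(resource, namespace, operation); err != nil {
			return err
		}
	}
	return nil
}

// alwaysDeny is a toy plug-in used to show the fail-fast behavior.
type alwaysDeny struct{ name string }

func (d alwaysDeny) Admit(resource, namespace, operation string) error {
	return fmt.Errorf("%s: request rejected", d.name)
}

// newFromNames mimics parsing the ADMISSION_CONTROL value: the order of
// the comma-separated names is the order the plug-ins run in.
func newFromNames(admissionControl string, registry map[string]Interface) (chain, error) {
	var c chain
	for _, name := range strings.Split(admissionControl, ",") {
		plugin, ok := registry[name]
		if !ok {
			return nil, fmt.Errorf("unknown admission plug-in %q", name)
		}
		c = append(c, plugin)
	}
	return c, nil
}

func main() {
	registry := map[string]Interface{"AlwaysDeny": alwaysDeny{name: "AlwaysDeny"}}
	c, err := newFromNames("AlwaysDeny", registry)
	if err != nil {
		panic(err)
	}
	fmt.Println(c.Admit("pods", "test", "CREATE")) // AlwaysDeny: request rejected
}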
@@ -33,6 +33,7 @@ enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
 dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
 dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
 dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
+admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
 EOF
 
 mkdir -p /srv/salt-overlay/salt/nginx
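The recurring sed -e "s/'/''/g" idiom doubles any single quotes in the value so it can be embedded safely inside a single-quoted YAML scalar in the Salt pillar file. The same transformation in Go, as an illustration only (quoteYAML is a hypothetical helper, not part of this change):

package main

import (
	"fmt"
	"strings"
)

// quoteYAML wraps s in single quotes for a YAML scalar, doubling any
// embedded single quotes -- the same transformation the shell performs
// with: echo "$VALUE" | sed -e "s/'/''/g"
func quoteYAML(s string) string {
	return "'" + strings.ReplaceAll(s, "'", "''") + "'"
}

func main() {
	fmt.Println("admission_control: " + quoteYAML("NamespaceAutoProvision,LimitRanger,ResourceQuota"))
	fmt.Println("dns_domain: " + quoteYAML("it's.local")) // embedded quote gets doubled
}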
@@ -314,6 +314,7 @@ function kube-up {
     echo "readonly DNS_REPLICAS='${DNS_REPLICAS:-}'"
     echo "readonly DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
     echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'"
+    echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
     grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/common.sh"
     grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/create-dynamic-salt-files.sh"
     grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/download-release.sh"
@@ -44,3 +44,6 @@ LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp
 # Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
 ENABLE_CLUSTER_LOGGING=false
 ELASTICSEARCH_LOGGING_REPLICAS=1
+
+# Admission Controllers to invoke prior to persisting objects in cluster
+ADMISSION_CONTROL=NamespaceAutoProvision,LimitRanger,ResourceQuota
@@ -23,6 +23,7 @@ cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
 instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
 node_instance_prefix: $NODE_INSTANCE_PREFIX
 portal_net: $PORTAL_NET
+admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
 EOF
 
 mkdir -p /srv/salt-overlay/salt/nginx
@@ -349,6 +349,7 @@ function kube-up {
     echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
     echo "readonly MASTER_HTPASSWD='${htpasswd}'"
     echo "readonly PORTAL_NET='${PORTAL_NET}'"
+    echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
     grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
     grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh"
     grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh"
@@ -105,3 +105,6 @@ ENABLE_CLUSTER_DNS=true
 DNS_SERVER_IP="10.0.0.10"
 DNS_DOMAIN="kubernetes.local"
 DNS_REPLICAS=1
+
+# Admission Controllers to invoke prior to persisting objects in cluster
+ADMISSION_CONTROL=NamespaceAutoProvision,LimitRanger,ResourceQuota
@@ -200,6 +200,7 @@ enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
 dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
 dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
 dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
+admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
 EOF
 
 if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
@@ -452,6 +452,7 @@ DNS_REPLICAS: $(yaml-quote ${DNS_REPLICAS:-})
 DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
 DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
 MASTER_HTPASSWD: $(yaml-quote ${MASTER_HTPASSWD})
+ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
 EOF
 
   if [[ "${master}" != "true" ]]; then
@@ -1,6 +1,6 @@
-{% if grains.cloud is defined and grains.cloud == 'gce' -%}
 # On GCE, there is no Salt mine. We run standalone.
-{% else %}
+{% if grains.cloud != 'gce' -%}
+
 # Allow everyone to see cached values of who sits at what IP
 {% set networkInterfaceName = "eth0" %}
 {% if grains.networkInterfaceName is defined %}
@@ -9,5 +9,4 @@
 mine_functions:
   network.ip_addrs: [{{networkInterfaceName}}]
   grains.items: []
-
 {% endif -%}
@@ -59,8 +59,8 @@
 {% endif -%}
 
 {% set admission_control = "" -%}
-{% if grains.admission_control is defined -%}
-{% set admission_control = "--admission_control=" + grains.admission_control -%}
+{% if pillar['admission_control'] is defined -%}
+{% set admission_control = "--admission_control=" + pillar['admission_control'] -%}
 {% endif -%}
 
 {% set runtime_config = "" -%}
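This template change is the heart of the unification: the --admission_control flag passed to kube-apiserver is now sourced from the cluster-wide Salt pillar (written by the create-dynamic-salt-files.sh scripts above) instead of per-node grains, so every provider feeds the API server the same way. A rough Go illustration of what the Jinja block computes (admissionControlFlag is a hypothetical helper, not code from this PR):

package main

import "fmt"

// admissionControlFlag mirrors the Jinja logic above: emit the flag only
// when the pillar supplies a value, otherwise leave it empty.
func admissionControlFlag(pillar map[string]string) string {
	if v, ok := pillar["admission_control"]; ok {
		return "--admission_control=" + v
	}
	return ""
}

func main() {
	pillar := map[string]string{"admission_control": "NamespaceAutoProvision,LimitRanger,ResourceQuota"}
	fmt.Println(admissionControlFlag(pillar))
	// Output: --admission_control=NamespaceAutoProvision,LimitRanger,ResourceQuota
}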
@@ -49,7 +49,7 @@ MASTER_USER=vagrant
 MASTER_PASSWD=vagrant
 
 # Admission Controllers to invoke prior to persisting objects in cluster
-ADMISSION_CONTROL=NamespaceExists,LimitRanger,ResourceQuota,AlwaysAdmit
+ADMISSION_CONTROL=NamespaceAutoProvision,LimitRanger,ResourceQuota
 
 # Optional: Install node monitoring.
 ENABLE_NODE_MONITORING=true
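Note the semantic shift in the Vagrant default: NamespaceExists rejects requests aimed at a namespace that has not been created yet, while NamespaceAutoProvision creates the namespace on first use (and the trailing AlwaysAdmit is dropped). A toy Go contrast of the two policies (illustrative only, using a hypothetical in-memory namespace set):

package main

import "fmt"

// known simulates the set of namespaces the admission plug-in can see.
var known = map[string]bool{"default": true}

// namespaceExists mimics the old default: reject anything aimed at a
// namespace that has not been created yet.
func namespaceExists(ns string) error {
	if !known[ns] {
		return fmt.Errorf("namespace %q does not exist", ns)
	}
	return nil
}

// namespaceAutoProvision mimics the new default: create the namespace
// on first use instead of rejecting the request.
func namespaceAutoProvision(ns string) error {
	if !known[ns] {
		known[ns] = true // auto-provision
	}
	return nil
}

func main() {
	fmt.Println(namespaceExists("test"))        // namespace "test" does not exist
	fmt.Println(namespaceAutoProvision("test")) // <nil>; "test" now exists
}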
@@ -83,7 +83,6 @@ grains:
   cloud_provider: vagrant
   roles:
     - kubernetes-master
-  admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
   runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
 EOF
 
@@ -102,6 +101,7 @@ cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
 dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
 dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
 instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
+admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
 EOF
 
 # Configure the salt-master
@@ -64,6 +64,7 @@ EOF
 # Our minions will have a pool role to distinguish them from the master.
 cat <<EOF >/etc/salt/minion.d/grains.conf
 grains:
+  cloud: vagrant
   network_mode: openvswitch
   node_ip: '$(echo "$MINION_IP" | sed -e "s/'/''/g")'
   etcd_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
@@ -45,6 +45,10 @@ type provision struct {
 }
 
 func (p *provision) Admit(a admission.Attributes) (err error) {
+	// only handle create requests
+	if a.GetOperation() != "CREATE" {
+		return nil
+	}
 	defaultVersion, kind, err := latest.RESTMapper.VersionAndKindForResource(a.GetResource())
 	if err != nil {
 		return err
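For context, the plug-in's flow after this new guard is: map the resource to a kind, check the local store for the namespace, and create it via the client only on a cache miss. A self-contained sketch of that shape (toy store/client types standing in for cache.Store and client.Interface, not the kubernetes packages):

package main

import "fmt"

// store and client are toy stand-ins for the cache.Store and
// client.Interface used by the real plug-in.
type store map[string]bool
type client struct{ created []string }

func (c *client) CreateNamespace(name string) error {
	c.created = append(c.created, name)
	return nil
}

// admit mirrors the provision plug-in's shape: ignore non-create
// operations, skip namespaces we already know about, otherwise
// auto-provision the namespace.
func admit(operation, namespace string, s store, c *client) error {
	if operation != "CREATE" {
		return nil // only handle create requests
	}
	if s[namespace] {
		return nil // namespace already exists; nothing to do
	}
	if err := c.CreateNamespace(namespace); err != nil {
		return fmt.Errorf("auto-provisioning namespace %q: %v", namespace, err)
	}
	s[namespace] = true
	return nil
}

func main() {
	s := store{}
	c := &client{}
	_ = admit("CREATE", "test", s, c)  // provisions "test"
	_ = admit("UPDATE", "other", s, c) // ignored: not a create
	fmt.Println(c.created)             // [test]
}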
@@ -15,3 +15,91 @@ limitations under the License.
 */
 
 package autoprovision
+
+import (
+	"testing"
+
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/admission"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
+)
+
+// TestAdmission verifies a namespace is created on create requests for namespace managed resources
+func TestAdmission(t *testing.T) {
+	namespace := "test"
+	mockClient := &client.Fake{}
+	handler := &provision{
+		client: mockClient,
+		store:  cache.NewStore(cache.MetaNamespaceKeyFunc),
+	}
+	pod := api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
+		Spec: api.PodSpec{
+			Volumes:    []api.Volume{{Name: "vol"}},
+			Containers: []api.Container{{Name: "ctr", Image: "image"}},
+		},
+	}
+	err := handler.Admit(admission.NewAttributesRecord(&pod, namespace, "pods", "CREATE"))
+	if err != nil {
+		t.Errorf("Unexpected error returned from admission handler")
+	}
+	if len(mockClient.Actions) != 1 {
+		t.Errorf("Expected a create-namespace request")
+	}
+	if mockClient.Actions[0].Action != "create-namespace" {
+		t.Errorf("Expected a create-namespace request to be made via the client")
+	}
+}
+
+// TestAdmissionNamespaceExists verifies that no client call is made when a namespace already exists
+func TestAdmissionNamespaceExists(t *testing.T) {
+	namespace := "test"
+	mockClient := &client.Fake{}
+	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
+	store.Add(&api.Namespace{
+		ObjectMeta: api.ObjectMeta{Name: namespace},
+	})
+	handler := &provision{
+		client: mockClient,
+		store:  store,
+	}
+	pod := api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
+		Spec: api.PodSpec{
+			Volumes:    []api.Volume{{Name: "vol"}},
+			Containers: []api.Container{{Name: "ctr", Image: "image"}},
+		},
+	}
+	err := handler.Admit(admission.NewAttributesRecord(&pod, namespace, "pods", "CREATE"))
+	if err != nil {
+		t.Errorf("Unexpected error returned from admission handler")
+	}
+	if len(mockClient.Actions) != 0 {
+		t.Errorf("No client request should have been made")
+	}
+}
+
+// TestIgnoreAdmission validates that a request is ignored if it's not a create
+func TestIgnoreAdmission(t *testing.T) {
+	namespace := "test"
+	mockClient := &client.Fake{}
+	handler := &provision{
+		client: mockClient,
+		store:  cache.NewStore(cache.MetaNamespaceKeyFunc),
+	}
+	pod := api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
+		Spec: api.PodSpec{
+			Volumes:    []api.Volume{{Name: "vol"}},
+			Containers: []api.Container{{Name: "ctr", Image: "image"}},
+		},
+	}
+	err := handler.Admit(admission.NewAttributesRecord(&pod, namespace, "pods", "UPDATE"))
+	if err != nil {
+		t.Errorf("Unexpected error returned from admission handler")
+	}
+	if len(mockClient.Actions) != 0 {
+		t.Errorf("No client request should have been made")
+	}
+}