k3s/test/integration/framework/master_utils.go

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httptest"
	goruntime "runtime"
	"sync"
	"testing"
	"time"

	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/apps"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	"k8s.io/kubernetes/pkg/apis/batch"
	"k8s.io/kubernetes/pkg/apis/certificates"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/apis/policy"
	"k8s.io/kubernetes/pkg/apis/rbac"
	"k8s.io/kubernetes/pkg/apis/storage"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/client/restclient"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/controller"
	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
	"k8s.io/kubernetes/pkg/genericapiserver"
	"k8s.io/kubernetes/pkg/genericapiserver/authorizer"
	"k8s.io/kubernetes/pkg/kubectl"
	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
	"k8s.io/kubernetes/pkg/master"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/storage/storagebackend"
	utilnet "k8s.io/kubernetes/pkg/util/net"
	"k8s.io/kubernetes/plugin/pkg/admission/admit"

	"github.com/go-openapi/spec"
	"github.com/pborman/uuid"
	"k8s.io/kubernetes/pkg/generated/openapi"
)
const (
	// DefaultTimeout is the timeout used in benchmarks, e.g. to scale an rc.
	DefaultTimeout = 30 * time.Minute

	// TestRCManifest is the rc manifest used to create pods for benchmarks.
	// TODO: Convert this to a full path?
	TestRCManifest = "benchmark-controller.json"
)
// MasterComponents is a control struct for all master components started via NewMasterComponents.
// TODO: Include all master components (scheduler, nodecontroller).
// TODO: Reconcile with integration.go, currently the master used there doesn't understand
// how to restart cleanly, which is required for each iteration of a benchmark. The integration
// tests also don't make it easy to isolate and turn off components at will.
type MasterComponents struct {
// Raw http server in front of the master
ApiServer *httptest.Server
// Kubernetes master, contains an embedded etcd storage
KubeMaster *master.Master
// Restclient used to talk to the kubernetes master
RestClient *client.Client
// Replication controller manager
ControllerManager *replicationcontroller.ReplicationManager
// Channel for stop signals to rc manager
rcStopCh chan struct{}
// Used to stop master components individually, and via MasterComponents.Stop
once sync.Once
}
// Config is a struct of configuration directives for NewMasterComponents.
type Config struct {
// If nil, a default is used, partially filled configs will not get populated.
MasterConfig *master.Config
StartReplicationManager bool
// Client throttling qps
QPS float32
// Client burst qps, also burst replicas allowed in rc manager
Burst int
// TODO: Add configs for endpoints controller, scheduler etc
}
// NewMasterComponents creates, initializes and starts master components based on the given config.
func NewMasterComponents(c *Config) *MasterComponents {
m, s := startMasterOrDie(c.MasterConfig)
// TODO: Allow callers to pipe through a different master url and create a client/start components using it.
glog.Infof("Master %+v", s.URL)
// TODO: caesarxuchao: remove this client when the refactoring of client library is done.
restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
rcStopCh := make(chan struct{})
controllerManager := replicationcontroller.NewReplicationManagerFromClient(clientset, controller.NoResyncPeriodFunc, c.Burst, 4096)
// TODO: Support events once we can cleanly shutdown an event recorder.
controllerManager.SetEventRecorder(&record.FakeRecorder{})
if c.StartReplicationManager {
go controllerManager.Run(goruntime.NumCPU(), rcStopCh)
}
return &MasterComponents{
ApiServer: s,
KubeMaster: m,
RestClient: restClient,
ControllerManager: controllerManager,
rcStopCh: rcStopCh,
}
}
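
// startBenchmarkComponentsSketch is an illustrative sketch, not part of the
// original file: it shows how NewMasterComponents is typically wired up for a
// benchmark. The QPS and Burst values below are assumptions, not tuned
// recommendations.
func startBenchmarkComponentsSketch() *MasterComponents {
	m := NewMasterComponents(&Config{
		StartReplicationManager: true,
		QPS:                     100,
		Burst:                   100,
	})
	// Callers are expected to tear everything down when finished:
	//   defer m.Stop(true, true)
	return m
}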
// startMasterOrDie starts a Kubernetes master and an httptest server to handle API requests.
func startMasterOrDie(masterConfig *master.Config) (*master.Master, *httptest.Server) {
var m *master.Master
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
m.Handler.ServeHTTP(w, req)
}))
if masterConfig == nil {
masterConfig = NewMasterConfig()
masterConfig.GenericConfig.EnableProfiling = true
masterConfig.GenericConfig.EnableSwaggerSupport = true
masterConfig.GenericConfig.EnableOpenAPISupport = true
masterConfig.GenericConfig.OpenAPIInfo = spec.Info{
InfoProps: spec.InfoProps{
Title: "Kubernetes",
Version: "unversioned",
},
}
masterConfig.GenericConfig.OpenAPIDefaultResponse = spec.Response{
ResponseProps: spec.ResponseProps{
Description: "Default Response.",
},
}
}
m, err := masterConfig.Complete().New()
if err != nil {
glog.Fatalf("error in bringing up the master: %v", err)
}
// TODO have this start method actually use the normal start sequence for the API server
// this method never actually calls the `Run` method for the API server
// fire the post hooks ourselves
m.GenericAPIServer.RunPostStartHooks(genericapiserver.PostStartHookContext{})
return m, s
}
func parseCIDROrDie(cidr string) *net.IPNet {
_, parsed, err := net.ParseCIDR(cidr)
if err != nil {
glog.Fatalf("error while parsing CIDR: %s", cidr)
}
return parsed
}
// NewMasterConfig returns a basic master config.
func NewMasterConfig() *master.Config {
config := storagebackend.Config{
ServerList: []string{GetEtcdURLFromEnv()},
// This causes the integration tests to exercise the etcd
// prefix code, so please don't change without ensuring
// sufficient coverage in other ways.
Prefix: uuid.New(),
}
negotiatedSerializer := NewSingleContentTypeSerializer(api.Scheme, testapi.Default.Codec(), runtime.ContentTypeJSON)
storageFactory := genericapiserver.NewDefaultStorageFactory(config, runtime.ContentTypeJSON, negotiatedSerializer, genericapiserver.NewDefaultResourceEncodingConfig(), master.DefaultAPIResourceConfigSource())
storageFactory.SetSerializer(
unversioned.GroupResource{Group: api.GroupName, Resource: genericapiserver.AllResources},
"",
NewSingleContentTypeSerializer(api.Scheme, testapi.Default.Codec(), runtime.ContentTypeJSON))
storageFactory.SetSerializer(
unversioned.GroupResource{Group: autoscaling.GroupName, Resource: genericapiserver.AllResources},
"",
NewSingleContentTypeSerializer(api.Scheme, testapi.Autoscaling.Codec(), runtime.ContentTypeJSON))
storageFactory.SetSerializer(
unversioned.GroupResource{Group: batch.GroupName, Resource: genericapiserver.AllResources},
"",
NewSingleContentTypeSerializer(api.Scheme, testapi.Batch.Codec(), runtime.ContentTypeJSON))
storageFactory.SetSerializer(
unversioned.GroupResource{Group: apps.GroupName, Resource: genericapiserver.AllResources},
"",
NewSingleContentTypeSerializer(api.Scheme, testapi.Apps.Codec(), runtime.ContentTypeJSON))
storageFactory.SetSerializer(
unversioned.GroupResource{Group: extensions.GroupName, Resource: genericapiserver.AllResources},
"",
NewSingleContentTypeSerializer(api.Scheme, testapi.Extensions.Codec(), runtime.ContentTypeJSON))
storageFactory.SetSerializer(
unversioned.GroupResource{Group: policy.GroupName, Resource: genericapiserver.AllResources},
"",
NewSingleContentTypeSerializer(api.Scheme, testapi.Policy.Codec(), runtime.ContentTypeJSON))
storageFactory.SetSerializer(
unversioned.GroupResource{Group: rbac.GroupName, Resource: genericapiserver.AllResources},
"",
NewSingleContentTypeSerializer(api.Scheme, testapi.Rbac.Codec(), runtime.ContentTypeJSON))
storageFactory.SetSerializer(
unversioned.GroupResource{Group: certificates.GroupName, Resource: genericapiserver.AllResources},
"",
NewSingleContentTypeSerializer(api.Scheme, testapi.Certificates.Codec(), runtime.ContentTypeJSON))
storageFactory.SetSerializer(
unversioned.GroupResource{Group: storage.GroupName, Resource: genericapiserver.AllResources},
"",
NewSingleContentTypeSerializer(api.Scheme, testapi.Storage.Codec(), runtime.ContentTypeJSON))
return &master.Config{
GenericConfig: &genericapiserver.Config{
APIResourceConfigSource: master.DefaultAPIResourceConfigSource(),
APIPrefix: "/api",
APIGroupPrefix: "/apis",
Authorizer: authorizer.NewAlwaysAllowAuthorizer(),
AdmissionControl: admit.NewAlwaysAdmit(),
Serializer: api.Codecs,
// Set those values to avoid annoying warnings in logs.
ServiceClusterIPRange: parseCIDROrDie("10.0.0.0/24"),
ServiceNodePortRange: utilnet.PortRange{Base: 30000, Size: 2768},
EnableVersion: true,
OpenAPIDefinitions: openapi.OpenAPIDefinitions,
EnableOpenAPISupport: true,
},
StorageFactory: storageFactory,
EnableWatchCache: true,
KubeletClient: kubeletclient.FakeKubeletClient{},
}
}
// NewIntegrationTestMasterConfig returns the master config appropriate for most integration tests.
func NewIntegrationTestMasterConfig() *master.Config {
masterConfig := NewMasterConfig()
masterConfig.EnableCoreControllers = true
masterConfig.GenericConfig.EnableIndex = true
masterConfig.GenericConfig.EnableVersion = true
masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
masterConfig.GenericConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()
return masterConfig
}
func (m *MasterComponents) stopRCManager() {
close(m.rcStopCh)
}
// Stop stops the requested master components; the rc manager is shut down
// before the API server, since the API server only exits once pending requests
// have completed.
func (m *MasterComponents) Stop(apiServer, rcManager bool) {
glog.Infof("Stopping master components")
if rcManager {
// Ordering matters because the apiServer will only shutdown when pending
// requests are done
m.once.Do(m.stopRCManager)
}
if apiServer {
m.ApiServer.Close()
}
}
// CreateTestingNamespace builds a namespace object for the given test to use.
// See the TODOs below: the namespace is not yet actually created on the server.
func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T) *api.Namespace {
// TODO: Create a namespace with a given basename.
// Currently we neither create the namespace nor delete all its contents at the end.
// But as long as tests are not using the same namespaces, this should work fine.
return &api.Namespace{
ObjectMeta: api.ObjectMeta{
// TODO: Once we start creating namespaces, switch to GenerateName.
Name: baseName,
},
}
}
// DeleteTestingNamespace is the counterpart to CreateTestingNamespace; it is
// currently a no-op (see the TODO below).
func DeleteTestingNamespace(ns *api.Namespace, apiserver *httptest.Server, t *testing.T) {
// TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace.
}
// RCFromManifest reads a .json file and returns the rc in it.
func RCFromManifest(fileName string) *api.ReplicationController {
data, err := ioutil.ReadFile(fileName)
if err != nil {
glog.Fatalf("Unexpected error reading rc manifest %v", err)
}
var controller api.ReplicationController
if err := runtime.DecodeInto(testapi.Default.Codec(), data, &controller); err != nil {
glog.Fatalf("Unexpected error reading rc manifest %v", err)
}
return &controller
}
// StopRC stops the rc via kubectl's stop library
func StopRC(rc *api.ReplicationController, clientset clientset.Interface) error {
reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset)
if err != nil || reaper == nil {
return err
}
err = reaper.Stop(rc.Namespace, rc.Name, 0, nil)
if err != nil {
return err
}
return nil
}
// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int32, clientset clientset.Interface) (*api.ReplicationController, error) {
scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
if err != nil {
return nil, err
}
retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
if err != nil {
return nil, err
}
scaled, err := clientset.Core().ReplicationControllers(ns).Get(name)
if err != nil {
return nil, err
}
return scaled, nil
}
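
// scaleAndStopRCSketch is an illustrative sketch, not part of the original
// file: it shows how ScaleRC and StopRC are meant to be combined in a
// benchmark, scaling the controller up and then reaping it. The server URL
// and the replica count of 10 are assumptions.
func scaleAndStopRCSketch(serverURL string, rc *api.ReplicationController) error {
	cs := clientset.NewForConfigOrDie(&restclient.Config{
		Host:          serverURL,
		ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
	})
	if _, err := ScaleRC(rc.Name, rc.Namespace, 10, cs); err != nil {
		return err
	}
	return StopRC(rc, cs)
}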
// RunAMaster starts a master (using a default, profiling-enabled config when
// masterConfig is nil) and returns it together with the httptest server serving it.
func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server) {
if masterConfig == nil {
masterConfig = NewMasterConfig()
masterConfig.GenericConfig.EnableProfiling = true
}
return startMasterOrDie(masterConfig)
}
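
// runTestAgainstMasterSketch is an illustrative sketch, not part of the
// original file: it shows the pattern most integration tests follow with the
// helpers above: run a master, carve out a namespace for the test, and clean
// both up afterwards. The test body itself is elided.
func runTestAgainstMasterSketch(t *testing.T) {
	_, s := RunAMaster(NewIntegrationTestMasterConfig())
	defer s.Close()

	ns := CreateTestingNamespace("sketch", s, t)
	defer DeleteTestingNamespace(ns, s, t)

	// ... exercise the API server through a client pointed at s.URL ...
}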
// Task is a function passed to worker goroutines by RunParallel.
// The function needs to implement its own thread safety.
type Task func(id int) error
// RunParallel runs numTasks invocations of task, allowing at most numWorkers
// of them to execute concurrently (all of them when numWorkers <= 0).
func RunParallel(task Task, numTasks, numWorkers int) {
start := time.Now()
if numWorkers <= 0 {
numWorkers = numTasks
}
defer func() {
glog.Infof("RunParallel took %v for %d tasks and %d workers", time.Since(start), numTasks, numWorkers)
}()
var wg sync.WaitGroup
semCh := make(chan struct{}, numWorkers)
wg.Add(numTasks)
for id := 0; id < numTasks; id++ {
go func(id int) {
semCh <- struct{}{}
err := task(id)
if err != nil {
glog.Fatalf("Worker failed with %v", err)
}
<-semCh
wg.Done()
}(id)
}
wg.Wait()
close(semCh)
}
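
// runParallelSketch is an illustrative sketch, not part of the original file:
// it drives RunParallel the way a benchmark typically would, fanning 100 tasks
// out over 10 workers. Each task must provide its own thread safety; this one
// only logs its id.
func runParallelSketch() {
	RunParallel(func(id int) error {
		glog.V(4).Infof("task %d finished", id)
		return nil
	}, 100, 10)
}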