Merge pull request #58149 from CaoShuFeng/duplicated_import_2

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

remove duplicated import

**Release note**:
```release-note
NONE
```
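
For context: Go allows the same package path to be imported more than once under different names, and each file in this change carried one such redundant import. Every diff below deletes the extra name and rewrites its references to the surviving one. A minimal, illustrative sketch of the pattern (the package path is one touched by this PR, but the snippet itself is not taken from the diff):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	kubetypes "k8s.io/apimachinery/pkg/types" // duplicate: same path under a second name
)

func main() {
	// Both names denote the same package, so the two values below are
	// interchangeable. The aliased import can be dropped once every
	// `kubetypes.` reference is rewritten to `types.`.
	a := types.UID("12345678")
	b := kubetypes.UID("12345678")
	fmt.Println(a == b) // prints: true
}
```

The sketch compiles in any module that depends on k8s.io/apimachinery. No behavior changes, which is why the release note is NONE.
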
Kubernetes Submit Queue 2018-01-17 01:41:36 -08:00 committed by GitHub
commit 99fb21f61f
30 changed files with 99 additions and 130 deletions


@@ -32,7 +32,6 @@ import (
 "k8s.io/apiserver/pkg/server/healthz"
 "k8s.io/client-go/informers"
 "k8s.io/client-go/kubernetes"
-clientset "k8s.io/client-go/kubernetes"
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 restclient "k8s.io/client-go/rest"
 "k8s.io/client-go/tools/clientcmd"
@@ -120,7 +119,7 @@ func Run(s *options.CloudControllerManagerServer) error {
 // Override kubeconfig qps/burst settings from flags
 kubeconfig.QPS = s.KubeAPIQPS
 kubeconfig.Burst = int(s.KubeAPIBurst)
-kubeClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "cloud-controller-manager"))
+kubeClient, err := kubernetes.NewForConfig(restclient.AddUserAgent(kubeconfig, "cloud-controller-manager"))
 if err != nil {
 glog.Fatalf("Invalid API configuration: %v", err)
 }
@@ -195,7 +194,7 @@ func Run(s *options.CloudControllerManagerServer) error {
 // StartControllers starts the cloud specific controller loops.
 func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restclient.Config, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}, recorder record.EventRecorder, cloud cloudprovider.Interface) error {
 // Function to build the kube client object
-client := func(serviceAccountName string) clientset.Interface {
+client := func(serviceAccountName string) kubernetes.Interface {
 return clientBuilder.ClientOrDie(serviceAccountName)
 }
@@ -299,7 +298,7 @@ func startHTTP(s *options.CloudControllerManagerServer) {
 glog.Fatal(server.ListenAndServe())
 }
-func createRecorder(kubeClient *clientset.Clientset) record.EventRecorder {
+func createRecorder(kubeClient *kubernetes.Clientset) record.EventRecorder {
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})


@@ -23,7 +23,6 @@ import (
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
-kuberuntime "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/client-go/dynamic"
 clientset "k8s.io/client-go/kubernetes"
 clientsetscheme "k8s.io/client-go/kubernetes/scheme"
@@ -148,7 +147,7 @@ func decodeUnversionedIntoAPIObject(action core.Action, unversionedObj runtime.O
 if err != nil {
 return nil, err
 }
-newObj, err := kuberuntime.Decode(clientsetscheme.Codecs.UniversalDecoder(action.GetResource().GroupVersion()), objBytes)
+newObj, err := runtime.Decode(clientsetscheme.Codecs.UniversalDecoder(action.GetResource().GroupVersion()), objBytes)
 if err != nil {
 return nil, err
 }


@@ -35,7 +35,6 @@ import (
 "k8s.io/apimachinery/pkg/watch"
 "k8s.io/client-go/informers"
 "k8s.io/client-go/kubernetes/fake"
-clientfake "k8s.io/client-go/kubernetes/fake"
 scalefake "k8s.io/client-go/scale/fake"
 core "k8s.io/client-go/testing"
 "k8s.io/kubernetes/pkg/api/legacyscheme"
@@ -557,7 +556,7 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
 testCMClient,
 )
-eventClient := &clientfake.Clientset{}
+eventClient := &fake.Clientset{}
 eventClient.AddReactor("create", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 tc.Lock()
 defer tc.Unlock()


@@ -37,7 +37,6 @@ import (
 "k8s.io/apimachinery/pkg/watch"
 "k8s.io/client-go/informers"
 "k8s.io/client-go/kubernetes/fake"
-clientfake "k8s.io/client-go/kubernetes/fake"
 restclient "k8s.io/client-go/rest"
 scalefake "k8s.io/client-go/scale/fake"
 core "k8s.io/client-go/testing"
@@ -461,7 +460,7 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
 testClient, testScaleClient := tc.prepareTestClient(t)
 metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
-eventClient := &clientfake.Clientset{}
+eventClient := &fake.Clientset{}
 eventClient.AddReactor("*", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 tc.Lock()
 defer tc.Unlock()


@@ -28,7 +28,6 @@ import (
 autoscaling "k8s.io/api/autoscaling/v2beta1"
 "k8s.io/api/core/v1"
-clientgov1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 clientset "k8s.io/client-go/kubernetes"
@@ -91,7 +90,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
 podSum := int64(0)
 missing := len(m.Containers) == 0
 for _, c := range m.Containers {
-resValue, found := c.Usage[clientgov1.ResourceName(resource)]
+resValue, found := c.Usage[v1.ResourceName(resource)]
 if !found {
 missing = true
 glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)


@@ -23,7 +23,6 @@ import (
 autoscalingapi "k8s.io/api/autoscaling/v2beta1"
 "k8s.io/api/core/v1"
-kv1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -186,7 +185,7 @@ func (tc *restClientTestCase) runTest(t *testing.T) {
 metricsClient := NewRESTMetricsClient(testMetricsClient.MetricsV1beta1(), testCMClient)
 isResource := len(tc.resourceName) > 0
 if isResource {
-info, timestamp, err := metricsClient.GetResourceMetric(kv1.ResourceName(tc.resourceName), tc.namespace, tc.selector)
+info, timestamp, err := metricsClient.GetResourceMetric(v1.ResourceName(tc.resourceName), tc.namespace, tc.selector)
 tc.verifyResults(t, info, timestamp, err)
 } else if tc.singleObject == nil {
 info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector)


@@ -37,7 +37,6 @@ import (
 "github.com/golang/glog"
 "k8s.io/api/core/v1"
-"k8s.io/api/extensions/v1beta1"
 extensions "k8s.io/api/extensions/v1beta1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -112,7 +111,7 @@ func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer,
 eventBroadcaster.StartLogging(glog.Infof)
 eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
 return NewBaseController(rsInformer, podInformer, kubeClient, burstReplicas,
-v1beta1.SchemeGroupVersion.WithKind("ReplicaSet"),
+extensions.SchemeGroupVersion.WithKind("ReplicaSet"),
 "replicaset_controller",
 "replicaset",
 controller.RealPodControl{


@@ -40,7 +40,6 @@ import (
 "k8s.io/client-go/informers"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/kubernetes/fake"
-fakeclientset "k8s.io/client-go/kubernetes/fake"
 restclient "k8s.io/client-go/rest"
 core "k8s.io/client-go/testing"
 "k8s.io/client-go/tools/cache"
@@ -1107,7 +1106,7 @@ func TestDeletionTimestamp(t *testing.T) {
 // setupManagerWithGCEnabled creates a RS manager with a fakePodControl
 func setupManagerWithGCEnabled(stopCh chan struct{}, objs ...runtime.Object) (manager *ReplicaSetController, fakePodControl *controller.FakePodControl, informers informers.SharedInformerFactory) {
-c := fakeclientset.NewSimpleClientset(objs...)
+c := fake.NewSimpleClientset(objs...)
 fakePodControl = &controller.FakePodControl{}
 manager, informers = testNewReplicaSetControllerFromClient(c, stopCh, BurstReplicas)


@@ -33,7 +33,6 @@ import (
 "k8s.io/client-go/tools/record"
 statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 "k8s.io/kubernetes/pkg/kubelet/container"
-kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/kubelet/events"
 )
@@ -55,7 +54,7 @@ type ImageGCManager interface {
 // Start async garbage collection of images.
 Start()
-GetImageList() ([]kubecontainer.Image, error)
+GetImageList() ([]container.Image, error)
 // Delete all unused images and returns the number of bytes freed. The number of bytes freed is always returned.
 DeleteUnusedImages() (int64, error)
@@ -108,18 +107,18 @@ type imageCache struct {
 // sync.RWMutex is the mutex protects the image cache.
 sync.RWMutex
 // images is the image cache.
-images []kubecontainer.Image
+images []container.Image
 }
 // set updates image cache.
-func (i *imageCache) set(images []kubecontainer.Image) {
+func (i *imageCache) set(images []container.Image) {
 i.Lock()
 defer i.Unlock()
 i.images = images
 }
 // get gets image list from image cache.
-func (i *imageCache) get() []kubecontainer.Image {
+func (i *imageCache) get() []container.Image {
 i.RLock()
 defer i.RUnlock()
 return i.images
@@ -190,7 +189,7 @@ func (im *realImageGCManager) Start() {
 }
 // Get a list of images on this node
-func (im *realImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
+func (im *realImageGCManager) GetImageList() ([]container.Image, error) {
 return im.imageCache.get(), nil
 }


@@ -29,7 +29,6 @@ import (
 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
-kubetypes "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/client-go/util/flowcontrol"
 "k8s.io/kubernetes/pkg/credentialprovider"
@@ -379,7 +378,7 @@ func TestGetPods(t *testing.T) {
 expected := []*kubecontainer.Pod{
 {
-ID: kubetypes.UID("12345678"),
+ID: types.UID("12345678"),
 Name: "foo",
 Namespace: "new",
 Containers: []*kubecontainer.Container{containers[0], containers[1]},


@@ -27,7 +27,6 @@ import (
 "golang.org/x/net/context"
 "google.golang.org/grpc"
 "k8s.io/apimachinery/pkg/types"
-kubetypes "k8s.io/apimachinery/pkg/types"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 )
@@ -204,7 +203,7 @@ func newfakeUnitGetter() *fakeUnitGetter {
 }
 }
-func (f *fakeUnitGetter) getNetworkNamespace(uid kubetypes.UID, latestPod *rktapi.Pod) (kubecontainer.ContainerID, error) {
+func (f *fakeUnitGetter) getNetworkNamespace(uid types.UID, latestPod *rktapi.Pod) (kubecontainer.ContainerID, error) {
 return kubecontainer.ContainerID{ID: "42"}, nil
 }


@@ -32,7 +32,6 @@ import (
 "k8s.io/client-go/tools/record"
 "k8s.io/kubernetes/pkg/kubelet/config"
 "k8s.io/kubernetes/pkg/kubelet/container"
-kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/kubelet/pod"
 "k8s.io/kubernetes/pkg/kubelet/status"
 "k8s.io/kubernetes/pkg/kubelet/util/format"
@@ -153,7 +152,7 @@ func NewVolumeManager(
 podStatusProvider status.PodStatusProvider,
 kubeClient clientset.Interface,
 volumePluginMgr *volume.VolumePluginMgr,
-kubeContainerRuntime kubecontainer.Runtime,
+kubeContainerRuntime container.Runtime,
 mounter mount.Interface,
 kubeletPodsDir string,
 recorder record.EventRecorder,


@@ -33,7 +33,6 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/config"
 "k8s.io/kubernetes/pkg/kubelet/configmap"
 containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
-"k8s.io/kubernetes/pkg/kubelet/pod"
 kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
 "k8s.io/kubernetes/pkg/kubelet/secret"
@@ -213,7 +212,7 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
 }
 }
-func newTestVolumeManager(tmpDir string, podManager pod.Manager, kubeClient clientset.Interface) VolumeManager {
+func newTestVolumeManager(tmpDir string, podManager kubepod.Manager, kubeClient clientset.Interface) VolumeManager {
 plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
 fakeRecorder := &record.FakeRecorder{}
 plugMgr := &volume.VolumePluginMgr{}


@@ -33,7 +33,6 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/pkg/scheduler/algorithm"
-"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 algorithmpredicates "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 algorithmpriorities "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
 priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
@@ -43,7 +42,7 @@ import (
 )
 var (
-order = []string{"false", "true", "matches", "nopods", predicates.MatchInterPodAffinityPred}
+order = []string{"false", "true", "matches", "nopods", algorithmpredicates.MatchInterPodAffinityPred}
 )
 func falsePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
@@ -185,7 +184,7 @@ func TestSelectHost(t *testing.T) {
 }
 func TestGenericScheduler(t *testing.T) {
-predicates.SetPredicatesOrdering(order)
+algorithmpredicates.SetPredicatesOrdering(order)
 tests := []struct {
 name string
 predicates map[string]algorithm.FitPredicate
@@ -423,7 +422,7 @@ func TestGenericScheduler(t *testing.T) {
 }
 func TestFindFitAllError(t *testing.T) {
-predicates.SetPredicatesOrdering(order)
+algorithmpredicates.SetPredicatesOrdering(order)
 nodes := []string{"3", "2", "1"}
 predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate}
 nodeNameToInfo := map[string]*schedulercache.NodeInfo{
@@ -453,7 +452,7 @@ func TestFindFitAllError(t *testing.T) {
 }
 func TestFindFitSomeError(t *testing.T) {
-predicates.SetPredicatesOrdering(order)
+algorithmpredicates.SetPredicatesOrdering(order)
 nodes := []string{"3", "2", "1"}
 predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "matches": matchesPredicate}
 pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1"}}
@@ -765,7 +764,7 @@ var negPriority, lowPriority, midPriority, highPriority, veryHighPriority = int3
 // TestSelectNodesForPreemption tests selectNodesForPreemption. This test assumes
 // that podsFitsOnNode works correctly and is tested separately.
 func TestSelectNodesForPreemption(t *testing.T) {
-predicates.SetPredicatesOrdering(order)
+algorithmpredicates.SetPredicatesOrdering(order)
 tests := []struct {
 name string
 predicates map[string]algorithm.FitPredicate
@@ -889,7 +888,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 nodes = append(nodes, node)
 }
 if test.addAffinityPredicate {
-test.predicates[predicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
+test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
 }
 nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
 nodeToPods, err := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
@@ -904,7 +903,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 // TestPickOneNodeForPreemption tests pickOneNodeForPreemption.
 func TestPickOneNodeForPreemption(t *testing.T) {
-predicates.SetPredicatesOrdering(order)
+algorithmpredicates.SetPredicatesOrdering(order)
 tests := []struct {
 name string
 predicates map[string]algorithm.FitPredicate
@@ -1081,10 +1080,10 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 {
 name: "No node should be attempted",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{predicates.ErrNodeSelectorNotMatch},
-"machine2": []algorithm.PredicateFailureReason{predicates.ErrPodNotMatchHostName},
-"machine3": []algorithm.PredicateFailureReason{predicates.ErrTaintsTolerationsNotMatch},
-"machine4": []algorithm.PredicateFailureReason{predicates.ErrNodeLabelPresenceViolated},
+"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch},
+"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
+"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrTaintsTolerationsNotMatch},
+"machine4": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeLabelPresenceViolated},
 },
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}},
 expected: map[string]bool{},
@@ -1092,9 +1091,9 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 {
 name: "pod affinity should be tried",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{predicates.ErrPodAffinityNotMatch},
-"machine2": []algorithm.PredicateFailureReason{predicates.ErrPodNotMatchHostName},
-"machine3": []algorithm.PredicateFailureReason{predicates.ErrNodeUnschedulable},
+"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
+"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
+"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnschedulable},
 },
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}, Spec: v1.PodSpec{Affinity: &v1.Affinity{
 PodAffinity: &v1.PodAffinity{
@@ -1118,8 +1117,8 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 {
 name: "pod with both pod affinity and anti-affinity should be tried",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{predicates.ErrPodAffinityNotMatch},
-"machine2": []algorithm.PredicateFailureReason{predicates.ErrPodNotMatchHostName},
+"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
+"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
 },
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}, Spec: v1.PodSpec{Affinity: &v1.Affinity{
 PodAffinity: &v1.PodAffinity{
@@ -1160,9 +1159,9 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 {
 name: "Mix of failed predicates works fine",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{predicates.ErrNodeSelectorNotMatch, predicates.ErrNodeOutOfDisk, predicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
-"machine2": []algorithm.PredicateFailureReason{predicates.ErrPodNotMatchHostName, predicates.ErrDiskConflict},
-"machine3": []algorithm.PredicateFailureReason{predicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
+"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch, algorithmpredicates.ErrNodeOutOfDisk, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
+"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName, algorithmpredicates.ErrDiskConflict},
+"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
 "machine4": []algorithm.PredicateFailureReason{},
 },
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}},
@@ -1185,9 +1184,9 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 func TestPreempt(t *testing.T) {
 failedPredMap := FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{predicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
-"machine2": []algorithm.PredicateFailureReason{predicates.ErrDiskConflict},
-"machine3": []algorithm.PredicateFailureReason{predicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
+"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
+"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrDiskConflict},
+"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
 }
 // Prepare 3 node names.
 nodeNames := []string{}


@@ -21,13 +21,12 @@ import (
 "testing"
 "k8s.io/api/core/v1"
-k8stypes "k8s.io/apimachinery/pkg/types"
+"k8s.io/apimachinery/pkg/types"
 "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
 "k8s.io/kubernetes/pkg/volume"
 volumetest "k8s.io/kubernetes/pkg/volume/testing"
 "github.com/golang/glog"
-"k8s.io/apimachinery/pkg/types"
 )
@@ -309,7 +308,7 @@ func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeNam
 return expected.isAttached, expected.ret
 }
-func (testcase *testcase) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
+func (testcase *testcase) DisksAreAttached(nodeVolumes map[types.NodeName][]string) (map[types.NodeName]map[string]bool, error) {
 return nil, errors.New("Not implemented")
 }


@@ -25,7 +25,6 @@ import (
 "k8s.io/apiserver/pkg/authentication/user"
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/apis/policy"
-policyapi "k8s.io/kubernetes/pkg/apis/policy"
 "k8s.io/kubernetes/pkg/auth/nodeidentifier"
 "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 coreinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
@@ -82,7 +81,7 @@ func Test_nodePlugin_Admit(t *testing.T) {
 podResource = api.Resource("pods").WithVersion("v1")
 podKind = api.Kind("Pod").WithVersion("v1")
-evictionKind = policyapi.Kind("Eviction").WithVersion("v1beta1")
+evictionKind = policy.Kind("Eviction").WithVersion("v1beta1")
 nodeResource = api.Resource("nodes").WithVersion("v1")
 nodeKind = api.Kind("Node").WithVersion("v1")


@@ -20,7 +20,6 @@ import (
 "reflect"
 "testing"
-"k8s.io/apimachinery/pkg/apis/meta/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 kadmission "k8s.io/apiserver/pkg/admission"
@@ -426,16 +425,16 @@ func TestAdmitConflictWithDifferentNamespaceShouldDoNothing(t *testing.T) {
 }
 pip := &settings.PodPreset{
-ObjectMeta: v1.ObjectMeta{
+ObjectMeta: metav1.ObjectMeta{
 Name: "hello",
 Namespace: "othernamespace",
 },
 Spec: settings.PodPresetSpec{
-Selector: v1.LabelSelector{
-MatchExpressions: []v1.LabelSelectorRequirement{
+Selector: metav1.LabelSelector{
+MatchExpressions: []metav1.LabelSelectorRequirement{
 {
 Key: "security",
-Operator: v1.LabelSelectorOpIn,
+Operator: metav1.LabelSelectorOpIn,
 Values: []string{"S2"},
 },
 },
@@ -472,16 +471,16 @@ func TestAdmitConflictWithNonMatchingLabelsShouldNotError(t *testing.T) {
 }
 pip := &settings.PodPreset{
-ObjectMeta: v1.ObjectMeta{
+ObjectMeta: metav1.ObjectMeta{
 Name: "hello",
 Namespace: "namespace",
 },
 Spec: settings.PodPresetSpec{
-Selector: v1.LabelSelector{
-MatchExpressions: []v1.LabelSelectorRequirement{
+Selector: metav1.LabelSelector{
+MatchExpressions: []metav1.LabelSelectorRequirement{
 {
 Key: "security",
-Operator: v1.LabelSelectorOpIn,
+Operator: metav1.LabelSelectorOpIn,
 Values: []string{"S3"},
 },
 },
@@ -519,16 +518,16 @@ func TestAdmitConflictShouldNotModifyPod(t *testing.T) {
 origPod := *pod
 pip := &settings.PodPreset{
-ObjectMeta: v1.ObjectMeta{
+ObjectMeta: metav1.ObjectMeta{
 Name: "hello",
 Namespace: "namespace",
 },
 Spec: settings.PodPresetSpec{
-Selector: v1.LabelSelector{
-MatchExpressions: []v1.LabelSelectorRequirement{
+Selector: metav1.LabelSelector{
+MatchExpressions: []metav1.LabelSelectorRequirement{
 {
 Key: "security",
-Operator: v1.LabelSelectorOpIn,
+Operator: metav1.LabelSelectorOpIn,
 Values: []string{"S2"},
 },
 },
@@ -568,16 +567,16 @@ func TestAdmit(t *testing.T) {
 }
 pip := &settings.PodPreset{
-ObjectMeta: v1.ObjectMeta{
+ObjectMeta: metav1.ObjectMeta{
 Name: "hello",
 Namespace: "namespace",
 },
 Spec: settings.PodPresetSpec{
-Selector: v1.LabelSelector{
-MatchExpressions: []v1.LabelSelectorRequirement{
+Selector: metav1.LabelSelector{
+MatchExpressions: []metav1.LabelSelectorRequirement{
 {
 Key: "security",
-Operator: v1.LabelSelectorOpIn,
+Operator: metav1.LabelSelectorOpIn,
 Values: []string{"S2"},
 },
 },
@@ -628,16 +627,16 @@ func TestAdmitMirrorPod(t *testing.T) {
 }
 pip := &settings.PodPreset{
-ObjectMeta: v1.ObjectMeta{
+ObjectMeta: metav1.ObjectMeta{
 Name: "hello",
 Namespace: "namespace",
 },
 Spec: settings.PodPresetSpec{
-Selector: v1.LabelSelector{
-MatchExpressions: []v1.LabelSelectorRequirement{
+Selector: metav1.LabelSelector{
+MatchExpressions: []metav1.LabelSelectorRequirement{
 {
 Key: "security",
-Operator: v1.LabelSelectorOpIn,
+Operator: metav1.LabelSelectorOpIn,
 Values: []string{"S2"},
 },
 },
@@ -698,16 +697,16 @@ func TestExclusionNoAdmit(t *testing.T) {
 }
 pip := &settings.PodPreset{
-ObjectMeta: v1.ObjectMeta{
+ObjectMeta: metav1.ObjectMeta{
 Name: "hello",
 Namespace: "namespace",
 },
 Spec: settings.PodPresetSpec{
-Selector: v1.LabelSelector{
-MatchExpressions: []v1.LabelSelectorRequirement{
+Selector: metav1.LabelSelector{
+MatchExpressions: []metav1.LabelSelectorRequirement{
 {
 Key: "security",
-Operator: v1.LabelSelectorOpIn,
+Operator: metav1.LabelSelectorOpIn,
 Values: []string{"S2"},
 },
 },
@@ -762,16 +761,16 @@ func TestAdmitEmptyPodNamespace(t *testing.T) {
 }
 pip := &settings.PodPreset{
-ObjectMeta: v1.ObjectMeta{
+ObjectMeta: metav1.ObjectMeta{
 Name: "hello",
 Namespace: "different", // (pod will be submitted to namespace 'namespace')
 },
 Spec: settings.PodPresetSpec{
-Selector: v1.LabelSelector{
-MatchExpressions: []v1.LabelSelectorRequirement{
+Selector: metav1.LabelSelector{
+MatchExpressions: []metav1.LabelSelectorRequirement{
 {
 Key: "security",
-Operator: v1.LabelSelectorOpIn,
+Operator: metav1.LabelSelectorOpIn,
 Values: []string{"S2"},
 },
 },


@@ -25,7 +25,6 @@ import (
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/util/json"
 "k8s.io/apiserver/pkg/apis/apiserver"
-apiserverapi "k8s.io/apiserver/pkg/apis/apiserver"
 apiserverapiv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
 )
@@ -138,7 +137,7 @@ func TestReadAdmissionConfiguration(t *testing.T) {
 }
 scheme := runtime.NewScheme()
-apiserverapi.AddToScheme(scheme)
+apiserver.AddToScheme(scheme)
 apiserverapiv1alpha1.AddToScheme(scheme)
 for testName, testCase := range testCases {
@@ -210,7 +209,7 @@ func TestEmbeddedConfiguration(t *testing.T) {
 for desc, test := range testCases {
 scheme := runtime.NewScheme()
-apiserverapi.AddToScheme(scheme)
+apiserver.AddToScheme(scheme)
 apiserverapiv1alpha1.AddToScheme(scheme)
 if err = ioutil.WriteFile(configFileName, []byte(test.ConfigBody), 0644); err != nil {


@@ -32,7 +32,6 @@ import (
 registrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
 corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
-apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/runtime"
@@ -381,7 +380,7 @@ func TestAdmit(t *testing.T) {
 t.Errorf(" expected an error saying %q, but got %v", tt.errorContains, err)
 }
 }
-if _, isStatusErr := err.(*apierrors.StatusError); err != nil && !isStatusErr {
+if _, isStatusErr := err.(*errors.StatusError); err != nil && !isStatusErr {
 t.Errorf("%s: expected a StatusError, got %T", name, err)
 }
 })


@@ -32,7 +32,6 @@ import (
 registrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
 corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
-apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/runtime"
@@ -403,7 +402,7 @@ func TestValidate(t *testing.T) {
 t.Errorf(" expected an error saying %q, but got %v", tt.errorContains, err)
 }
 }
-if _, isStatusErr := err.(*apierrors.StatusError); err != nil && !isStatusErr {
+if _, isStatusErr := err.(*errors.StatusError); err != nil && !isStatusErr {
 t.Errorf("%s: expected a StatusError, got %T", name, err)
 }
 })


@@ -32,7 +32,6 @@ import (
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apimachinery/pkg/types"
 utilnet "k8s.io/apimachinery/pkg/util/net"
-"k8s.io/apiserver/pkg/apis/audit"
 auditinternal "k8s.io/apiserver/pkg/apis/audit"
 "k8s.io/apiserver/pkg/authentication/user"
 "k8s.io/apiserver/pkg/authorization/authorizer"
@@ -88,7 +87,7 @@ func NewEventFromRequest(req *http.Request, level auditinternal.Level, attribs a
 // LogImpersonatedUser fills in the impersonated user attributes into an audit event.
 func LogImpersonatedUser(ae *auditinternal.Event, user user.Info) {
-if ae == nil || ae.Level.Less(audit.LevelMetadata) {
+if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) {
 return
 }
 ae.ImpersonatedUser = &auditinternal.UserInfo{
@@ -104,14 +103,14 @@ func LogImpersonatedUser(ae *auditinternal.Event, user user.Info) {
 // LogRequestObject fills in the request object into an audit event. The passed runtime.Object
 // will be converted to the given gv.
-func LogRequestObject(ae *audit.Event, obj runtime.Object, gvr schema.GroupVersionResource, subresource string, s runtime.NegotiatedSerializer) {
-if ae == nil || ae.Level.Less(audit.LevelMetadata) {
+func LogRequestObject(ae *auditinternal.Event, obj runtime.Object, gvr schema.GroupVersionResource, subresource string, s runtime.NegotiatedSerializer) {
+if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) {
 return
 }
 // complete ObjectRef
 if ae.ObjectRef == nil {
-ae.ObjectRef = &audit.ObjectReference{}
+ae.ObjectRef = &auditinternal.ObjectReference{}
 }
 if acc, ok := obj.(metav1.ObjectMetaAccessor); ok {
 meta := acc.GetObjectMeta()
@@ -140,7 +139,7 @@ func LogRequestObject(ae *audit.Event, obj runtime.Object, gvr schema.GroupVersi
 ae.ObjectRef.Subresource = subresource
 }
-if ae.Level.Less(audit.LevelRequest) {
+if ae.Level.Less(auditinternal.LevelRequest) {
 return
 }
@@ -155,8 +154,8 @@ func LogRequestObject(ae *audit.Event, obj runtime.Object, gvr schema.GroupVersi
 }
 // LogRquestPatch fills in the given patch as the request object into an audit event.
-func LogRequestPatch(ae *audit.Event, patch []byte) {
-if ae == nil || ae.Level.Less(audit.LevelRequest) {
+func LogRequestPatch(ae *auditinternal.Event, patch []byte) {
+if ae == nil || ae.Level.Less(auditinternal.LevelRequest) {
 return
 }
@@ -168,15 +167,15 @@ func LogRequestPatch(ae *audit.Event, patch []byte) {
 // LogResponseObject fills in the response object into an audit event. The passed runtime.Object
 // will be converted to the given gv.
-func LogResponseObject(ae *audit.Event, obj runtime.Object, gv schema.GroupVersion, s runtime.NegotiatedSerializer) {
-if ae == nil || ae.Level.Less(audit.LevelMetadata) {
+func LogResponseObject(ae *auditinternal.Event, obj runtime.Object, gv schema.GroupVersion, s runtime.NegotiatedSerializer) {
+if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) {
 return
 }
 if status, ok := obj.(*metav1.Status); ok {
 ae.ResponseStatus = status
 }
-if ae.Level.Less(audit.LevelRequestResponse) {
+if ae.Level.Less(auditinternal.LevelRequestResponse) {
 return
 }
 // TODO(audit): hook into the serializer to avoid double conversion


@@ -29,7 +29,6 @@ import (
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apiserver/pkg/authentication/authenticator"
 "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
-"k8s.io/apiserver/pkg/endpoints/request"
 genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 )
@@ -82,7 +81,7 @@ func WithAuthentication(handler http.Handler, mapper genericapirequest.RequestCo
 )
 }
-func Unauthorized(requestContextMapper request.RequestContextMapper, s runtime.NegotiatedSerializer, supportsBasicAuth bool) http.Handler {
+func Unauthorized(requestContextMapper genericapirequest.RequestContextMapper, s runtime.NegotiatedSerializer, supportsBasicAuth bool) http.Handler {
 return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
 if supportsBasicAuth {
 w.Header().Set("WWW-Authenticate", `Basic realm="kubernetes-master"`)
@@ -92,7 +91,7 @@ func Unauthorized(requestContextMapper request.RequestContextMapper, s runtime.N
 responsewriters.InternalError(w, req, errors.New("no context found for request"))
 return
 }
-requestInfo, found := request.RequestInfoFrom(ctx)
+requestInfo, found := genericapirequest.RequestInfoFrom(ctx)
 if !found {
 responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context"))
 return


@@ -37,7 +37,6 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/validation/field"
 "k8s.io/apimachinery/pkg/watch"
-"k8s.io/apiserver/pkg/endpoints/request"
 genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 "k8s.io/apiserver/pkg/registry/generic"
 "k8s.io/apiserver/pkg/registry/rest"
@@ -681,7 +680,7 @@ func (e *Store) Get(ctx genericapirequest.Context, name string, options *metav1.
 // qualifiedResourceFromContext attempts to retrieve a GroupResource from the context's request info.
 // If the context has no request info, DefaultQualifiedResource is used.
 func (e *Store) qualifiedResourceFromContext(ctx genericapirequest.Context) schema.GroupResource {
-if info, ok := request.RequestInfoFrom(ctx); ok {
+if info, ok := genericapirequest.RequestInfoFrom(ctx); ok {
 return schema.GroupResource{Group: info.APIGroup, Resource: info.Resource}
 }
 // some implementations access storage directly and thus the context has no RequestInfo


@@ -43,7 +43,6 @@ import (
 "k8s.io/apimachinery/pkg/watch"
 "k8s.io/apiserver/pkg/apis/example"
 examplev1 "k8s.io/apiserver/pkg/apis/example/v1"
-"k8s.io/apiserver/pkg/endpoints/request"
 genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 "k8s.io/apiserver/pkg/features"
 "k8s.io/apiserver/pkg/registry/generic"
@@ -1920,8 +1919,8 @@ func TestFinalizeDelete(t *testing.T) {
 }
 }
-func fakeRequestInfo(resource, apiGroup string) *request.RequestInfo {
-return &request.RequestInfo{
+func fakeRequestInfo(resource, apiGroup string) *genericapirequest.RequestInfo {
+return &genericapirequest.RequestInfo{
 IsResourceRequest: true,
 Path: "/api/v1/test",
 Verb: "test",


@@ -34,7 +34,6 @@ import (
 "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
 "k8s.io/apiserver/pkg/endpoints/request"
 "k8s.io/apiserver/pkg/server/mux"
-genericmux "k8s.io/apiserver/pkg/server/mux"
 )
 // APIServerHandlers holds the different http.Handlers used by the API server.
@@ -74,7 +73,7 @@ type APIServerHandler struct {
 type HandlerChainBuilderFn func(apiHandler http.Handler) http.Handler
 func NewAPIServerHandler(name string, contextMapper request.RequestContextMapper, s runtime.NegotiatedSerializer, handlerChainBuilder HandlerChainBuilderFn, notFoundHandler http.Handler) *APIServerHandler {
-nonGoRestfulMux := genericmux.NewPathRecorderMux(name)
+nonGoRestfulMux := mux.NewPathRecorderMux(name)
 if notFoundHandler != nil {
 nonGoRestfulMux.NotFoundHandler(notFoundHandler)
 }


@@ -26,7 +26,6 @@ import (
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apimachinery/pkg/runtime/serializer"
-runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
 "k8s.io/apimachinery/pkg/util/net"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/rest"
@@ -71,7 +70,7 @@ func newGenericWebhook(registry *registered.APIRegistrationManager, codecFactory
 clientConfig.Timeout = requestTimeout
 codec := codecFactory.LegacyCodec(groupVersions...)
-clientConfig.ContentConfig.NegotiatedSerializer = runtimeserializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
+clientConfig.ContentConfig.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
 restClient, err := rest.UnversionedRESTClientFor(clientConfig)
 if err != nil {


@@ -20,7 +20,6 @@ import (
 "fmt"
 "k8s.io/api/core/v1"
-corev1 "k8s.io/api/core/v1"
 extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 rbacv1beta1 "k8s.io/api/rbac/v1beta1"
 apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -57,7 +56,7 @@ var (
 Spec: extensionsv1beta1.PodSecurityPolicySpec{
 Privileged: false,
 AllowPrivilegeEscalation: boolPtr(false),
-RequiredDropCapabilities: []corev1.Capability{
+RequiredDropCapabilities: []v1.Capability{
 "AUDIT_WRITE",
 "CHOWN",
 "DAC_OVERRIDE",


@@ -28,7 +28,6 @@ import (
 storageV1 "k8s.io/api/storage/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
-k8stype "k8s.io/apimachinery/pkg/types"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -175,7 +174,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
 Expect(err).NotTo(HaveOccurred())
 By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
-err = waitForVSphereDiskToDetach(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
+err = waitForVSphereDiskToDetach(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))
 Expect(err).NotTo(HaveOccurred())
 By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))


@@ -28,7 +28,6 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
-k8stype "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
@@ -71,7 +70,7 @@ func verifyVSphereDiskAttached(c clientset.Interface, vsp *vsphere.VSphere, volu
 }
 // Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes
-func waitForVSphereDisksToDetach(c clientset.Interface, vsp *vsphere.VSphere, nodeVolumes map[k8stype.NodeName][]string) error {
+func waitForVSphereDisksToDetach(c clientset.Interface, vsp *vsphere.VSphere, nodeVolumes map[types.NodeName][]string) error {
 var (
 err error
 disksAttached = true
@@ -425,7 +424,7 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste
 namespace := pod.Namespace
 for index, pv := range persistentvolumes {
 // Verify disks are attached to the node
-isAttached, err := verifyVSphereDiskAttached(c, vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
+isAttached, err := verifyVSphereDiskAttached(c, vsp, pv.Spec.VsphereVolume.VolumePath, types.NodeName(nodeName))
 Expect(err).NotTo(HaveOccurred())
 Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
 // Verify Volumes are accessible


@@ -45,7 +45,6 @@ import (
 authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union"
 "k8s.io/apiserver/pkg/authentication/user"
 "k8s.io/apiserver/pkg/authorization/authorizer"
-authauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
 "k8s.io/apiserver/pkg/authorization/authorizerfactory"
 authorizerunion "k8s.io/apiserver/pkg/authorization/union"
 genericapiserver "k8s.io/apiserver/pkg/server"
@@ -53,7 +52,6 @@ import (
 serverstorage "k8s.io/apiserver/pkg/server/storage"
 "k8s.io/apiserver/pkg/storage/storagebackend"
 "k8s.io/client-go/informers"
-extinformers "k8s.io/client-go/informers"
 clientset "k8s.io/client-go/kubernetes"
 restclient "k8s.io/client-go/rest"
 "k8s.io/client-go/tools/record"
@@ -143,7 +141,7 @@ func NewMasterComponents(c *Config) *MasterComponents {
 // alwaysAllow always allows an action
 type alwaysAllow struct{}
-func (alwaysAllow) Authorize(requestAttributes authauthorizer.Attributes) (authorizer.Decision, string, error) {
+func (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) {
 return authorizer.DecisionAllow, "always allow", nil
 }
@@ -245,7 +243,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
 glog.Fatal(err)
 }
-sharedInformers := extinformers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
+sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
 m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.EmptyDelegate)
 if err != nil {
 closeFn()