diff --git a/cmd/kubeadm/app/phases/kubelet/BUILD b/cmd/kubeadm/app/phases/kubelet/BUILD index 2d639a8c00..6215de46da 100644 --- a/cmd/kubeadm/app/phases/kubelet/BUILD +++ b/cmd/kubeadm/app/phases/kubelet/BUILD @@ -45,7 +45,6 @@ go_test( deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis/config:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cmd/kubeadm/app/phases/kubelet/dynamic_test.go b/cmd/kubeadm/app/phases/kubelet/dynamic_test.go index c9d11e0629..36ab695557 100644 --- a/cmd/kubeadm/app/phases/kubelet/dynamic_test.go +++ b/cmd/kubeadm/app/phases/kubelet/dynamic_test.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) func TestEnableDynamicConfigForNode(t *testing.T) { @@ -35,7 +34,7 @@ func TestEnableDynamicConfigForNode(t *testing.T) { return true, &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, - Labels: map[string]string{kubeletapis.LabelHostname: nodeName}, + Labels: map[string]string{v1.LabelHostname: nodeName}, }, Spec: v1.NodeSpec{ ConfigSource: &v1.NodeConfigSource{ diff --git a/cmd/kubeadm/app/phases/markcontrolplane/BUILD b/cmd/kubeadm/app/phases/markcontrolplane/BUILD index 039975f049..9c42626dd8 100644 --- a/cmd/kubeadm/app/phases/markcontrolplane/BUILD +++ b/cmd/kubeadm/app/phases/markcontrolplane/BUILD @@ -12,7 +12,6 @@ go_test( embed = [":go_default_library"], deps = [ "//cmd/kubeadm/app/constants:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git 
a/cmd/kubeadm/app/phases/markcontrolplane/markcontrolplane_test.go b/cmd/kubeadm/app/phases/markcontrolplane/markcontrolplane_test.go index ab26bddd65..eb8f3b27c6 100644 --- a/cmd/kubeadm/app/phases/markcontrolplane/markcontrolplane_test.go +++ b/cmd/kubeadm/app/phases/markcontrolplane/markcontrolplane_test.go @@ -29,7 +29,6 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/node" ) @@ -116,7 +115,7 @@ func TestMarkControlPlane(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: hostname, Labels: map[string]string{ - kubeletapis.LabelHostname: hostname, + v1.LabelHostname: hostname, }, }, } diff --git a/cmd/kubeadm/app/util/apiclient/BUILD b/cmd/kubeadm/app/util/apiclient/BUILD index 666c2bada9..c16c612c4f 100644 --- a/cmd/kubeadm/app/util/apiclient/BUILD +++ b/cmd/kubeadm/app/util/apiclient/BUILD @@ -19,7 +19,6 @@ go_library( deps = [ "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/util:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", @@ -67,7 +66,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/kubelet/apis:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/rbac/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cmd/kubeadm/app/util/apiclient/idempotency.go b/cmd/kubeadm/app/util/apiclient/idempotency.go index 02727c5a54..cb8635a840 100644 --- a/cmd/kubeadm/app/util/apiclient/idempotency.go +++ b/cmd/kubeadm/app/util/apiclient/idempotency.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" 
"k8s.io/kubernetes/cmd/kubeadm/app/constants" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) // TODO: We should invent a dynamic mechanism for this using the dynamic client instead of hard-coding these functions per-type @@ -210,7 +209,7 @@ func PatchNodeOnce(client clientset.Interface, nodeName string, patchFn func(*v1 // The node may appear to have no labels at first, // so we wait for it to get hostname label. - if _, found := n.ObjectMeta.Labels[kubeletapis.LabelHostname]; !found { + if _, found := n.ObjectMeta.Labels[v1.LabelHostname]; !found { return false, nil } diff --git a/cmd/kubeadm/app/util/apiclient/idempotency_test.go b/cmd/kubeadm/app/util/apiclient/idempotency_test.go index 1564c7bcc2..b4ac6a4363 100644 --- a/cmd/kubeadm/app/util/apiclient/idempotency_test.go +++ b/cmd/kubeadm/app/util/apiclient/idempotency_test.go @@ -22,7 +22,6 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) func TestPatchNodeNonErrorCases(t *testing.T) { @@ -38,7 +37,7 @@ func TestPatchNodeNonErrorCases(t *testing.T) { node: v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "testnode", - Labels: map[string]string{kubeletapis.LabelHostname: ""}, + Labels: map[string]string{v1.LabelHostname: ""}, }, }, success: true, diff --git a/pkg/cloudprovider/providers/aws/BUILD b/pkg/cloudprovider/providers/aws/BUILD index 760881fa04..1c901bd97d 100644 --- a/pkg/cloudprovider/providers/aws/BUILD +++ b/pkg/cloudprovider/providers/aws/BUILD @@ -28,7 +28,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", deps = [ "//pkg/credentialprovider/aws:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -79,7 +78,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/kubelet/apis:go_default_library", 
"//pkg/volume:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index 2fc3307042..94c3a8ef25 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -60,7 +60,6 @@ import ( cloudprovider "k8s.io/cloud-provider" nodehelpers "k8s.io/cloud-provider/node/helpers" servicehelpers "k8s.io/cloud-provider/service/helpers" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -2473,12 +2472,12 @@ func (c *Cloud) GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]strin return nil, fmt.Errorf("volume did not have AZ information: %q", aws.StringValue(info.VolumeId)) } - labels[kubeletapis.LabelZoneFailureDomain] = az + labels[v1.LabelZoneFailureDomain] = az region, err := azToRegion(az) if err != nil { return nil, err } - labels[kubeletapis.LabelZoneRegion] = region + labels[v1.LabelZoneRegion] = region return labels, nil } diff --git a/pkg/cloudprovider/providers/aws/aws_test.go b/pkg/cloudprovider/providers/aws/aws_test.go index 62f7186af0..613d1baefa 100644 --- a/pkg/cloudprovider/providers/aws/aws_test.go +++ b/pkg/cloudprovider/providers/aws/aws_test.go @@ -35,7 +35,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume" ) @@ -1191,8 +1190,8 @@ func TestGetVolumeLabels(t *testing.T) { assert.Nil(t, err, "Error creating Volume %v", err) assert.Equal(t, map[string]string{ - kubeletapis.LabelZoneFailureDomain: "us-east-1a", - kubeletapis.LabelZoneRegion: "us-east-1"}, labels) + v1.LabelZoneFailureDomain: "us-east-1a", + v1.LabelZoneRegion: "us-east-1"}, labels) 
awsServices.ec2.(*MockedFakeEC2).AssertExpectations(t) } @@ -1265,8 +1264,8 @@ func TestGetLabelsForVolume(t *testing.T) { AvailabilityZone: aws.String("us-east-1a"), }}, map[string]string{ - kubeletapis.LabelZoneFailureDomain: "us-east-1a", - kubeletapis.LabelZoneRegion: "us-east-1", + v1.LabelZoneFailureDomain: "us-east-1a", + v1.LabelZoneRegion: "us-east-1", }, nil, }, diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index 12b75d7966..b0f6e887d2 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -37,7 +37,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", deps = [ "//pkg/cloudprovider/providers/azure/auth:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -94,7 +93,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/cloudprovider/providers/azure/auth:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 02c9267ad7..47826f1bb0 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -37,7 +37,6 @@ import ( "k8s.io/client-go/util/flowcontrol" cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" @@ -521,8 +520,8 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { UpdateFunc: func(prev, obj interface{}) { prevNode := 
prev.(*v1.Node) newNode := obj.(*v1.Node) - if newNode.Labels[kubeletapis.LabelZoneFailureDomain] == - prevNode.Labels[kubeletapis.LabelZoneFailureDomain] { + if newNode.Labels[v1.LabelZoneFailureDomain] == + prevNode.Labels[v1.LabelZoneFailureDomain] { return } az.updateNodeCaches(prevNode, newNode) @@ -556,7 +555,7 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { if prevNode != nil { // Remove from nodeZones cache. - prevZone, ok := prevNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] + prevZone, ok := prevNode.ObjectMeta.Labels[v1.LabelZoneFailureDomain] if ok && az.isAvailabilityZone(prevZone) { az.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name) if az.nodeZones[prevZone].Len() == 0 { @@ -579,7 +578,7 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { if newNode != nil { // Add to nodeZones cache. - newZone, ok := newNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] + newZone, ok := newNode.ObjectMeta.Labels[v1.LabelZoneFailureDomain] if ok && az.isAvailabilityZone(newZone) { if az.nodeZones[newZone] == nil { az.nodeZones[newZone] = sets.NewString() diff --git a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go index 110636531d..1e296ba475 100644 --- a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go @@ -30,7 +30,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" kwait "k8s.io/apimachinery/pkg/util/wait" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) @@ -323,8 +322,8 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) { zone := c.makeZone(zoneID) klog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName) labels := map[string]string{ - kubeletapis.LabelZoneRegion: c.Location, - 
kubeletapis.LabelZoneFailureDomain: zone, + v1.LabelZoneRegion: c.Location, + v1.LabelZoneFailureDomain: zone, } return labels, nil } diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index b90d3f2723..4cfb3a1d0f 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" servicehelpers "k8s.io/cloud-provider/service/helpers" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" @@ -1093,7 +1092,7 @@ func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clus ObjectMeta: metav1.ObjectMeta{ Name: vmName, Labels: map[string]string{ - kubeletapis.LabelHostname: vmName, + v1.LabelHostname: vmName, }, }, } diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index 4843e8b085..b28a1178d9 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -47,6 +47,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", deps = [ "//pkg/kubelet/apis:go_default_library", + "//pkg/api/v1/service:go_default_library", + "//pkg/features:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -109,7 +111,9 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//pkg/api/v1/service:go_default_library", "//pkg/kubelet/apis:go_default_library", + "//pkg/util/net/sets:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 6e45eef162..0716a96877 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -52,7 +52,6 @@ import ( "k8s.io/client-go/util/flowcontrol" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) const ( @@ -709,8 +708,8 @@ func (g *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { UpdateFunc: func(prev, obj interface{}) { prevNode := prev.(*v1.Node) newNode := obj.(*v1.Node) - if newNode.Labels[kubeletapis.LabelZoneFailureDomain] == - prevNode.Labels[kubeletapis.LabelZoneFailureDomain] { + if newNode.Labels[v1.LabelZoneFailureDomain] == + prevNode.Labels[v1.LabelZoneFailureDomain] { return } g.updateNodeZones(prevNode, newNode) @@ -741,7 +740,7 @@ func (g *Cloud) updateNodeZones(prevNode, newNode *v1.Node) { g.nodeZonesLock.Lock() defer g.nodeZonesLock.Unlock() if prevNode != nil { - prevZone, ok := prevNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] + prevZone, ok := prevNode.ObjectMeta.Labels[v1.LabelZoneFailureDomain] if ok { g.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name) if g.nodeZones[prevZone].Len() == 0 { @@ -750,7 +749,7 @@ } } if newNode != nil { - newZone, ok := newNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] + newZone, ok := newNode.ObjectMeta.Labels[v1.LabelZoneFailureDomain] if ok { if g.nodeZones[newZone] == nil { g.nodeZones[newZone] = sets.NewString() diff --git a/pkg/cloudprovider/providers/gce/gce_disks.go b/pkg/cloudprovider/providers/gce/gce_disks.go index 268d83b5c4..8968b1fbc3 100644 ---
a/pkg/cloudprovider/providers/gce/gce_disks.go +++ b/pkg/cloudprovider/providers/gce/gce_disks.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" cloudprovider "k8s.io/cloud-provider" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -510,7 +509,7 @@ func (g *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) } // If the zone is already labeled, honor the hint - zone := pv.Labels[kubeletapis.LabelZoneFailureDomain] + zone := pv.Labels[v1.LabelZoneFailureDomain] labels, err := g.GetAutoLabelsForPD(pv.Spec.GCEPersistentDisk.PDName, zone) if err != nil { @@ -845,16 +844,16 @@ func (g *Cloud) GetAutoLabelsForPD(name string, zone string) (map[string]string, // Unexpected, but sanity-check return nil, fmt.Errorf("PD did not have zone/region information: %v", disk) } - labels[kubeletapis.LabelZoneFailureDomain] = zoneInfo.zone - labels[kubeletapis.LabelZoneRegion] = disk.Region + labels[v1.LabelZoneFailureDomain] = zoneInfo.zone + labels[v1.LabelZoneRegion] = disk.Region case multiZone: if zoneInfo.replicaZones == nil || zoneInfo.replicaZones.Len() <= 0 { // Unexpected, but sanity-check return nil, fmt.Errorf("PD is regional but does not have any replicaZones specified: %v", disk) } - labels[kubeletapis.LabelZoneFailureDomain] = + labels[v1.LabelZoneFailureDomain] = volumeutil.ZonesSetToLabelValue(zoneInfo.replicaZones) - labels[kubeletapis.LabelZoneRegion] = disk.Region + labels[v1.LabelZoneRegion] = disk.Region case nil: // Unexpected, but sanity-check return nil, fmt.Errorf("PD did not have ZoneInfo: %v", disk) diff --git a/pkg/cloudprovider/providers/gce/gce_disks_test.go b/pkg/cloudprovider/providers/gce/gce_disks_test.go index 6b45c86868..27531cb809 100644 --- a/pkg/cloudprovider/providers/gce/gce_disks_test.go +++ b/pkg/cloudprovider/providers/gce/gce_disks_test.go @@ -25,9 +25,9 @@ import ( computebeta 
"google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" cloudprovider "k8s.io/cloud-provider" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) // TODO TODO write a test for GetDiskByNameUnknownZone and make sure casting logic works @@ -463,12 +463,12 @@ func TestGetAutoLabelsForPD_Basic(t *testing.T) { if err != nil { t.Error(err) } - if labels[kubeletapis.LabelZoneFailureDomain] != zone { + if labels[v1.LabelZoneFailureDomain] != zone { t.Errorf("Failure domain is '%v', but zone is '%v'", - labels[kubeletapis.LabelZoneFailureDomain], zone) + labels[v1.LabelZoneFailureDomain], zone) } - if labels[kubeletapis.LabelZoneRegion] != gceRegion { - t.Errorf("Region is '%v', but region is 'us-central1'", labels[kubeletapis.LabelZoneRegion]) + if labels[v1.LabelZoneRegion] != gceRegion { + t.Errorf("Region is '%v', but region is 'us-central1'", labels[v1.LabelZoneRegion]) } } @@ -499,12 +499,12 @@ func TestGetAutoLabelsForPD_NoZone(t *testing.T) { if err != nil { t.Error(err) } - if labels[kubeletapis.LabelZoneFailureDomain] != zone { + if labels[v1.LabelZoneFailureDomain] != zone { t.Errorf("Failure domain is '%v', but zone is '%v'", - labels[kubeletapis.LabelZoneFailureDomain], zone) + labels[v1.LabelZoneFailureDomain], zone) } - if labels[kubeletapis.LabelZoneRegion] != gceRegion { - t.Errorf("Region is '%v', but region is 'europe-west1'", labels[kubeletapis.LabelZoneRegion]) + if labels[v1.LabelZoneRegion] != gceRegion { + t.Errorf("Region is '%v', but region is 'europe-west1'", labels[v1.LabelZoneRegion]) } } @@ -585,12 +585,12 @@ func TestGetAutoLabelsForPD_DupDisk(t *testing.T) { if err != nil { t.Error("Disk name and zone uniquely identifies a disk, yet an error is returned.") } - if labels[kubeletapis.LabelZoneFailureDomain] != zone { + if labels[v1.LabelZoneFailureDomain] != zone { t.Errorf("Failure domain is '%v', but zone is '%v'", - 
labels[kubeletapis.LabelZoneFailureDomain], zone) + labels[v1.LabelZoneFailureDomain], zone) } - if labels[kubeletapis.LabelZoneRegion] != gceRegion { - t.Errorf("Region is '%v', but region is 'us-west1'", labels[kubeletapis.LabelZoneRegion]) + if labels[v1.LabelZoneRegion] != gceRegion { + t.Errorf("Region is '%v', but region is 'us-west1'", labels[v1.LabelZoneRegion]) } } diff --git a/pkg/cloudprovider/providers/gce/gce_instances.go b/pkg/cloudprovider/providers/gce/gce_instances.go index f93e0fff00..ef705fa808 100644 --- a/pkg/cloudprovider/providers/gce/gce_instances.go +++ b/pkg/cloudprovider/providers/gce/gce_instances.go @@ -37,7 +37,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" cloudprovider "k8s.io/cloud-provider" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) const ( @@ -60,7 +59,7 @@ func splitNodesByZone(nodes []*v1.Node) map[string][]*v1.Node { } func getZone(n *v1.Node) string { - zone, ok := n.Labels[kubeletapis.LabelZoneFailureDomain] + zone, ok := n.Labels[v1.LabelZoneFailureDomain] if !ok { return defaultZone } diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer_utils_test.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer_utils_test.go index 293d1e5309..87fcf1d5dc 100644 --- a/pkg/cloudprovider/providers/gce/gce_loadbalancer_utils_test.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer_utils_test.go @@ -36,7 +36,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" servicehelpers "k8s.io/cloud-provider/service/helpers" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) // TODO(yankaiz): Create shared error types for both test/non-test codes. 
@@ -99,8 +98,8 @@ func createAndInsertNodes(gce *Cloud, nodeNames []string, zoneName string) ([]*v ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ - kubeletapis.LabelHostname: name, - kubeletapis.LabelZoneFailureDomain: zoneName, + v1.LabelHostname: name, + v1.LabelZoneFailureDomain: zoneName, }, }, Status: v1.NodeStatus{ diff --git a/pkg/cloudprovider/providers/openstack/BUILD b/pkg/cloudprovider/providers/openstack/BUILD index 313a3b2723..db74a84eaa 100644 --- a/pkg/cloudprovider/providers/openstack/BUILD +++ b/pkg/cloudprovider/providers/openstack/BUILD @@ -20,7 +20,8 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack", deps = [ - "//pkg/kubelet/apis:go_default_library", + "//pkg/api/v1/service:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", diff --git a/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/pkg/cloudprovider/providers/openstack/openstack_volumes.go index fa4c4d5976..b6b0fc5976 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -30,7 +30,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" k8s_volume "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -715,8 +714,8 @@ func (os *OpenStack) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVo // Construct Volume Labels labels := make(map[string]string) - labels[kubeletapis.LabelZoneFailureDomain] = volume.AvailabilityZone - labels[kubeletapis.LabelZoneRegion] = os.region + labels[v1.LabelZoneFailureDomain] = volume.AvailabilityZone + labels[v1.LabelZoneRegion] = os.region klog.V(4).Infof("The Volume %s has labels %v", pv.Spec.Cinder.VolumeID, labels) return 
labels, nil diff --git a/pkg/controller/cloud/node_controller.go b/pkg/controller/cloud/node_controller.go index 312bb550be..781c3e7c34 100644 --- a/pkg/controller/cloud/node_controller.go +++ b/pkg/controller/cloud/node_controller.go @@ -284,8 +284,8 @@ func (cnc *CloudNodeController) initializeNode(node *v1.Node) { if instanceType, err := getInstanceTypeByProviderIDOrName(instances, curNode); err != nil { return err } else if instanceType != "" { - klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType) - curNode.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceType, instanceType) + curNode.ObjectMeta.Labels[v1.LabelInstanceType] = instanceType } if zones, ok := cnc.cloud.Zones(); ok { @@ -294,12 +294,12 @@ func (cnc *CloudNodeController) initializeNode(node *v1.Node) { return fmt.Errorf("failed to get zone from cloud provider: %v", err) } if zone.FailureDomain != "" { - klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain) - curNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomain, zone.FailureDomain) + curNode.ObjectMeta.Labels[v1.LabelZoneFailureDomain] = zone.FailureDomain } if zone.Region != "" { - klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region) - curNode.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegion, zone.Region) + curNode.ObjectMeta.Labels[v1.LabelZoneRegion] = zone.Region } } diff --git a/pkg/controller/cloud/node_controller_test.go b/pkg/controller/cloud/node_controller_test.go index e5ccc73642..f78b2f73c8 100644 --- a/pkg/controller/cloud/node_controller_test.go +++ 
b/pkg/controller/cloud/node_controller_test.go @@ -462,9 +462,9 @@ func TestZoneInitialized(t *testing.T) { assert.Equal(t, "node0", fnh.UpdatedNodes[0].Name, "Node was not updated") assert.Equal(t, 2, len(fnh.UpdatedNodes[0].ObjectMeta.Labels), "Node label for Region and Zone were not set") - assert.Equal(t, "us-west", fnh.UpdatedNodes[0].ObjectMeta.Labels[kubeletapis.LabelZoneRegion], + assert.Equal(t, "us-west", fnh.UpdatedNodes[0].ObjectMeta.Labels[v1.LabelZoneRegion], "Node Region not correctly updated") - assert.Equal(t, "us-west-1a", fnh.UpdatedNodes[0].ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain], + assert.Equal(t, "us-west-1a", fnh.UpdatedNodes[0].ObjectMeta.Labels[v1.LabelZoneFailureDomain], "Node FailureDomain not correctly updated") } diff --git a/pkg/controller/cloud/pvlcontroller.go b/pkg/controller/cloud/pvlcontroller.go index 4f05a7a81f..bf81539448 100644 --- a/pkg/controller/cloud/pvlcontroller.go +++ b/pkg/controller/cloud/pvlcontroller.go @@ -40,7 +40,6 @@ import ( cloudprovider "k8s.io/cloud-provider" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/controller" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -210,7 +209,7 @@ func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolum // Set NodeSelectorRequirements based on the labels if populateAffinity { var values []string - if k == kubeletapis.LabelZoneFailureDomain { + if k == v1.LabelZoneFailureDomain { zones, err := volumeutil.LabelZonesToSet(v) if err != nil { return nil, fmt.Errorf("failed to convert label string for Zone: %s to a Set", v) diff --git a/pkg/controller/cloud/pvlcontroller_test.go b/pkg/controller/cloud/pvlcontroller_test.go index 83fd0a7de2..280af04a95 100644 --- a/pkg/controller/cloud/pvlcontroller_test.go +++ b/pkg/controller/cloud/pvlcontroller_test.go @@ -29,7 +29,6 @@ import ( sets "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes/fake" core 
"k8s.io/client-go/testing" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" volumeutil "k8s.io/kubernetes/pkg/volume/util" fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" @@ -176,7 +175,7 @@ func TestCreatePatch(t *testing.T) { { MatchExpressions: []v1.NodeSelectorRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: v1.NodeSelectorOpIn, Values: []string{"1"}, }, @@ -191,7 +190,7 @@ func TestCreatePatch(t *testing.T) { { MatchExpressions: []v1.NodeSelectorRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: v1.NodeSelectorOpIn, Values: []string{"1", "2", "3"}, }, @@ -306,7 +305,7 @@ func TestCreatePatch(t *testing.T) { Values: []string{"val3"}, }, { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: v1.NodeSelectorOpIn, Values: []string{"1"}, }, @@ -320,7 +319,7 @@ func TestCreatePatch(t *testing.T) { Values: []string{"val4", "val5"}, }, { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: v1.NodeSelectorOpIn, Values: []string{"1"}, }, @@ -345,7 +344,7 @@ func TestCreatePatch(t *testing.T) { Values: []string{"val3"}, }, { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: v1.NodeSelectorOpIn, Values: []string{"1", "2", "3"}, }, @@ -359,7 +358,7 @@ func TestCreatePatch(t *testing.T) { Values: []string{"val5", "val4"}, }, { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: v1.NodeSelectorOpIn, Values: []string{"3", "2", "1"}, }, @@ -407,22 +406,22 @@ func TestCreatePatch(t *testing.T) { }, "cloudprovider singlezone": { vol: awsPV, - labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "1"}, + labels: map[string]string{v1.LabelZoneFailureDomain: "1"}, expectedAffinity: &expectedAffinityZone1MergedWithAWSPV, }, "cloudprovider singlezone pre-existing affinity non-conflicting": { vol: awsPVWithAffinity, - 
labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "1"}, + labels: map[string]string{v1.LabelZoneFailureDomain: "1"}, expectedAffinity: &expectedAffinityZone1MergedWithAWSPVWithAffinity, }, "cloudprovider multizone": { vol: awsPV, - labels: map[string]string{kubeletapis.LabelZoneFailureDomain: volumeutil.ZonesSetToLabelValue(zones)}, + labels: map[string]string{v1.LabelZoneFailureDomain: volumeutil.ZonesSetToLabelValue(zones)}, expectedAffinity: &expectedAffinityZonesMergedWithAWSPV, }, "cloudprovider multizone pre-existing affinity non-conflicting": { vol: awsPVWithAffinity, - labels: map[string]string{kubeletapis.LabelZoneFailureDomain: volumeutil.ZonesSetToLabelValue(zones)}, + labels: map[string]string{v1.LabelZoneFailureDomain: volumeutil.ZonesSetToLabelValue(zones)}, expectedAffinity: &expectedAffinityZonesMergedWithAWSPVWithAffinity, }, } diff --git a/pkg/controller/daemon/util/BUILD b/pkg/controller/daemon/util/BUILD index 5d703265ed..d8b01615dd 100644 --- a/pkg/controller/daemon/util/BUILD +++ b/pkg/controller/daemon/util/BUILD @@ -40,7 +40,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/api:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", diff --git a/pkg/controller/daemon/util/daemonset_util_test.go b/pkg/controller/daemon/util/daemonset_util_test.go index 1ed864849c..68d26531db 100644 --- a/pkg/controller/daemon/util/daemonset_util_test.go +++ b/pkg/controller/daemon/util/daemonset_util_test.go @@ -27,7 +27,6 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/kubernetes/pkg/features" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" ) @@ -209,7 +208,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) { 
{ MatchExpressions: []v1.NodeSelectorRequirement{ { - Key: kubeletapis.LabelHostname, + Key: v1.LabelHostname, Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }, @@ -246,7 +245,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) { Preference: v1.NodeSelectorTerm{ MatchExpressions: []v1.NodeSelectorRequirement{ { - Key: kubeletapis.LabelHostname, + Key: v1.LabelHostname, Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }, @@ -264,7 +263,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) { Preference: v1.NodeSelectorTerm{ MatchExpressions: []v1.NodeSelectorRequirement{ { - Key: kubeletapis.LabelHostname, + Key: v1.LabelHostname, Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }, diff --git a/pkg/controller/nodelifecycle/BUILD b/pkg/controller/nodelifecycle/BUILD index 015b150ea8..a7935ff790 100644 --- a/pkg/controller/nodelifecycle/BUILD +++ b/pkg/controller/nodelifecycle/BUILD @@ -73,7 +73,6 @@ go_test( "//pkg/controller/testutil:go_default_library", "//pkg/controller/util/node:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/util/node:go_default_library", "//pkg/util/taints:go_default_library", diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go index 8158699119..a935a00d88 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go @@ -42,7 +42,6 @@ import ( "k8s.io/kubernetes/pkg/controller/testutil" nodeutil "k8s.io/kubernetes/pkg/controller/util/node" "k8s.io/kubernetes/pkg/features" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/pkg/util/node" taintutils "k8s.io/kubernetes/pkg/util/taints" @@ -175,8 +174,8 @@ func TestMonitorNodeHealthEvictPods(t 
*testing.T) { fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC) evictionTimeout := 10 * time.Minute labels := map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", } // Because of the logic that prevents NC from evicting anything when all Nodes are NotReady @@ -212,8 +211,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: fakeNow, Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, }, @@ -222,8 +221,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -292,8 +291,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -312,8 +311,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -356,8 +355,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - 
kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -376,8 +375,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -447,8 +446,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -467,8 +466,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -511,8 +510,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -531,8 +530,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: 
"zone1", }, }, Status: v1.NodeStatus{ @@ -575,8 +574,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -595,8 +594,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -734,8 +733,8 @@ func TestPodStatusChange(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -754,8 +753,8 @@ func TestPodStatusChange(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -897,8 +896,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -917,8 +916,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node1", CreationTimestamp: 
metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -952,8 +951,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -972,8 +971,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region2", - kubeletapis.LabelZoneFailureDomain: "zone2", + v1.LabelZoneRegion: "region2", + v1.LabelZoneFailureDomain: "zone2", }, }, Status: v1.NodeStatus{ @@ -1014,8 +1013,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1034,8 +1033,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone2", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone2", }, }, Status: v1.NodeStatus{ @@ -1075,8 +1074,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - 
kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1095,8 +1094,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node-master", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1134,8 +1133,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1154,8 +1153,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone2", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone2", }, }, Status: v1.NodeStatus{ @@ -1196,8 +1195,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1216,8 +1215,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - 
kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1236,8 +1235,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node2", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1256,8 +1255,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node3", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1276,8 +1275,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node4", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2366,8 +2365,8 @@ func TestApplyNoExecuteTaints(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2388,8 +2387,8 @@ func TestApplyNoExecuteTaints(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: 
"zone1", }, }, Status: v1.NodeStatus{ @@ -2409,8 +2408,8 @@ func TestApplyNoExecuteTaints(t *testing.T) { Name: "node2", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2512,8 +2511,8 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2535,8 +2534,8 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2660,8 +2659,8 @@ func TestTaintsNodeByCondition(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2718,8 +2717,8 @@ func TestTaintsNodeByCondition(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2748,8 +2747,8 @@ func TestTaintsNodeByCondition(t *testing.T) { Name: "node0", CreationTimestamp: 
metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2778,8 +2777,8 @@ func TestTaintsNodeByCondition(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2802,8 +2801,8 @@ func TestTaintsNodeByCondition(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region1", - kubeletapis.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index 80b7ec96a1..4f2779ed86 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -42,7 +42,6 @@ go_library( "//pkg/capabilities:go_default_library", "//pkg/features:go_default_library", "//pkg/fieldpath:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis/config:go_default_library", "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library", @@ -178,7 +177,6 @@ go_test( "//pkg/apis/core/install:go_default_library", "//pkg/capabilities:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/cadvisor/testing:go_default_library", "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/config:go_default_library", diff --git a/pkg/kubelet/apis/BUILD b/pkg/kubelet/apis/BUILD index 926c8add6c..e1bfa430ca 100644 --- a/pkg/kubelet/apis/BUILD +++ b/pkg/kubelet/apis/BUILD @@ -14,6 +14,7 @@ go_library( ], 
importpath = "k8s.io/kubernetes/pkg/kubelet/apis", deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:windows": [ diff --git a/pkg/kubelet/apis/well_known_labels.go b/pkg/kubelet/apis/well_known_labels.go index 117b97d05f..bdc14c5130 100644 --- a/pkg/kubelet/apis/well_known_labels.go +++ b/pkg/kubelet/apis/well_known_labels.go @@ -19,54 +19,32 @@ package apis import ( "strings" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" ) const ( - LabelHostname = "kubernetes.io/hostname" - LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" - LabelMultiZoneDelimiter = "__" - LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" - - LabelInstanceType = "beta.kubernetes.io/instance-type" - - LabelOS = "kubernetes.io/os" - LabelArch = "kubernetes.io/arch" - // The OS/Arch labels are promoted to GA in 1.14. kubelet applies both beta - // and GA labels to ensure backward compatibility. - // TODO: stop applying the beta OS/Arch labels in Kubernetes 1.17. - LegacyLabelOS = "beta.kubernetes.io/os" - LegacyLabelArch = "beta.kubernetes.io/arch" - // GA versions of the legacy beta labels. 
// TODO: update kubelet and controllers to set both beta and GA labels, then export these constants labelZoneFailureDomainGA = "failure-domain.kubernetes.io/zone" labelZoneRegionGA = "failure-domain.kubernetes.io/region" labelInstanceTypeGA = "kubernetes.io/instance-type" - - // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*) - LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io" - // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*) - LabelNamespaceSuffixNode = "node.kubernetes.io" - - // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled - LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" ) // When the --failure-domains scheduler flag is not specified, // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. 
-var DefaultFailureDomains string = LabelHostname + "," + LabelZoneFailureDomain + "," + LabelZoneRegion +var DefaultFailureDomains string = v1.LabelHostname + "," + v1.LabelZoneFailureDomain + "," + v1.LabelZoneRegion var kubeletLabels = sets.NewString( - LabelHostname, - LabelZoneFailureDomain, - LabelZoneRegion, - LabelInstanceType, - LabelOS, - LabelArch, + v1.LabelHostname, + v1.LabelZoneFailureDomain, + v1.LabelZoneRegion, + v1.LabelInstanceType, + v1.LabelOS, + v1.LabelArch, - LegacyLabelOS, - LegacyLabelArch, + v1.LegacyLabelOS, + v1.LegacyLabelArch, labelZoneFailureDomainGA, labelZoneRegionGA, @@ -74,8 +52,8 @@ var kubeletLabels = sets.NewString( ) var kubeletLabelNamespaces = sets.NewString( - LabelNamespaceSuffixKubelet, - LabelNamespaceSuffixNode, + v1.LabelNamespaceSuffixKubelet, + v1.LabelNamespaceSuffixNode, ) // KubeletLabels returns the list of label keys kubelets are allowed to set on their own Node objects diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index bc86e4d006..e584fc126a 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -37,7 +37,6 @@ import ( k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/nodestatus" "k8s.io/kubernetes/pkg/kubelet/util" @@ -145,14 +144,14 @@ func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool { // updateDefaultLabels will set the default labels on the node func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool { defaultLabels := []string{ - kubeletapis.LabelHostname, - kubeletapis.LabelZoneFailureDomain, - kubeletapis.LabelZoneRegion, - kubeletapis.LabelInstanceType, - kubeletapis.LabelOS, - kubeletapis.LabelArch, - kubeletapis.LegacyLabelOS, - kubeletapis.LegacyLabelArch, + 
v1.LabelHostname, + v1.LabelZoneFailureDomain, + v1.LabelZoneRegion, + v1.LabelInstanceType, + v1.LabelOS, + v1.LabelArch, + v1.LegacyLabelOS, + v1.LegacyLabelArch, } needsUpdate := false @@ -215,11 +214,11 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { ObjectMeta: metav1.ObjectMeta{ Name: string(kl.nodeName), Labels: map[string]string{ - kubeletapis.LabelHostname: kl.hostname, - kubeletapis.LabelOS: goruntime.GOOS, - kubeletapis.LabelArch: goruntime.GOARCH, - kubeletapis.LegacyLabelOS: goruntime.GOOS, - kubeletapis.LegacyLabelArch: goruntime.GOARCH, + v1.LabelHostname: kl.hostname, + v1.LabelOS: goruntime.GOOS, + v1.LabelArch: goruntime.GOARCH, + v1.LegacyLabelOS: goruntime.GOOS, + v1.LegacyLabelArch: goruntime.GOARCH, }, }, Spec: v1.NodeSpec{ @@ -327,8 +326,8 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { return nil, err } if instanceType != "" { - klog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType) - node.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType + klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceType, instanceType) + node.ObjectMeta.Labels[v1.LabelInstanceType] = instanceType } // If the cloud has zone information, label the node with the zone information zones, ok := kl.cloud.Zones() @@ -338,12 +337,12 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err) } if zone.FailureDomain != "" { - klog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain) - node.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain + klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomain, zone.FailureDomain) + node.ObjectMeta.Labels[v1.LabelZoneFailureDomain] = zone.FailureDomain } if zone.Region != "" { - klog.Infof("Adding node label from cloud provider: %s=%s", 
kubeletapis.LabelZoneRegion, zone.Region) - node.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region + klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegion, zone.Region) + node.ObjectMeta.Labels[v1.LabelZoneRegion] = zone.Region } } } diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index ceb76205aa..75a43b513f 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -49,7 +49,6 @@ import ( "k8s.io/client-go/rest" core "k8s.io/client-go/testing" "k8s.io/kubernetes/pkg/features" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -1046,11 +1045,11 @@ func TestRegisterWithApiServer(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: testKubeletHostname, Labels: map[string]string{ - kubeletapis.LabelHostname: testKubeletHostname, - kubeletapis.LabelOS: goruntime.GOOS, - kubeletapis.LabelArch: goruntime.GOARCH, - kubeletapis.LegacyLabelOS: goruntime.GOOS, - kubeletapis.LegacyLabelArch: goruntime.GOARCH, + v1.LabelHostname: testKubeletHostname, + v1.LabelOS: goruntime.GOOS, + v1.LabelArch: goruntime.GOARCH, + v1.LegacyLabelOS: goruntime.GOOS, + v1.LegacyLabelArch: goruntime.GOARCH, }, }, }, nil @@ -1093,11 +1092,11 @@ func TestTryRegisterWithApiServer(t *testing.T) { node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - kubeletapis.LabelHostname: testKubeletHostname, - kubeletapis.LabelOS: goruntime.GOOS, - kubeletapis.LabelArch: goruntime.GOARCH, - kubeletapis.LegacyLabelOS: goruntime.GOOS, - kubeletapis.LegacyLabelArch: goruntime.GOARCH, + v1.LabelHostname: testKubeletHostname, + v1.LabelOS: goruntime.GOOS, + v1.LabelArch: goruntime.GOARCH, + v1.LegacyLabelOS: goruntime.GOOS, + v1.LegacyLabelArch: goruntime.GOARCH, }, }, } @@ -1325,12 +1324,12 @@ func 
TestUpdateDefaultLabels(t *testing.T) { initialNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", }, }, }, @@ -1341,12 +1340,12 @@ func TestUpdateDefaultLabels(t *testing.T) { }, needsUpdate: true, finalLabels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", }, }, { @@ -1354,35 +1353,35 @@ func TestUpdateDefaultLabels(t *testing.T) { initialNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", }, }, }, existingNode: &v1.Node{ ObjectMeta: 
metav1.ObjectMeta{ Labels: map[string]string{ - kubeletapis.LabelHostname: "old-hostname", - kubeletapis.LabelZoneFailureDomain: "old-zone-failure-domain", - kubeletapis.LabelZoneRegion: "old-zone-region", - kubeletapis.LabelInstanceType: "old-instance-type", - kubeletapis.LabelOS: "old-os", - kubeletapis.LabelArch: "old-arch", + v1.LabelHostname: "old-hostname", + v1.LabelZoneFailureDomain: "old-zone-failure-domain", + v1.LabelZoneRegion: "old-zone-region", + v1.LabelInstanceType: "old-instance-type", + v1.LabelOS: "old-os", + v1.LabelArch: "old-arch", }, }, }, needsUpdate: true, finalLabels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", }, }, { @@ -1390,37 +1389,37 @@ func TestUpdateDefaultLabels(t *testing.T) { initialNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", }, }, }, existingNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: 
"new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", - "please-persist": "foo", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", + "please-persist": "foo", }, }, }, needsUpdate: false, finalLabels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", - "please-persist": "foo", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", + "please-persist": "foo", }, }, { @@ -1433,25 +1432,25 @@ func TestUpdateDefaultLabels(t *testing.T) { existingNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", - "please-persist": "foo", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", + "please-persist": "foo", }, }, }, needsUpdate: false, finalLabels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: 
"new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", - "please-persist": "foo", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", + "please-persist": "foo", }, }, { @@ -1459,35 +1458,35 @@ func TestUpdateDefaultLabels(t *testing.T) { initialNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", }, }, }, existingNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", }, }, }, needsUpdate: false, finalLabels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - 
kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", }, }, { @@ -1495,12 +1494,12 @@ func TestUpdateDefaultLabels(t *testing.T) { initialNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", }, }, }, @@ -1509,12 +1508,12 @@ func TestUpdateDefaultLabels(t *testing.T) { }, needsUpdate: true, finalLabels: map[string]string{ - kubeletapis.LabelHostname: "new-hostname", - kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", - kubeletapis.LabelZoneRegion: "new-zone-region", - kubeletapis.LabelInstanceType: "new-instance-type", - kubeletapis.LabelOS: "new-os", - kubeletapis.LabelArch: "new-arch", + v1.LabelHostname: "new-hostname", + v1.LabelZoneFailureDomain: "new-zone-failure-domain", + v1.LabelZoneRegion: "new-zone-region", + v1.LabelInstanceType: "new-instance-type", + v1.LabelOS: "new-os", + v1.LabelArch: "new-arch", }, }, } diff --git a/pkg/scheduler/BUILD b/pkg/scheduler/BUILD index 5a587510e6..648ed2f9cf 100644 --- a/pkg/scheduler/BUILD +++ b/pkg/scheduler/BUILD @@ -10,7 +10,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler", visibility = ["//visibility:public"], deps = [ - "//pkg/kubelet/apis:go_default_library", 
"//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/api:go_default_library", diff --git a/pkg/scheduler/algorithm/predicates/BUILD b/pkg/scheduler/algorithm/predicates/BUILD index 4f979a11c7..778ec193fb 100644 --- a/pkg/scheduler/algorithm/predicates/BUILD +++ b/pkg/scheduler/algorithm/predicates/BUILD @@ -21,7 +21,6 @@ go_library( "//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper/qos:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", @@ -58,7 +57,6 @@ go_test( deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/testing:go_default_library", diff --git a/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go b/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go index b734ddd68e..4da8962ad0 100644 --- a/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go +++ b/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go @@ -29,7 +29,6 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/kubernetes/pkg/features" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -875,7 +874,7 @@ func TestMaxVolumeFuncM5(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node-for-m5-instance", Labels: map[string]string{ - kubeletapis.LabelInstanceType: "m5.large", + v1.LabelInstanceType: "m5.large", }, }, } @@ -892,7 +891,7 @@ func 
TestMaxVolumeFuncT3(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node-for-t3-instance", Labels: map[string]string{ - kubeletapis.LabelInstanceType: "t3.medium", + v1.LabelInstanceType: "t3.medium", }, }, } @@ -909,7 +908,7 @@ func TestMaxVolumeFuncR5(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node-for-r5-instance", Labels: map[string]string{ - kubeletapis.LabelInstanceType: "r5d.xlarge", + v1.LabelInstanceType: "r5d.xlarge", }, }, } @@ -926,7 +925,7 @@ func TestMaxVolumeFuncM4(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node-for-m4-instance", Labels: map[string]string{ - kubeletapis.LabelInstanceType: "m4.2xlarge", + v1.LabelInstanceType: "m4.2xlarge", }, }, } diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go index 61967923ef..f0fc2807db 100644 --- a/pkg/scheduler/algorithm/predicates/predicates.go +++ b/pkg/scheduler/algorithm/predicates/predicates.go @@ -39,7 +39,6 @@ import ( v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/features" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/scheduler/algorithm" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" @@ -366,7 +365,7 @@ func getMaxVolumeFunc(filterName string) func(node *v1.Node) int { var nodeInstanceType string for k, v := range node.ObjectMeta.Labels { - if k == kubeletapis.LabelInstanceType { + if k == v1.LabelInstanceType { nodeInstanceType = v } } @@ -629,7 +628,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeI nodeConstraints := make(map[string]string) for k, v := range node.ObjectMeta.Labels { - if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion { + if k != v1.LabelZoneFailureDomain && k != v1.LabelZoneRegion { continue } nodeConstraints[k] = v @@ -688,7 +687,7 @@ func (c 
*VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeI } for k, v := range pv.ObjectMeta.Labels { - if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion { + if k != v1.LabelZoneFailureDomain && k != v1.LabelZoneRegion { continue } nodeV, _ := nodeConstraints[k] diff --git a/pkg/scheduler/algorithm/predicates/predicates_test.go b/pkg/scheduler/algorithm/predicates/predicates_test.go index 0a9305aa8e..4fe557fa14 100644 --- a/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -30,7 +30,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" @@ -4565,13 +4564,13 @@ func createPodWithVolume(pod, pv, pvc string) *v1.Pod { func TestVolumeZonePredicate(t *testing.T) { pvInfo := FakePersistentVolumeInfo{ { - ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "us-west1-a"}}, + ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}}, }, { - ObjectMeta: metav1.ObjectMeta{Name: "Vol_2", Labels: map[string]string{kubeletapis.LabelZoneRegion: "us-west1-b", "uselessLabel": "none"}}, + ObjectMeta: metav1.ObjectMeta{Name: "Vol_2", Labels: map[string]string{v1.LabelZoneRegion: "us-west1-b", "uselessLabel": "none"}}, }, { - ObjectMeta: metav1.ObjectMeta{Name: "Vol_3", Labels: map[string]string{kubeletapis.LabelZoneRegion: "us-west1-c"}}, + ObjectMeta: metav1.ObjectMeta{Name: "Vol_3", Labels: map[string]string{v1.LabelZoneRegion: "us-west1-c"}}, }, } @@ -4608,7 +4607,7 @@ func TestVolumeZonePredicate(t *testing.T) { Node: &v1.Node{ ObjectMeta: 
metav1.ObjectMeta{ Name: "host1", - Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "us-west1-a"}, + Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}, }, }, Fits: true, @@ -4629,7 +4628,7 @@ func TestVolumeZonePredicate(t *testing.T) { Node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "host1", - Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "us-west1-a", "uselessLabel": "none"}, + Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a", "uselessLabel": "none"}, }, }, Fits: true, @@ -4640,7 +4639,7 @@ func TestVolumeZonePredicate(t *testing.T) { Node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "host1", - Labels: map[string]string{kubeletapis.LabelZoneRegion: "us-west1-b", "uselessLabel": "none"}, + Labels: map[string]string{v1.LabelZoneRegion: "us-west1-b", "uselessLabel": "none"}, }, }, Fits: true, @@ -4651,7 +4650,7 @@ func TestVolumeZonePredicate(t *testing.T) { Node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "host1", - Labels: map[string]string{kubeletapis.LabelZoneRegion: "no_us-west1-b", "uselessLabel": "none"}, + Labels: map[string]string{v1.LabelZoneRegion: "no_us-west1-b", "uselessLabel": "none"}, }, }, Fits: false, @@ -4662,7 +4661,7 @@ func TestVolumeZonePredicate(t *testing.T) { Node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "host1", - Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "no_us-west1-a", "uselessLabel": "none"}, + Labels: map[string]string{v1.LabelZoneFailureDomain: "no_us-west1-a", "uselessLabel": "none"}, }, }, Fits: false, @@ -4694,13 +4693,13 @@ func TestVolumeZonePredicate(t *testing.T) { func TestVolumeZonePredicateMultiZone(t *testing.T) { pvInfo := FakePersistentVolumeInfo{ { - ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "us-west1-a"}}, + ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}}, }, { - ObjectMeta: 
metav1.ObjectMeta{Name: "Vol_2", Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "us-west1-b", "uselessLabel": "none"}}, + ObjectMeta: metav1.ObjectMeta{Name: "Vol_2", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-b", "uselessLabel": "none"}}, }, { - ObjectMeta: metav1.ObjectMeta{Name: "Vol_3", Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "us-west1-c__us-west1-a"}}, + ObjectMeta: metav1.ObjectMeta{Name: "Vol_3", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-c__us-west1-a"}}, }, } @@ -4745,7 +4744,7 @@ func TestVolumeZonePredicateMultiZone(t *testing.T) { Node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "host1", - Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "us-west1-a", "uselessLabel": "none"}, + Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a", "uselessLabel": "none"}, }, }, Fits: true, @@ -4756,7 +4755,7 @@ func TestVolumeZonePredicateMultiZone(t *testing.T) { Node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "host1", - Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "us-west1-b", "uselessLabel": "none"}, + Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-b", "uselessLabel": "none"}, }, }, Fits: false, @@ -4806,7 +4805,7 @@ func TestVolumeZonePredicateWithVolumeBinding(t *testing.T) { pvInfo := FakePersistentVolumeInfo{ { - ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "us-west1-a"}}, + ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}}, }, } @@ -4835,7 +4834,7 @@ func TestVolumeZonePredicateWithVolumeBinding(t *testing.T) { testNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "host1", - Labels: map[string]string{kubeletapis.LabelZoneFailureDomain: "us-west1-a", "uselessLabel": "none"}, + Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a", "uselessLabel": "none"}, }, } diff --git 
a/pkg/scheduler/algorithm/priorities/BUILD b/pkg/scheduler/algorithm/priorities/BUILD index 5050bba242..ffdac256a9 100644 --- a/pkg/scheduler/algorithm/priorities/BUILD +++ b/pkg/scheduler/algorithm/priorities/BUILD @@ -71,7 +71,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", diff --git a/pkg/scheduler/algorithm/priorities/selector_spreading_test.go b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go index eacc7bd768..173fd0bf4c 100644 --- a/pkg/scheduler/algorithm/priorities/selector_spreading_test.go +++ b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go @@ -24,7 +24,6 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" @@ -392,7 +391,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { buildNodeLabels := func(failureDomain string) map[string]string { labels := map[string]string{ - kubeletapis.LabelZoneFailureDomain: failureDomain, + v1.LabelZoneFailureDomain: failureDomain, } return labels } diff --git a/pkg/scheduler/eventhandlers.go b/pkg/scheduler/eventhandlers.go index 5c00f04b9d..e2d504f8d5 100644 --- a/pkg/scheduler/eventhandlers.go +++ b/pkg/scheduler/eventhandlers.go @@ -29,7 +29,6 @@ import ( policyinformers "k8s.io/client-go/informers/policy/v1beta1" storageinformers "k8s.io/client-go/informers/storage/v1" "k8s.io/client-go/tools/cache" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) func (sched *Scheduler) onPvAdd(obj interface{}) { @@ -270,11 +269,6 @@ func responsibleForPod(pod *v1.Pod, schedulerName string) 
bool { return schedulerName == pod.Spec.SchedulerName } -// isZoneRegionLabel check if given key of label is zone or region label. -func isZoneRegionLabel(k string) bool { - return k == kubeletapis.LabelZoneFailureDomain || k == kubeletapis.LabelZoneRegion -} - // skipPodUpdate checks whether the specified pod update should be ignored. // This function will return true if // - The pod has already been assumed, AND diff --git a/pkg/scheduler/internal/cache/BUILD b/pkg/scheduler/internal/cache/BUILD index 0f710594d2..ec7d7f756f 100644 --- a/pkg/scheduler/internal/cache/BUILD +++ b/pkg/scheduler/internal/cache/BUILD @@ -32,7 +32,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/scheduler/internal/cache/node_tree_test.go b/pkg/scheduler/internal/cache/node_tree_test.go index 9b4371589f..e8cb35ba78 100644 --- a/pkg/scheduler/internal/cache/node_tree_test.go +++ b/pkg/scheduler/internal/cache/node_tree_test.go @@ -22,7 +22,6 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) var allNodes = []*v1.Node{ @@ -37,7 +36,7 @@ var allNodes = []*v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node-1", Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region-1", + v1.LabelZoneRegion: "region-1", }, }, }, @@ -46,7 +45,7 @@ var allNodes = []*v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node-2", Labels: map[string]string{ - kubeletapis.LabelZoneFailureDomain: "zone-2", + v1.LabelZoneFailureDomain: "zone-2", }, }, }, @@ -55,8 +54,8 @@ var allNodes = []*v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node-3", Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region-1", - kubeletapis.LabelZoneFailureDomain: "zone-2", + 
v1.LabelZoneRegion: "region-1", + v1.LabelZoneFailureDomain: "zone-2", }, }, }, @@ -65,8 +64,8 @@ var allNodes = []*v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node-4", Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region-1", - kubeletapis.LabelZoneFailureDomain: "zone-2", + v1.LabelZoneRegion: "region-1", + v1.LabelZoneFailureDomain: "zone-2", }, }, }, @@ -75,8 +74,8 @@ var allNodes = []*v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node-5", Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region-1", - kubeletapis.LabelZoneFailureDomain: "zone-3", + v1.LabelZoneRegion: "region-1", + v1.LabelZoneFailureDomain: "zone-3", }, }, }, @@ -85,8 +84,8 @@ var allNodes = []*v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node-6", Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region-2", - kubeletapis.LabelZoneFailureDomain: "zone-2", + v1.LabelZoneRegion: "region-2", + v1.LabelZoneFailureDomain: "zone-2", }, }, }, @@ -95,8 +94,8 @@ var allNodes = []*v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node-7", Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region-2", - kubeletapis.LabelZoneFailureDomain: "zone-2", + v1.LabelZoneRegion: "region-2", + v1.LabelZoneFailureDomain: "zone-2", }, }, }, @@ -105,8 +104,8 @@ var allNodes = []*v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node-8", Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region-2", - kubeletapis.LabelZoneFailureDomain: "zone-2", + v1.LabelZoneRegion: "region-2", + v1.LabelZoneFailureDomain: "zone-2", }, }, }} @@ -252,8 +251,8 @@ func TestNodeTree_UpdateNode(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node-0", Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region-1", - kubeletapis.LabelZoneFailureDomain: "zone-2", + v1.LabelZoneRegion: "region-1", + v1.LabelZoneFailureDomain: "zone-2", }, }, }, @@ -272,8 +271,8 @@ func TestNodeTree_UpdateNode(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node-0", Labels: map[string]string{ - 
kubeletapis.LabelZoneRegion: "region-1", - kubeletapis.LabelZoneFailureDomain: "zone-2", + v1.LabelZoneRegion: "region-1", + v1.LabelZoneFailureDomain: "zone-2", }, }, }, @@ -288,8 +287,8 @@ func TestNodeTree_UpdateNode(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node-new", Labels: map[string]string{ - kubeletapis.LabelZoneRegion: "region-1", - kubeletapis.LabelZoneFailureDomain: "zone-2", + v1.LabelZoneRegion: "region-1", + v1.LabelZoneFailureDomain: "zone-2", }, }, }, diff --git a/pkg/util/node/BUILD b/pkg/util/node/BUILD index 7e1696cefe..08692c148a 100644 --- a/pkg/util/node/BUILD +++ b/pkg/util/node/BUILD @@ -11,7 +11,6 @@ go_library( srcs = ["node.go"], importpath = "k8s.io/kubernetes/pkg/util/node", deps = [ - "//pkg/kubelet/apis:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", @@ -27,7 +26,6 @@ go_test( srcs = ["node_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/kubelet/apis:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/pkg/util/node/node.go b/pkg/util/node/node.go index ce9ce636a9..087a0bc82d 100644 --- a/pkg/util/node/node.go +++ b/pkg/util/node/node.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) const ( @@ -130,8 +129,8 @@ func GetZoneKey(node *v1.Node) string { return "" } - region, _ := labels[kubeletapis.LabelZoneRegion] - failureDomain, _ := labels[kubeletapis.LabelZoneFailureDomain] + region, _ := labels[v1.LabelZoneRegion] + failureDomain, _ := labels[v1.LabelZoneFailureDomain] if region == "" && failureDomain == "" { return "" diff --git a/pkg/util/node/node_test.go 
b/pkg/util/node/node_test.go index 964bf73279..2d7d2d6277 100644 --- a/pkg/util/node/node_test.go +++ b/pkg/util/node/node_test.go @@ -21,7 +21,6 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) func TestGetPreferredAddress(t *testing.T) { @@ -53,7 +52,7 @@ func TestGetPreferredAddress(t *testing.T) { ExpectAddress: "1.2.3.5", }, "found hostname address": { - Labels: map[string]string{kubeletapis.LabelHostname: "label-hostname"}, + Labels: map[string]string{v1.LabelHostname: "label-hostname"}, Addresses: []v1.NodeAddress{ {Type: v1.NodeExternalIP, Address: "1.2.3.5"}, {Type: v1.NodeHostName, Address: "status-hostname"}, @@ -62,7 +61,7 @@ func TestGetPreferredAddress(t *testing.T) { ExpectAddress: "status-hostname", }, "label address ignored": { - Labels: map[string]string{kubeletapis.LabelHostname: "label-hostname"}, + Labels: map[string]string{v1.LabelHostname: "label-hostname"}, Addresses: []v1.NodeAddress{ {Type: v1.NodeExternalIP, Address: "1.2.3.5"}, }, diff --git a/pkg/volume/azure_dd/BUILD b/pkg/volume/azure_dd/BUILD index 74dd5b980a..8888651b9c 100644 --- a/pkg/volume/azure_dd/BUILD +++ b/pkg/volume/azure_dd/BUILD @@ -24,7 +24,6 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", diff --git a/pkg/volume/azure_dd/azure_provision.go b/pkg/volume/azure_dd/azure_provision.go index c915f7b47c..15f543469a 100644 --- a/pkg/volume/azure_dd/azure_provision.go +++ b/pkg/volume/azure_dd/azure_provision.go @@ -30,7 +30,6 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" "k8s.io/kubernetes/pkg/features" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" 
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) @@ -333,12 +332,12 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie for i := 0; i < 3; i++ { requirements := []v1.NodeSelectorRequirement{ { - Key: kubeletapis.LabelZoneRegion, + Key: v1.LabelZoneRegion, Operator: v1.NodeSelectorOpIn, Values: []string{diskController.GetLocation()}, }, { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: v1.NodeSelectorOpIn, Values: []string{strconv.Itoa(i)}, }, diff --git a/pkg/volume/cinder/BUILD b/pkg/volume/cinder/BUILD index 8b99eb6451..a594d03866 100644 --- a/pkg/volume/cinder/BUILD +++ b/pkg/volume/cinder/BUILD @@ -19,7 +19,6 @@ go_library( deps = [ "//pkg/cloudprovider/providers/openstack:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", @@ -49,7 +48,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", diff --git a/pkg/volume/cinder/cinder_test.go b/pkg/volume/cinder/cinder_test.go index 7a4a526677..df1b08f883 100644 --- a/pkg/volume/cinder/cinder_test.go +++ b/pkg/volume/cinder/cinder_test.go @@ -26,7 +26,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utiltesting "k8s.io/client-go/util/testing" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -119,7 +118,7 @@ func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error { func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype 
string, err error) { labels = make(map[string]string) - labels[kubeletapis.LabelZoneFailureDomain] = "nova" + labels[v1.LabelZoneFailureDomain] = "nova" return "test-volume-name", 1, labels, "", nil } @@ -234,7 +233,7 @@ func TestPlugin(t *testing.T) { req := persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0] - if req.Key != kubeletapis.LabelZoneFailureDomain { + if req.Key != v1.LabelZoneFailureDomain { t.Errorf("Provision() returned unexpected requirement key in NodeAffinity %v", req.Key) } diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go index 57c5254e80..c082dad0b4 100644 --- a/pkg/volume/cinder/cinder_util.go +++ b/pkg/volume/cinder/cinder_util.go @@ -30,7 +30,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/utils/exec" @@ -153,7 +152,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) { return zones, err } for _, node := range nodes.Items { - if zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]; ok { + if zone, ok := node.Labels[v1.LabelZoneFailureDomain]; ok { zones.Insert(zone) } } @@ -226,10 +225,10 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, al volumeLabels = make(map[string]string) if IgnoreVolumeAZ == false { if volumeAZ != "" { - volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ + volumeLabels[v1.LabelZoneFailureDomain] = volumeAZ } if volumeRegion != "" { - volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion + volumeLabels[v1.LabelZoneRegion] = volumeRegion } } return volumeID, volSizeGiB, volumeLabels, fstype, nil diff --git a/pkg/volume/gcepd/BUILD b/pkg/volume/gcepd/BUILD index f4ff333f4e..04ba262e9d 100644 --- a/pkg/volume/gcepd/BUILD +++ b/pkg/volume/gcepd/BUILD @@ -19,7 +19,6 @@ go_library( deps = [ 
"//pkg/cloudprovider/providers/gce:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", @@ -49,7 +48,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", diff --git a/pkg/volume/gcepd/attacher_test.go b/pkg/volume/gcepd/attacher_test.go index 6f9ab41c82..929157e0c0 100644 --- a/pkg/volume/gcepd/attacher_test.go +++ b/pkg/volume/gcepd/attacher_test.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/klog" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "strings" ) @@ -284,9 +283,9 @@ func createPVSpec(name string, readOnly bool, zones []string) *volume.Spec { } if zones != nil { - zonesLabel := strings.Join(zones, kubeletapis.LabelMultiZoneDelimiter) + zonesLabel := strings.Join(zones, v1.LabelMultiZoneDelimiter) spec.PersistentVolume.ObjectMeta.Labels = map[string]string{ - kubeletapis.LabelZoneFailureDomain: zonesLabel, + v1.LabelZoneFailureDomain: zonesLabel, } } diff --git a/pkg/volume/gcepd/gce_pd.go b/pkg/volume/gcepd/gce_pd.go index bf1bf9ccb4..1ed7661208 100644 --- a/pkg/volume/gcepd/gce_pd.go +++ b/pkg/volume/gcepd/gce_pd.go @@ -32,7 +32,6 @@ import ( "k8s.io/klog" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/features" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -544,7 +543,7 @@ func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedT for k, v := range labels { pv.Labels[k] = v var values []string - if k == kubeletapis.LabelZoneFailureDomain { + if k == v1.LabelZoneFailureDomain { values, err = util.LabelZonesToList(v) if err != nil { return 
nil, fmt.Errorf("failed to convert label string for Zone: %s to a List: %v", v, err) diff --git a/pkg/volume/gcepd/gce_pd_test.go b/pkg/volume/gcepd/gce_pd_test.go index a3200de579..41f1b43c9e 100644 --- a/pkg/volume/gcepd/gce_pd_test.go +++ b/pkg/volume/gcepd/gce_pd_test.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/fake" utiltesting "k8s.io/client-go/util/testing" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -85,7 +84,7 @@ type fakePDManager struct { func (fake *fakePDManager) CreateVolume(c *gcePersistentDiskProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) { labels = make(map[string]string) labels["fakepdmanager"] = "yes" - labels[kubeletapis.LabelZoneFailureDomain] = "zone1__zone2" + labels[v1.LabelZoneFailureDomain] = "zone1__zone2" return "test-gce-volume-name", 100, labels, "", nil } @@ -199,8 +198,8 @@ func TestPlugin(t *testing.T) { t.Errorf("Provision() returned unexpected value for fakepdmanager: %v", persistentSpec.Labels["fakepdmanager"]) } - if persistentSpec.Labels[kubeletapis.LabelZoneFailureDomain] != "zone1__zone2" { - t.Errorf("Provision() returned unexpected value for %s: %v", kubeletapis.LabelZoneFailureDomain, persistentSpec.Labels[kubeletapis.LabelZoneFailureDomain]) + if persistentSpec.Labels[v1.LabelZoneFailureDomain] != "zone1__zone2" { + t.Errorf("Provision() returned unexpected value for %s: %v", v1.LabelZoneFailureDomain, persistentSpec.Labels[v1.LabelZoneFailureDomain]) } if persistentSpec.Spec.NodeAffinity == nil { @@ -218,9 +217,9 @@ func TestPlugin(t *testing.T) { t.Errorf("NodeSelectorRequirement fakepdmanager-in-yes not found in volume NodeAffinity") } zones, _ := volumeutil.ZonesToSet("zone1,zone2") - r, _ = 
getNodeSelectorRequirementWithKey(kubeletapis.LabelZoneFailureDomain, term) + r, _ = getNodeSelectorRequirementWithKey(v1.LabelZoneFailureDomain, term) if r == nil { - t.Errorf("NodeSelectorRequirement %s-in-%v not found in volume NodeAffinity", kubeletapis.LabelZoneFailureDomain, zones) + t.Errorf("NodeSelectorRequirement %s-in-%v not found in volume NodeAffinity", v1.LabelZoneFailureDomain, zones) } sort.Strings(r.Values) if !reflect.DeepEqual(r.Values, zones.List()) { diff --git a/pkg/volume/gcepd/gce_util.go b/pkg/volume/gcepd/gce_util.go index bb5ff8c844..876bf201e9 100644 --- a/pkg/volume/gcepd/gce_util.go +++ b/pkg/volume/gcepd/gce_util.go @@ -31,7 +31,6 @@ import ( cloudfeatures "k8s.io/cloud-provider/features" "k8s.io/klog" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -354,8 +353,8 @@ func udevadmChangeToDrive(drivePath string) error { // Checks whether the given GCE PD volume spec is associated with a regional PD. 
func isRegionalPD(spec *volume.Spec) bool { if spec.PersistentVolume != nil { - zonesLabel := spec.PersistentVolume.Labels[kubeletapis.LabelZoneFailureDomain] - zones := strings.Split(zonesLabel, kubeletapis.LabelMultiZoneDelimiter) + zonesLabel := spec.PersistentVolume.Labels[v1.LabelZoneFailureDomain] + zones := strings.Split(zonesLabel, v1.LabelMultiZoneDelimiter) return len(zones) > 1 } return false diff --git a/pkg/volume/util/BUILD b/pkg/volume/util/BUILD index d325599af3..a23800eb26 100644 --- a/pkg/volume/util/BUILD +++ b/pkg/volume/util/BUILD @@ -23,7 +23,6 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/resizefs:go_default_library", "//pkg/volume:go_default_library", @@ -60,7 +59,6 @@ go_test( deps = [ "//pkg/apis/core/install:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/util/slice:go_default_library", "//pkg/volume:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index 34018f8f0e..41bf22dcdf 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -42,7 +42,6 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util/types" @@ -249,9 +248,9 @@ func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zone // pick node's zone for one of the replicas var ok bool - zoneFromNode, ok = node.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] + zoneFromNode, ok = node.ObjectMeta.Labels[v1.LabelZoneFailureDomain] if !ok { - return nil, fmt.Errorf("%s 
Label for node missing", kubeletapis.LabelZoneFailureDomain) + return nil, fmt.Errorf("%s Label for node missing", v1.LabelZoneFailureDomain) } // if single replica volume and node with zone found, return immediately if numReplicas == 1 { @@ -266,7 +265,7 @@ func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zone } if (len(allowedTopologies) > 0) && (allowedZones.Len() == 0) { - return nil, fmt.Errorf("no matchLabelExpressions with %s key found in allowedTopologies. Please specify matchLabelExpressions with %s key", kubeletapis.LabelZoneFailureDomain, kubeletapis.LabelZoneFailureDomain) + return nil, fmt.Errorf("no matchLabelExpressions with %s key found in allowedTopologies. Please specify matchLabelExpressions with %s key", v1.LabelZoneFailureDomain, v1.LabelZoneFailureDomain) } if allowedZones.Len() > 0 { @@ -316,7 +315,7 @@ func ZonesFromAllowedTopologies(allowedTopologies []v1.TopologySelectorTerm) (se zones := make(sets.String) for _, term := range allowedTopologies { for _, exp := range term.MatchLabelExpressions { - if exp.Key == kubeletapis.LabelZoneFailureDomain { + if exp.Key == v1.LabelZoneFailureDomain { for _, value := range exp.Values { zones.Insert(value) } @@ -330,7 +329,7 @@ func ZonesFromAllowedTopologies(allowedTopologies []v1.TopologySelectorTerm) (se // ZonesSetToLabelValue converts zones set to label value func ZonesSetToLabelValue(strSet sets.String) string { - return strings.Join(strSet.UnsortedList(), kubeletapis.LabelMultiZoneDelimiter) + return strings.Join(strSet.UnsortedList(), v1.LabelMultiZoneDelimiter) } // ZonesToSet converts a string containing a comma separated list of zones to set @@ -344,7 +343,7 @@ func ZonesToSet(zonesString string) (sets.String, error) { // LabelZonesToSet converts a PV label value from string containing a delimited list of zones to set func LabelZonesToSet(labelZonesValue string) (sets.String, error) { - return stringToSet(labelZonesValue, kubeletapis.LabelMultiZoneDelimiter) + 
return stringToSet(labelZonesValue, v1.LabelMultiZoneDelimiter) } // StringToSet converts a string containing list separated by specified delimiter to a set @@ -366,7 +365,7 @@ func stringToSet(str, delimiter string) (sets.String, error) { // LabelZonesToList converts a PV label value from string containing a delimited list of zones to list func LabelZonesToList(labelZonesValue string) ([]string, error) { - return stringToList(labelZonesValue, kubeletapis.LabelMultiZoneDelimiter) + return stringToList(labelZonesValue, v1.LabelMultiZoneDelimiter) } // StringToList converts a string containing list separated by specified delimiter to a list diff --git a/pkg/volume/util/util_test.go b/pkg/volume/util/util_test.go index 257410e226..4fa3dc95bc 100644 --- a/pkg/volume/util/util_test.go +++ b/pkg/volume/util/util_test.go @@ -36,7 +36,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/slice" "k8s.io/kubernetes/pkg/volume" @@ -1007,7 +1006,7 @@ func TestValidateZone(t *testing.T) { func TestSelectZoneForVolume(t *testing.T) { nodeWithZoneLabels := &v1.Node{} - nodeWithZoneLabels.Labels = map[string]string{kubeletapis.LabelZoneFailureDomain: "zoneX"} + nodeWithZoneLabels.Labels = map[string]string{v1.LabelZoneFailureDomain: "zoneX"} nodeWithNoLabels := &v1.Node{} @@ -1089,7 +1088,7 @@ func TestSelectZoneForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX"}, }, }, @@ -1111,7 +1110,7 @@ func TestSelectZoneForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX"}, }, }, @@ -1135,7 +1134,7 @@ func TestSelectZoneForVolume(t *testing.T) { Values: []string{"zoneX"}, }, { - Key: 
kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneY"}, }, }, @@ -1223,7 +1222,7 @@ func TestSelectZoneForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneZ", "zoneY"}, }, }, @@ -1245,7 +1244,7 @@ func TestSelectZoneForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX", "zoneY"}, }, }, @@ -1266,7 +1265,7 @@ func TestSelectZoneForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX"}, }, }, @@ -1274,7 +1273,7 @@ func TestSelectZoneForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneY"}, }, }, @@ -1296,7 +1295,7 @@ func TestSelectZoneForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX"}, }, }, @@ -1361,7 +1360,7 @@ func TestSelectZoneForVolume(t *testing.T) { func TestSelectZonesForVolume(t *testing.T) { nodeWithZoneLabels := &v1.Node{} - nodeWithZoneLabels.Labels = map[string]string{kubeletapis.LabelZoneFailureDomain: "zoneX"} + nodeWithZoneLabels.Labels = map[string]string{v1.LabelZoneFailureDomain: "zoneX"} nodeWithNoLabels := &v1.Node{} @@ -1455,7 +1454,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX"}, }, }, @@ -1479,7 +1478,7 @@ func TestSelectZonesForVolume(t *testing.T) 
{ { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX"}, }, }, @@ -1505,7 +1504,7 @@ func TestSelectZonesForVolume(t *testing.T) { Values: []string{"zoneX"}, }, { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneY"}, }, }, @@ -1589,7 +1588,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX"}, }, }, @@ -1713,7 +1712,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneV", "zoneW", "zoneX", "zoneY", "zoneZ"}, }, }, @@ -1739,7 +1738,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX", "zoneY"}, }, }, @@ -1766,7 +1765,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX", "zoneY", "zoneZ"}, }, }, @@ -1790,7 +1789,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX", "zoneY"}, }, }, @@ -1818,7 +1817,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneV"}, }, }, @@ -1826,7 +1825,7 @@ func TestSelectZonesForVolume(t *testing.T) { { 
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneW"}, }, }, @@ -1834,7 +1833,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX"}, }, }, @@ -1842,7 +1841,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneY"}, }, }, @@ -1850,7 +1849,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneZ"}, }, }, @@ -1876,7 +1875,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX"}, }, }, @@ -1884,7 +1883,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneY"}, }, }, @@ -1911,7 +1910,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX"}, }, }, @@ -1919,7 +1918,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneY"}, }, }, @@ -1927,7 +1926,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: 
[]v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneZ"}, }, }, @@ -1951,7 +1950,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneX"}, }, }, @@ -1959,7 +1958,7 @@ func TestSelectZonesForVolume(t *testing.T) { { MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{"zoneY"}, }, }, diff --git a/plugin/pkg/admission/antiaffinity/BUILD b/plugin/pkg/admission/antiaffinity/BUILD index a9dcc98505..3a6b033752 100644 --- a/plugin/pkg/admission/antiaffinity/BUILD +++ b/plugin/pkg/admission/antiaffinity/BUILD @@ -15,7 +15,7 @@ go_library( importpath = "k8s.io/kubernetes/plugin/pkg/admission/antiaffinity", deps = [ "//pkg/apis/core:go_default_library", - "//pkg/kubelet/apis:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", ], @@ -27,7 +27,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", - "//pkg/kubelet/apis:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/plugin/pkg/admission/antiaffinity/admission.go b/plugin/pkg/admission/antiaffinity/admission.go index b337697397..5e626c50d4 100644 --- a/plugin/pkg/admission/antiaffinity/admission.go +++ b/plugin/pkg/admission/antiaffinity/admission.go @@ -20,10 +20,10 @@ import ( "fmt" "io" + "k8s.io/api/core/v1" apierrors 
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/apis/core" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) const PluginName = "LimitPodHardAntiAffinityTopology" @@ -49,7 +49,7 @@ func NewInterPodAntiAffinity() *Plugin { } } -// Validate will deny any pod that defines AntiAffinity topology key other than kubeletapis.LabelHostname i.e. "kubernetes.io/hostname" +// Validate will deny any pod that defines AntiAffinity topology key other than v1.LabelHostname i.e. "kubernetes.io/hostname" // in requiredDuringSchedulingRequiredDuringExecution and requiredDuringSchedulingIgnoredDuringExecution. func (p *Plugin) Validate(attributes admission.Attributes) (err error) { // Ignore all calls to subresources or resources other than pods. @@ -71,8 +71,8 @@ func (p *Plugin) Validate(attributes admission.Attributes) (err error) { // podAntiAffinityTerms = append(podAntiAffinityTerms, affinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...) 
//} for _, v := range podAntiAffinityTerms { - if v.TopologyKey != kubeletapis.LabelHostname { - return apierrors.NewForbidden(attributes.GetResource().GroupResource(), pod.Name, fmt.Errorf("affinity.PodAntiAffinity.RequiredDuringScheduling has TopologyKey %v but only key %v is allowed", v.TopologyKey, kubeletapis.LabelHostname)) + if v.TopologyKey != v1.LabelHostname { + return apierrors.NewForbidden(attributes.GetResource().GroupResource(), pod.Name, fmt.Errorf("affinity.PodAntiAffinity.RequiredDuringScheduling has TopologyKey %v but only key %v is allowed", v.TopologyKey, v1.LabelHostname)) } } } diff --git a/plugin/pkg/admission/antiaffinity/admission_test.go b/plugin/pkg/admission/antiaffinity/admission_test.go index 6d59059a95..abb5faea58 100644 --- a/plugin/pkg/admission/antiaffinity/admission_test.go +++ b/plugin/pkg/admission/antiaffinity/admission_test.go @@ -19,11 +19,11 @@ package antiaffinity import ( "testing" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/apis/core" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) // ensures the hard PodAntiAffinity is denied if it defines TopologyKey other than kubernetes.io/hostname. 
@@ -101,7 +101,7 @@ func TestInterPodAffinityAdmission(t *testing.T) { }, }, }, - TopologyKey: kubeletapis.LabelHostname, + TopologyKey: v1.LabelHostname, }, }, }, @@ -123,7 +123,7 @@ func TestInterPodAffinityAdmission(t *testing.T) { }, }, }, - TopologyKey: kubeletapis.LabelHostname, + TopologyKey: v1.LabelHostname, }, }, }, @@ -167,7 +167,7 @@ func TestInterPodAffinityAdmission(t *testing.T) { }, }, }, - TopologyKey: kubeletapis.LabelHostname, + TopologyKey: v1.LabelHostname, }, { LabelSelector: &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ @@ -189,7 +189,7 @@ func TestInterPodAffinityAdmission(t *testing.T) { }, }, }, - TopologyKey: kubeletapis.LabelHostname, + TopologyKey: v1.LabelHostname, }, }, }, diff --git a/plugin/pkg/admission/antiaffinity/doc.go b/plugin/pkg/admission/antiaffinity/doc.go index d8a9b87ede..28db3a0434 100644 --- a/plugin/pkg/admission/antiaffinity/doc.go +++ b/plugin/pkg/admission/antiaffinity/doc.go @@ -16,7 +16,7 @@ limitations under the License. // LimitPodHardAntiAffinityTopology admission controller rejects any pod // that specifies "hard" (RequiredDuringScheduling) anti-affinity -// with a TopologyKey other than kubeletapis.LabelHostname. +// with a TopologyKey other than v1.LabelHostname. 
// Because anti-affinity is symmetric, without this admission controller, // a user could maliciously or accidentally specify that their pod (once it has scheduled) // should block other pods from scheduling into the same zone or some other large topology, diff --git a/plugin/pkg/admission/noderestriction/BUILD b/plugin/pkg/admission/noderestriction/BUILD index 5c3bd1e93d..6a632ef272 100644 --- a/plugin/pkg/admission/noderestriction/BUILD +++ b/plugin/pkg/admission/noderestriction/BUILD @@ -19,6 +19,7 @@ go_library( "//pkg/auth/nodeidentifier:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/plugin/pkg/admission/noderestriction/admission.go b/plugin/pkg/admission/noderestriction/admission.go index e44f026392..e5ef4578e5 100644 --- a/plugin/pkg/admission/noderestriction/admission.go +++ b/plugin/pkg/admission/noderestriction/admission.go @@ -21,6 +21,7 @@ import ( "io" "strings" + "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -434,7 +435,7 @@ func (c *nodePlugin) getForbiddenCreateLabels(modifiedLabels sets.String) sets.S for label := range modifiedLabels { namespace := getLabelNamespace(label) // forbid kubelets from setting node-restriction labels - if namespace == kubeletapis.LabelNamespaceNodeRestriction || strings.HasSuffix(namespace, "."+kubeletapis.LabelNamespaceNodeRestriction) { + if namespace == v1.LabelNamespaceNodeRestriction || strings.HasSuffix(namespace, "."+v1.LabelNamespaceNodeRestriction) { forbiddenLabels.Insert(label) } } @@ -451,7 +452,7 @@ func (c *nodePlugin) getForbiddenUpdateLabels(modifiedLabels sets.String) sets.S 
for label := range modifiedLabels { namespace := getLabelNamespace(label) // forbid kubelets from setting node-restriction labels - if namespace == kubeletapis.LabelNamespaceNodeRestriction || strings.HasSuffix(namespace, "."+kubeletapis.LabelNamespaceNodeRestriction) { + if namespace == v1.LabelNamespaceNodeRestriction || strings.HasSuffix(namespace, "."+v1.LabelNamespaceNodeRestriction) { forbiddenLabels.Insert(label) } // forbid kubelets from setting unknown kubernetes.io and k8s.io labels on update diff --git a/plugin/pkg/admission/storage/persistentvolume/label/BUILD b/plugin/pkg/admission/storage/persistentvolume/label/BUILD index 115603d0cf..e5cb1db920 100644 --- a/plugin/pkg/admission/storage/persistentvolume/label/BUILD +++ b/plugin/pkg/admission/storage/persistentvolume/label/BUILD @@ -17,7 +17,6 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -33,7 +32,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/plugin/pkg/admission/storage/persistentvolume/label/admission.go b/plugin/pkg/admission/storage/persistentvolume/label/admission.go index ddaa5189e0..037dcd420a 100644 --- a/plugin/pkg/admission/storage/persistentvolume/label/admission.go +++ b/plugin/pkg/admission/storage/persistentvolume/label/admission.go @@ -31,7 +31,6 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" - 
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" vol "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -153,7 +152,7 @@ func (l *persistentVolumeLabel) Admit(a admission.Attributes) (err error) { // Set NodeSelectorRequirements based on the labels var values []string - if k == kubeletapis.LabelZoneFailureDomain { + if k == v1.LabelZoneFailureDomain { zones, err := volumeutil.LabelZonesToSet(v) if err != nil { return admission.NewForbidden(a, fmt.Errorf("failed to convert label string for Zone: %s to a Set", v)) diff --git a/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go b/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go index a35a845971..588e679ffb 100644 --- a/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go +++ b/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go @@ -30,7 +30,6 @@ import ( "k8s.io/apiserver/pkg/admission" cloudprovider "k8s.io/cloud-provider" api "k8s.io/kubernetes/pkg/apis/core" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) type mockVolumes struct { @@ -65,9 +64,9 @@ func Test_PVLAdmission(t *testing.T) { name: "non-cloud PV ignored", handler: newPersistentVolumeLabel(), pvlabeler: mockVolumeLabels(map[string]string{ - "a": "1", - "b": "2", - kubeletapis.LabelZoneFailureDomain: "1__2__3", + "a": "1", + "b": "2", + v1.LabelZoneFailureDomain: "1__2__3", }), preAdmissionPV: &api.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{Name: "noncloud", Namespace: "myns"}, @@ -173,9 +172,9 @@ func Test_PVLAdmission(t *testing.T) { name: "AWS EBS PV labeled correctly", handler: newPersistentVolumeLabel(), pvlabeler: mockVolumeLabels(map[string]string{ - "a": "1", - "b": "2", - kubeletapis.LabelZoneFailureDomain: "1__2__3", + "a": "1", + "b": "2", + v1.LabelZoneFailureDomain: "1__2__3", }), preAdmissionPV: &api.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{Name: "awsebs", Namespace: "myns"}, @@ -192,9 +191,9 @@ func Test_PVLAdmission(t 
*testing.T) { Name: "awsebs", Namespace: "myns", Labels: map[string]string{ - "a": "1", - "b": "2", - kubeletapis.LabelZoneFailureDomain: "1__2__3", + "a": "1", + "b": "2", + v1.LabelZoneFailureDomain: "1__2__3", }, }, Spec: api.PersistentVolumeSpec{ @@ -219,7 +218,7 @@ func Test_PVLAdmission(t *testing.T) { Values: []string{"2"}, }, { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: api.NodeSelectorOpIn, Values: []string{"1", "2", "3"}, }, @@ -236,9 +235,9 @@ func Test_PVLAdmission(t *testing.T) { name: "GCE PD PV labeled correctly", handler: newPersistentVolumeLabel(), pvlabeler: mockVolumeLabels(map[string]string{ - "a": "1", - "b": "2", - kubeletapis.LabelZoneFailureDomain: "1__2__3", + "a": "1", + "b": "2", + v1.LabelZoneFailureDomain: "1__2__3", }), preAdmissionPV: &api.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{Name: "gcepd", Namespace: "myns"}, @@ -255,9 +254,9 @@ func Test_PVLAdmission(t *testing.T) { Name: "gcepd", Namespace: "myns", Labels: map[string]string{ - "a": "1", - "b": "2", - kubeletapis.LabelZoneFailureDomain: "1__2__3", + "a": "1", + "b": "2", + v1.LabelZoneFailureDomain: "1__2__3", }, }, Spec: api.PersistentVolumeSpec{ @@ -282,7 +281,7 @@ func Test_PVLAdmission(t *testing.T) { Values: []string{"2"}, }, { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: api.NodeSelectorOpIn, Values: []string{"1", "2", "3"}, }, @@ -299,9 +298,9 @@ func Test_PVLAdmission(t *testing.T) { name: "Azure Disk PV labeled correctly", handler: newPersistentVolumeLabel(), pvlabeler: mockVolumeLabels(map[string]string{ - "a": "1", - "b": "2", - kubeletapis.LabelZoneFailureDomain: "1__2__3", + "a": "1", + "b": "2", + v1.LabelZoneFailureDomain: "1__2__3", }), preAdmissionPV: &api.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ @@ -321,9 +320,9 @@ func Test_PVLAdmission(t *testing.T) { Name: "azurepd", Namespace: "myns", Labels: map[string]string{ - "a": "1", - "b": "2", - 
kubeletapis.LabelZoneFailureDomain: "1__2__3", + "a": "1", + "b": "2", + v1.LabelZoneFailureDomain: "1__2__3", }, }, Spec: api.PersistentVolumeSpec{ @@ -348,7 +347,7 @@ func Test_PVLAdmission(t *testing.T) { Values: []string{"2"}, }, { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: api.NodeSelectorOpIn, Values: []string{"1", "2", "3"}, }, @@ -365,9 +364,9 @@ func Test_PVLAdmission(t *testing.T) { name: "Cinder Disk PV labeled correctly", handler: newPersistentVolumeLabel(), pvlabeler: mockVolumeLabels(map[string]string{ - "a": "1", - "b": "2", - kubeletapis.LabelZoneFailureDomain: "1__2__3", + "a": "1", + "b": "2", + v1.LabelZoneFailureDomain: "1__2__3", }), preAdmissionPV: &api.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ @@ -387,9 +386,9 @@ func Test_PVLAdmission(t *testing.T) { Name: "azurepd", Namespace: "myns", Labels: map[string]string{ - "a": "1", - "b": "2", - kubeletapis.LabelZoneFailureDomain: "1__2__3", + "a": "1", + "b": "2", + v1.LabelZoneFailureDomain: "1__2__3", }, }, Spec: api.PersistentVolumeSpec{ @@ -414,7 +413,7 @@ func Test_PVLAdmission(t *testing.T) { Values: []string{"2"}, }, { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: api.NodeSelectorOpIn, Values: []string{"1", "2", "3"}, }, @@ -431,9 +430,9 @@ func Test_PVLAdmission(t *testing.T) { name: "AWS EBS PV overrides user applied labels", handler: newPersistentVolumeLabel(), pvlabeler: mockVolumeLabels(map[string]string{ - "a": "1", - "b": "2", - kubeletapis.LabelZoneFailureDomain: "1__2__3", + "a": "1", + "b": "2", + v1.LabelZoneFailureDomain: "1__2__3", }), preAdmissionPV: &api.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ @@ -456,9 +455,9 @@ func Test_PVLAdmission(t *testing.T) { Name: "awsebs", Namespace: "myns", Labels: map[string]string{ - "a": "1", - "b": "2", - kubeletapis.LabelZoneFailureDomain: "1__2__3", + "a": "1", + "b": "2", + v1.LabelZoneFailureDomain: "1__2__3", }, }, Spec: 
api.PersistentVolumeSpec{ @@ -483,7 +482,7 @@ func Test_PVLAdmission(t *testing.T) { Values: []string{"2"}, }, { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Operator: api.NodeSelectorOpIn, Values: []string{"1", "2", "3"}, }, diff --git a/staging/src/k8s.io/api/core/v1/BUILD b/staging/src/k8s.io/api/core/v1/BUILD index b4b965ad5f..ee4450562a 100644 --- a/staging/src/k8s.io/api/core/v1/BUILD +++ b/staging/src/k8s.io/api/core/v1/BUILD @@ -28,6 +28,7 @@ go_library( "toleration.go", "types.go", "types_swagger_doc_generated.go", + "well_known_labels.go", "zz_generated.deepcopy.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/api/core/v1", diff --git a/staging/src/k8s.io/api/core/v1/well_known_labels.go b/staging/src/k8s.io/api/core/v1/well_known_labels.go new file mode 100644 index 0000000000..aacf654fa6 --- /dev/null +++ b/staging/src/k8s.io/api/core/v1/well_known_labels.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + LabelHostname = "kubernetes.io/hostname" + LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" + LabelMultiZoneDelimiter = "__" + LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" + + LabelInstanceType = "beta.kubernetes.io/instance-type" + + LabelOS = "kubernetes.io/os" + LabelArch = "kubernetes.io/arch" + // The OS/Arch labels are promoted to GA in 1.14. 
kubelet applies both beta + // and GA labels to ensure backward compatibility. + // TODO: stop applying the beta OS/Arch labels in Kubernetes 1.17. + LegacyLabelOS = "beta.kubernetes.io/os" + LegacyLabelArch = "beta.kubernetes.io/arch" + + // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*) + LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io" + // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*) + LabelNamespaceSuffixNode = "node.kubernetes.io" + + // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled + LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" +) diff --git a/test/e2e/common/BUILD b/test/e2e/common/BUILD index 4f4ee5a8d8..bcd82581dc 100644 --- a/test/e2e/common/BUILD +++ b/test/e2e/common/BUILD @@ -49,7 +49,6 @@ go_library( "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/conditions:go_default_library", "//pkg/kubelet:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/images:go_default_library", "//pkg/kubelet/sysctl:go_default_library", "//pkg/security/apparmor:go_default_library", diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index d45dbe4d0a..113f1fffca 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -28,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" @@ -147,7 +146,7 @@ func RestartNodes(c clientset.Interface, nodes []v1.Node) error { for i := range nodes { node := &nodes[i] zone := framework.TestContext.CloudConfig.Zone - if z, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]; ok { + if 
z, ok := node.Labels[v1.LabelZoneFailureDomain]; ok { zone = z } nodeNamesByZone[zone] = append(nodeNamesByZone[zone], node.Name) diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index 355c0bccf2..7e934afcf8 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -56,7 +56,6 @@ go_library( "//pkg/controller/nodelifecycle:go_default_library", "//pkg/controller/service:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis/config:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/dockershim/metrics:go_default_library", diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 6155095d33..6759563750 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -86,7 +86,6 @@ import ( nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle" "k8s.io/kubernetes/pkg/controller/service" "k8s.io/kubernetes/pkg/features" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" @@ -5137,7 +5136,7 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) { // collect values of zone label from all nodes zones := sets.NewString() for _, node := range nodes.Items { - if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found { + if zone, found := node.Labels[v1.LabelZoneFailureDomain]; found { zones.Insert(zone) } } diff --git a/test/e2e/network/BUILD b/test/e2e/network/BUILD index a1987d8644..8df831586e 100644 --- a/test/e2e/network/BUILD +++ b/test/e2e/network/BUILD @@ -35,7 +35,6 @@ go_library( "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/cloudprovider/providers/gce:go_default_library", "//pkg/controller/endpoint:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/master/ports:go_default_library", 
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index 32e4c1ace1..db43181ab7 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -25,7 +25,6 @@ import ( clientset "k8s.io/client-go/kubernetes" cloudprovider "k8s.io/cloud-provider" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/providers/gce" @@ -142,7 +141,7 @@ var _ = SIGDescribe("Firewall rule", func() { // Instance could run in a different zone in multi-zone test. Figure out which zone // it is in before proceeding. zone := cloudConfig.Zone - if zoneInLabel, ok := nodeList.Items[0].Labels[kubeletapis.LabelZoneFailureDomain]; ok { + if zoneInLabel, ok := nodeList.Items[0].Labels[v1.LabelZoneFailureDomain]; ok { zone = zoneInLabel } removedTags := gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{}) diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index 9d58bc6769..bf1260593f 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -22,7 +22,6 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/scheduling:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go index ba95b36292..f985f133e0 100644 --- a/test/e2e/scheduling/ubernetes_lite.go +++ b/test/e2e/scheduling/ubernetes_lite.go @@ -28,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" clientset 
"k8s.io/client-go/kubernetes" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -117,12 +116,12 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) // Find the name of the zone in which a Node is running func getZoneNameForNode(node v1.Node) (string, error) { for key, value := range node.Labels { - if key == kubeletapis.LabelZoneFailureDomain { + if key == v1.LabelZoneFailureDomain { return value, nil } } return "", fmt.Errorf("Zone name for node %s not found. No label with key %s", - node.Name, kubeletapis.LabelZoneFailureDomain) + node.Name, v1.LabelZoneFailureDomain) } // Return the number of zones in which we have nodes in this cluster. diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go index 0493a87866..c4ffd285d3 100644 --- a/test/e2e/scheduling/ubernetes_lite_volumes.go +++ b/test/e2e/scheduling/ubernetes_lite_volumes.go @@ -28,7 +28,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/providers/gce" ) @@ -166,7 +165,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - pvZone, ok := pv.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] + pvZone, ok := pv.ObjectMeta.Labels[v1.LabelZoneFailureDomain] Expect(ok).To(BeTrue(), "PV has no LabelZone to be found") pvZones.Insert(pvZone) } diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 34162b8673..78579bae5f 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -35,7 +35,6 @@ go_library( 
"//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/storage/v1/util:go_default_library", "//pkg/client/conditions:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//pkg/util/slice:go_default_library", "//pkg/volume/util:go_default_library", diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index 171adbf263..b091732f4d 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -44,7 +44,6 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/util/rand" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) // List of testDrivers to be executed in below loop @@ -450,7 +449,7 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela test := createGCEPDStorageClassTest() test.DelayBinding = delayBinding - test.NodeSelector = map[string]string{kubeletapis.LabelZoneFailureDomain: podZone} + test.NodeSelector = map[string]string{v1.LabelZoneFailureDomain: podZone} test.ExpectUnschedulable = true class := newStorageClass(test, namespace, suffix) diff --git a/test/e2e/storage/drivers/BUILD b/test/e2e/storage/drivers/BUILD index da57c59f18..4ab3eb8a5e 100644 --- a/test/e2e/storage/drivers/BUILD +++ b/test/e2e/storage/drivers/BUILD @@ -10,7 +10,6 @@ go_library( importpath = "k8s.io/kubernetes/test/e2e/storage/drivers", visibility = ["//visibility:public"], deps = [ - "//pkg/kubelet/apis:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 908516da6e..a70b187ca3 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -52,7 +52,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" 
"k8s.io/apiserver/pkg/authentication/serviceaccount" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" @@ -1234,7 +1233,7 @@ func (g *gcePdDriver) CreateVolume(volType testpatterns.TestVolType) interface{} // PD will be created in framework.TestContext.CloudConfig.Zone zone, // so pods should be also scheduled there. g.driverInfo.Config.ClientNodeSelector = map[string]string{ - kubeletapis.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone, + v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone, } } By("creating a test gce pd volume") diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 3765964b6f..176d380f88 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -38,8 +38,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/pkg/kubelet/apis" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/testsuites" @@ -205,10 +203,10 @@ func testZonalFailover(c clientset.Interface, ns string) { nodeName := pod.Spec.NodeName node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - podZone := node.Labels[apis.LabelZoneFailureDomain] + podZone := node.Labels[v1.LabelZoneFailureDomain] By("tainting nodes in the zone the pod is scheduled in") - selector := labels.SelectorFromSet(labels.Set(map[string]string{apis.LabelZoneFailureDomain: podZone})) + selector := labels.SelectorFromSet(labels.Set(map[string]string{v1.LabelZoneFailureDomain: podZone})) nodesInZone, err := c.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()}) Expect(err).ToNot(HaveOccurred()) removeTaintFunc := 
addTaint(c, ns, nodesInZone.Items, podZone) @@ -237,7 +235,7 @@ func testZonalFailover(c clientset.Interface, ns string) { if err != nil { return false, nil } - newPodZone := node.Labels[apis.LabelZoneFailureDomain] + newPodZone := node.Labels[v1.LabelZoneFailureDomain] return newPodZone == otherZone, nil }) Expect(err).NotTo(HaveOccurred(), "Error waiting for pod to be scheduled in a different zone (%q): %v", otherZone, err) @@ -323,9 +321,9 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) if node == nil { framework.Failf("unexpected nil node found") } - zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain] + zone, ok := node.Labels[v1.LabelZoneFailureDomain] if !ok { - framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain) + framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) } for _, pv := range pvs { checkZoneFromLabelAndAffinity(pv, zone, false) @@ -380,9 +378,9 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s if node == nil { framework.Failf("unexpected nil node found") } - nodeZone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain] + nodeZone, ok := node.Labels[v1.LabelZoneFailureDomain] if !ok { - framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain) + framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) } zoneFound := false for _, zone := range topoZones { @@ -423,7 +421,7 @@ func addAllowedTopologiesToStorageClass(c clientset.Interface, sc *storage.Stora term := v1.TopologySelectorTerm{ MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: zones, }, }, @@ -585,7 +583,7 @@ func waitForStatefulSetReplicasNotReady(statefulSetName, ns string, c clientset. // If match is true, check if zones in PV exactly match zones given. // Otherwise, check whether zones in PV is superset of zones given. 
func verifyZonesInPV(volume *v1.PersistentVolume, zones sets.String, match bool) error { - pvZones, err := util.LabelZonesToSet(volume.Labels[apis.LabelZoneFailureDomain]) + pvZones, err := util.LabelZonesToSet(volume.Labels[v1.LabelZoneFailureDomain]) if err != nil { return err } diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index bf61f418e2..983c507040 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -44,7 +44,6 @@ import ( "k8s.io/apiserver/pkg/authentication/serviceaccount" clientset "k8s.io/client-go/kubernetes" storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/providers/gce" @@ -71,9 +70,9 @@ func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, if pv == nil { framework.Failf("nil pv passed") } - pvLabel, ok := pv.Labels[kubeletapis.LabelZoneFailureDomain] + pvLabel, ok := pv.Labels[v1.LabelZoneFailureDomain] if !ok { - framework.Failf("label %s not found on PV", kubeletapis.LabelZoneFailureDomain) + framework.Failf("label %s not found on PV", v1.LabelZoneFailureDomain) } zonesFromLabel, err := volumeutil.LabelZonesToSet(pvLabel) @@ -81,10 +80,10 @@ func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, framework.Failf("unable to parse zone labels %s: %v", pvLabel, err) } if matchZones && !zonesFromLabel.Equal(zones) { - framework.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", kubeletapis.LabelZoneFailureDomain, zonesFromLabel, zones) + framework.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones) } if !matchZones && !zonesFromLabel.IsSuperset(zones) { - framework.Failf("value[s] of %s label for PV: %v does not contain expected 
zone[s]: %v", kubeletapis.LabelZoneFailureDomain, zonesFromLabel, zones) + framework.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones) } if pv.Spec.NodeAffinity == nil { framework.Failf("node affinity not found in PV spec %v", pv.Spec) @@ -96,7 +95,7 @@ func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms { keyFound := false for _, r := range term.MatchExpressions { - if r.Key != kubeletapis.LabelZoneFailureDomain { + if r.Key != v1.LabelZoneFailureDomain { continue } keyFound = true @@ -110,7 +109,7 @@ func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, break } if !keyFound { - framework.Failf("label %s not found in term %v", kubeletapis.LabelZoneFailureDomain, term) + framework.Failf("label %s not found in term %v", v1.LabelZoneFailureDomain, term) } } } @@ -231,9 +230,9 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop if node == nil { framework.Failf("unexpected nil node found") } - zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain] + zone, ok := node.Labels[v1.LabelZoneFailureDomain] if !ok { - framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain) + framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) } if specifyAllowedTopology && topoZone != zone { framework.Failf("zone specified in allowedTopologies: %s does not match zone of node where PV got provisioned: %s", topoZone, zone) @@ -1021,7 +1020,7 @@ func addSingleZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *stora term := v1.TopologySelectorTerm{ MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ { - Key: kubeletapis.LabelZoneFailureDomain, + Key: v1.LabelZoneFailureDomain, Values: []string{zone}, }, }, diff --git a/test/integration/scheduler_perf/BUILD b/test/integration/scheduler_perf/BUILD 
index abaadbe35b..b83637d6c4 100644 --- a/test/integration/scheduler_perf/BUILD +++ b/test/integration/scheduler_perf/BUILD @@ -34,7 +34,6 @@ go_test( embed = [":go_default_library"], tags = ["integration"], deps = [ - "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/factory:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/test/integration/scheduler_perf/scheduler_bench_test.go b/test/integration/scheduler_perf/scheduler_bench_test.go index 818eb9a981..573916dae2 100644 --- a/test/integration/scheduler_perf/scheduler_bench_test.go +++ b/test/integration/scheduler_perf/scheduler_bench_test.go @@ -24,7 +24,6 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/integration/framework" testutils "k8s.io/kubernetes/test/utils" @@ -98,7 +97,7 @@ func BenchmarkSchedulingPodAffinity(b *testing.B) { ) // The test strategy creates pods with affinity for each other. testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod) - nodeStrategy := testutils.NewLabelNodePrepareStrategy(apis.LabelZoneFailureDomain, "zone1") + nodeStrategy := testutils.NewLabelNodePrepareStrategy(v1.LabelZoneFailureDomain, "zone1") for _, test := range tests { name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods) b.Run(name, func(b *testing.B) { @@ -119,10 +118,10 @@ func BenchmarkSchedulingNodeAffinity(b *testing.B) { } // The setup strategy creates pods with no affinity rules. setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup") - testBasePod := makeBasePodWithNodeAffinity(apis.LabelZoneFailureDomain, []string{"zone1", "zone2"}) + testBasePod := makeBasePodWithNodeAffinity(v1.LabelZoneFailureDomain, []string{"zone1", "zone2"}) // The test strategy creates pods with node-affinity for each other. 
testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod) - nodeStrategy := testutils.NewLabelNodePrepareStrategy(apis.LabelZoneFailureDomain, "zone1") + nodeStrategy := testutils.NewLabelNodePrepareStrategy(v1.LabelZoneFailureDomain, "zone1") for _, test := range tests { name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods) b.Run(name, func(b *testing.B) { @@ -148,7 +147,7 @@ func makeBasePodWithPodAntiAffinity(podLabels, affinityLabels map[string]string) LabelSelector: &metav1.LabelSelector{ MatchLabels: affinityLabels, }, - TopologyKey: apis.LabelHostname, + TopologyKey: v1.LabelHostname, }, }, }, @@ -173,7 +172,7 @@ func makeBasePodWithPodAffinity(podLabels, affinityZoneLabels map[string]string) LabelSelector: &metav1.LabelSelector{ MatchLabels: affinityZoneLabels, }, - TopologyKey: apis.LabelZoneFailureDomain, + TopologyKey: v1.LabelZoneFailureDomain, }, }, },