dependencies: pkg/volume

pull/6/head
Chao Xu 2016-11-18 12:58:56 -08:00
parent c962c2602a
commit bb675d395f
73 changed files with 1195 additions and 1194 deletions
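In short: pkg/volume and its tests move off the internal k8s.io/kubernetes/pkg/api types and onto the versioned k8s.io/kubernetes/pkg/api/v1 types, and test fixtures switch from the internal clientset to the generated release_1_5 clientset. The pattern is mechanical; a minimal sketch of the post-migration shape (the helper name is illustrative, not from this commit):

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/volume"
)

// ebsSpec builds a volume.Spec from the versioned v1 types; before this
// commit the same code was written against the internal api package.
func ebsSpec(volumeID string, readOnly bool) *volume.Spec {
	return volume.NewSpecFromVolume(&v1.Volume{
		Name: "example",
		VolumeSource: v1.VolumeSource{
			AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
				VolumeID: volumeID,
				ReadOnly: readOnly,
			},
		},
	})
}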

----------------------------------------

@@ -20,7 +20,7 @@ import (
"errors"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -191,9 +191,9 @@ func newDetacher(testcase *testcase) *awsElasticBlockStoreDetacher {
func createVolSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
return &volume.Spec{
Volume: &api.Volume{
VolumeSource: api.VolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: string(name),
ReadOnly: readOnly,
},
@@ -204,10 +204,10 @@ func createVolSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
func createPVSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
return &volume.Spec{
PersistentVolume: &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: string(name),
ReadOnly: readOnly,
},

----------------------------------------

@@ -25,8 +25,8 @@ import (
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
@@ -85,13 +85,13 @@ func (plugin *awsElasticBlockStorePlugin) RequiresRemount() bool {
return false
}
func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
func (plugin *awsElasticBlockStorePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *awsElasticBlockStorePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &AWSDiskUtil{}, plugin.host.GetMounter())
}
@@ -176,7 +176,7 @@ func (plugin *awsElasticBlockStorePlugin) newProvisionerInternal(options volume.
}
func getVolumeSource(
spec *volume.Spec) (*api.AWSElasticBlockStoreVolumeSource, bool, error) {
spec *volume.Spec) (*v1.AWSElasticBlockStoreVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
return spec.Volume.AWSElasticBlockStore, spec.Volume.AWSElasticBlockStore.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
@@ -220,10 +220,10 @@ func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath
glog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName)
}
awsVolume := &api.Volume{
awsVolume := &v1.Volume{
Name: volName,
VolumeSource: api.VolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: sourceName,
},
},
@@ -444,29 +444,29 @@ type awsElasticBlockStoreProvisioner struct {
var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}
func (c *awsElasticBlockStoreProvisioner) Provision() (*api.PersistentVolume, error) {
func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, error) {
volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
if err != nil {
glog.Errorf("Provision failed: %v", err)
return nil, err
}
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "aws-ebs-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: api.PersistentVolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: string(volumeID),
FSType: "ext4",
Partition: 0,

----------------------------------------

@@ -22,8 +22,8 @@ import (
"path"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
@@ -48,10 +48,10 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/aws-ebs" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{}}}}}) {
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
@@ -70,15 +70,15 @@ func TestGetAccessModes(t *testing.T) {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), api.ReadWriteOnce) {
t.Errorf("Expected to support AccessModeTypes: %s", api.ReadWriteOnce)
if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) {
t.Errorf("Expected to support AccessModeTypes: %s", v1.ReadWriteOnce)
}
if contains(plug.GetAccessModes(), api.ReadOnlyMany) {
t.Errorf("Expected not to support AccessModeTypes: %s", api.ReadOnlyMany)
if contains(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected not to support AccessModeTypes: %s", v1.ReadOnlyMany)
}
}
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
@@ -118,10 +118,10 @@ func TestPlugin(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "pd",
FSType: "ext4",
},
@@ -181,8 +181,8 @@ func TestPlugin(t *testing.T) {
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{})
persistentSpec, err := provisioner.Provision()
@@ -193,7 +193,7 @@ func TestPlugin(t *testing.T) {
if persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID != "test-aws-volume-name" {
t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID)
}
cap := persistentSpec.Spec.Capacity[api.ResourceStorage]
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*1024*1024*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
@@ -215,30 +215,30 @@ func TestPlugin(t *testing.T) {
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{},
},
ClaimRef: &api.ObjectReference{
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
@@ -255,7 +255,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
@@ -276,10 +276,10 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "pd",
FSType: "ext4",
},

----------------------------------------

@@ -23,7 +23,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/volume"
@@ -80,7 +80,7 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K
}
tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters
capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
// AWS works with gigabytes, convert to GiB with rounding up
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
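volume.RoundUpSize divides the byte request by the allocation unit and rounds up, so a fractional request is never truncated down. A worked example (values illustrative):

// a 100Mi PVC request becomes 1 whole GiB, not 0
requestBytes := int64(100 * 1024 * 1024)
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024)) // == 1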

----------------------------------------

@@ -25,7 +25,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/types"
@@ -102,13 +102,13 @@ func (plugin *azureDataDiskPlugin) RequiresRemount() bool {
return false
}
func (plugin *azureDataDiskPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter())
}
@@ -123,7 +123,7 @@ func (plugin *azureDataDiskPlugin) newMounterInternal(spec *volume.Spec, podUID
if azure.FSType != nil {
fsType = *azure.FSType
}
cachingMode := api.AzureDataDiskCachingNone
cachingMode := v1.AzureDataDiskCachingNone
if azure.CachingMode != nil {
cachingMode = *azure.CachingMode
}
@@ -170,10 +170,10 @@ func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volName, mountPath string
if err != nil {
return nil, err
}
azVolume := &api.Volume{
azVolume := &v1.Volume{
Name: volName,
VolumeSource: api.VolumeSource{
AzureDisk: &api.AzureDiskVolumeSource{
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: sourceName,
},
},
@@ -191,7 +191,7 @@ type azureDisk struct {
podUID types.UID
diskName string
diskUri string
cachingMode api.AzureDataDiskCachingMode
cachingMode v1.AzureDataDiskCachingMode
mounter mount.Interface
plugin *azureDataDiskPlugin
volume.MetricsNil
@@ -359,7 +359,7 @@ func (c *azureDiskUnmounter) TearDownAt(dir string) error {
return nil
}
func getVolumeSource(spec *volume.Spec) (*api.AzureDiskVolumeSource, error) {
func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) {
if spec.Volume != nil && spec.Volume.AzureDisk != nil {
return spec.Volume.AzureDisk, nil
}

----------------------------------------

@@ -24,7 +24,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/compute"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -48,11 +48,11 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != azureDataDiskPluginName {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{AzureDisk: &api.AzureDiskVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{AzureDisk: &api.AzureDiskVolumeSource{}}}}}) {
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
@@ -114,11 +114,11 @@ func TestPlugin(t *testing.T) {
}
fs := "ext4"
ro := false
caching := api.AzureDataDiskCachingNone
spec := &api.Volume{
caching := v1.AzureDataDiskCachingNone
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
AzureDisk: &api.AzureDiskVolumeSource{
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: fakeDiskName,
DataDiskURI: fakeDiskUri,
FSType: &fs,

----------------------------------------

@@ -21,8 +21,8 @@ import (
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
@@ -104,11 +104,11 @@ type azureDiskProvisioner struct {
var _ volume.Provisioner = &azureDiskProvisioner{}
func (a *azureDiskProvisioner) Provision() (*api.PersistentVolume, error) {
func (a *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
var sku, location, account string
name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 255)
capacity := a.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
@@ -136,22 +136,22 @@ func (a *azureDiskProvisioner) Provision() (*api.PersistentVolume, error) {
return nil, err
}
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: a.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "azure-disk-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy,
AccessModes: a.options.PVC.Spec.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: api.PersistentVolumeSource{
AzureDisk: &api.AzureDiskVolumeSource{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
DataDiskURI: diskUri,
},

----------------------------------------

@@ -20,7 +20,7 @@ import (
"fmt"
"os"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
@@ -77,19 +77,19 @@ func (plugin *azureFilePlugin) RequiresRemount() bool {
return false
}
func (plugin *azureFilePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
api.ReadWriteMany,
func (plugin *azureFilePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
}
}
func (plugin *azureFilePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *azureFilePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, &azureSvc{}, plugin.host.GetMounter())
}
func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, util azureUtil, mounter mount.Interface) (volume.Mounter, error) {
func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, util azureUtil, mounter mount.Interface) (volume.Mounter, error) {
source, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
@@ -118,17 +118,17 @@ func (plugin *azureFilePlugin) newUnmounterInternal(volName string, podUID types
return &azureFileUnmounter{&azureFile{
volName: volName,
mounter: mounter,
pod: &api.Pod{ObjectMeta: api.ObjectMeta{UID: podUID}},
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: podUID}},
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
}}, nil
}
func (plugin *azureFilePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
azureVolume := &api.Volume{
azureVolume := &v1.Volume{
Name: volName,
VolumeSource: api.VolumeSource{
AzureFile: &api.AzureFileVolumeSource{
VolumeSource: v1.VolumeSource{
AzureFile: &v1.AzureFileVolumeSource{
SecretName: volName,
ShareName: volName,
},
@@ -140,7 +140,7 @@ func (plugin *azureFilePlugin) ConstructVolumeSpec(volName, mountPath string) (*
// azureFile volumes represent mount of an AzureFile share.
type azureFile struct {
volName string
pod *api.Pod
pod *v1.Pod
mounter mount.Interface
plugin *azureFilePlugin
volume.MetricsProvider
@@ -268,7 +268,7 @@ func (c *azureFileUnmounter) TearDownAt(dir string) error {
}
func getVolumeSource(
spec *volume.Spec) (*api.AzureFileVolumeSource, bool, error) {
spec *volume.Spec) (*v1.AzureFileVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.AzureFile != nil {
return spec.Volume.AzureFile, spec.Volume.AzureFile.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
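The diff truncates this helper; its full two-branch shape, shared by all of these plugins, is roughly the following (a sketch reconstructed from the visible context; the error message is illustrative):

func getVolumeSource(
	spec *volume.Spec) (*v1.AzureFileVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.AzureFile != nil {
		// inline pod volume: ReadOnly comes from the volume source itself
		return spec.Volume.AzureFile, spec.Volume.AzureFile.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.AzureFile != nil {
		// PV-backed volume: ReadOnly comes from the volume.Spec
		return spec.PersistentVolume.Spec.AzureFile, spec.ReadOnly, nil
	}
	return nil, false, fmt.Errorf("spec does not reference an AzureFile volume type")
}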

----------------------------------------

@@ -22,8 +22,8 @@ import (
"path"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
@@ -46,10 +46,10 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/azure-file" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{AzureFile: &api.AzureFileVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AzureFile: &v1.AzureFileVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{AzureFile: &api.AzureFileVolumeSource{}}}}}) {
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AzureFile: &v1.AzureFileVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
@@ -67,12 +67,12 @@ func TestGetAccessModes(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) || !contains(plug.GetAccessModes(), api.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany)
if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) || !contains(plug.GetAccessModes(), v1.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
}
}
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
@@ -94,17 +94,17 @@ func TestPlugin(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
AzureFile: &api.AzureFileVolumeSource{
VolumeSource: v1.VolumeSource{
AzureFile: &v1.AzureFileVolumeSource{
SecretName: "secret",
ShareName: "share",
},
},
}
fake := &mount.FakeMounter{}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@@ -155,30 +155,30 @@ func TestPlugin(t *testing.T) {
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
AzureFile: &api.AzureFileVolumeSource{},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AzureFile: &v1.AzureFileVolumeSource{},
},
ClaimRef: &api.ObjectReference{
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
@@ -190,7 +190,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
@@ -217,17 +217,17 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
AzureFile: &api.AzureFileVolumeSource{
VolumeSource: v1.VolumeSource{
AzureFile: &v1.AzureFileVolumeSource{
SecretName: "secret",
ShareName: "share",
},
},
}
fake := &mount.FakeMounter{}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
if _, ok := mounter.(volume.Unmounter); ok {
t.Errorf("Volume Mounter can be type-assert to Unmounter")

----------------------------------------

@@ -22,7 +22,7 @@ import (
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
@@ -70,15 +70,15 @@ func (plugin *cephfsPlugin) RequiresRemount() bool {
return false
}
func (plugin *cephfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
api.ReadWriteMany,
func (plugin *cephfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
}
}
func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
cephvs, _, err := getVolumeSource(spec)
if err != nil {
return nil, err
@@ -155,10 +155,10 @@ func (plugin *cephfsPlugin) newUnmounterInternal(volName string, podUID types.UI
}
func (plugin *cephfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
cephfsVolume := &api.Volume{
cephfsVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
CephFS: &api.CephFSVolumeSource{
VolumeSource: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{},
Path: volumeName,
},
@@ -312,7 +312,7 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error {
return nil
}
func getVolumeSource(spec *volume.Spec) (*api.CephFSVolumeSource, bool, error) {
func getVolumeSource(spec *volume.Spec) (*v1.CephFSVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.CephFS != nil {
return spec.Volume.CephFS, spec.Volume.CephFS.ReadOnly, nil
} else if spec.PersistentVolume != nil &&

----------------------------------------

@@ -21,7 +21,7 @@ import (
"path"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -44,10 +44,10 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/cephfs" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{CephFS: &api.CephFSVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{CephFS: &v1.CephFSVolumeSource{}}}}) {
t.Errorf("Expected true")
}
}
@@ -64,10 +64,10 @@ func TestPlugin(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
CephFS: &api.CephFSVolumeSource{
VolumeSource: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{"a", "b"},
User: "user",
SecretRef: nil,

----------------------------------------

@@ -20,7 +20,7 @@ import (
"errors"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -241,9 +241,9 @@ func newDetacher(testcase *testcase) *cinderDiskDetacher {
func createVolSpec(name string, readOnly bool) *volume.Spec {
return &volume.Spec{
Volume: &api.Volume{
VolumeSource: api.VolumeSource{
Cinder: &api.CinderVolumeSource{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: name,
ReadOnly: readOnly,
},
@@ -254,10 +254,10 @@ func createVolSpec(name string, readOnly bool) *volume.Spec {
func createPVSpec(name string, readOnly bool) *volume.Spec {
return &volume.Spec{
PersistentVolume: &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
Cinder: &api.CinderVolumeSource{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: name,
ReadOnly: readOnly,
},
@@ -430,8 +430,8 @@ type instances struct {
instanceID string
}
func (instances *instances) NodeAddresses(name types.NodeName) ([]api.NodeAddress, error) {
return []api.NodeAddress{}, errors.New("Not implemented")
func (instances *instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
return []v1.NodeAddress{}, errors.New("Not implemented")
}
func (instances *instances) ExternalID(name types.NodeName) (string, error) {

----------------------------------------

@@ -23,8 +23,8 @@ import (
"path"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
"k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace"
@@ -97,13 +97,13 @@ func (plugin *cinderPlugin) RequiresRemount() bool {
return false
}
func (plugin *cinderPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
func (plugin *cinderPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, &CinderDiskUtil{}, plugin.host.GetMounter())
}
@@ -211,10 +211,10 @@ func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*
return nil, err
}
glog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath)
cinderVolume := &api.Volume{
cinderVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
Cinder: &api.CinderVolumeSource{
VolumeSource: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: sourceName,
},
},
@@ -464,28 +464,28 @@ type cinderVolumeProvisioner struct {
var _ volume.Provisioner = &cinderVolumeProvisioner{}
func (c *cinderVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
volumeID, sizeGB, err := c.manager.CreateVolume(c)
if err != nil {
return nil, err
}
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "cinder-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: api.PersistentVolumeSource{
Cinder: &api.CinderVolumeSource{
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: volumeID,
FSType: "ext4",
ReadOnly: false,
@@ -500,7 +500,7 @@ func (c *cinderVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
return pv, nil
}
func getVolumeSource(spec *volume.Spec) (*api.CinderVolumeSource, bool, error) {
func getVolumeSource(spec *volume.Spec) (*v1.CinderVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.Cinder != nil {
return spec.Volume.Cinder, spec.Volume.Cinder.ReadOnly, nil
} else if spec.PersistentVolume != nil &&

----------------------------------------

@@ -23,7 +23,7 @@ import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -47,11 +47,11 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/cinder" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{Cinder: &api.CinderVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{Cinder: &v1.CinderVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{Cinder: &api.CinderVolumeSource{}}}}}) {
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
@@ -140,10 +140,10 @@ func TestPlugin(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
Cinder: &api.CinderVolumeSource{
VolumeSource: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: "pd",
FSType: "ext4",
},
@@ -199,8 +199,8 @@ func TestPlugin(t *testing.T) {
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
persistentSpec, err := provisioner.Provision()
@@ -211,7 +211,7 @@ func TestPlugin(t *testing.T) {
if persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID != "test-volume-name" {
t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID)
}
cap := persistentSpec.Spec.Capacity[api.ResourceStorage]
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 1024*1024*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)

----------------------------------------

@@ -24,7 +24,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/volume"
)
@@ -140,7 +140,7 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s
return "", 0, err
}
capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// Cinder works with gigabytes, convert to GiB with rounding up
volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))

----------------------------------------

@@ -20,7 +20,7 @@ import (
"fmt"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
ioutil "k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
@@ -74,7 +74,7 @@ func (plugin *configMapPlugin) RequiresRemount() bool {
return true
}
func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return &configMapVolumeMounter{
configMapVolume: &configMapVolume{spec.Name(), pod.UID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter(), volume.MetricsNil{}},
source: *spec.Volume.ConfigMap,
@@ -87,10 +87,10 @@ func (plugin *configMapPlugin) NewUnmounter(volName string, podUID types.UID) (v
}
func (plugin *configMapPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
configMapVolume := &api.Volume{
configMapVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
ConfigMap: &api.ConfigMapVolumeSource{},
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
}
return volume.NewSpecFromVolume(configMapVolume), nil
@@ -116,8 +116,8 @@ func (sv *configMapVolume) GetPath() string {
type configMapVolumeMounter struct {
*configMapVolume
source api.ConfigMapVolumeSource
pod api.Pod
source v1.ConfigMapVolumeSource
pod v1.Pod
opts *volume.VolumeOptions
}
@@ -137,7 +137,7 @@ func wrappedVolumeSpec() volume.Spec {
// This should be on a tmpfs instead of the local disk; the problem is
// charging the memory for the tmpfs to the right cgroup. We should make
// this a tmpfs when we can do the accounting correctly.
Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
}
}
@@ -209,7 +209,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
return nil
}
func makePayload(mappings []api.KeyToPath, configMap *api.ConfigMap, defaultMode *int32) (map[string]volumeutil.FileProjection, error) {
func makePayload(mappings []v1.KeyToPath, configMap *v1.ConfigMap, defaultMode *int32) (map[string]volumeutil.FileProjection, error) {
if defaultMode == nil {
return nil, fmt.Errorf("No defaultMode used, not even the default value for it")
}
@@ -245,7 +245,7 @@ func makePayload(mappings []api.KeyToPath, configMap *api.ConfigMap, defaultMode
return payload, nil
}
func totalBytes(configMap *api.ConfigMap) int {
func totalBytes(configMap *v1.ConfigMap) int {
totalSize := 0
for _, value := range configMap.Data {
totalSize += len(value)
@@ -276,9 +276,9 @@ func (c *configMapVolumeUnmounter) TearDownAt(dir string) error {
return wrapped.TearDownAt(dir)
}
func getVolumeSource(spec *volume.Spec) (*api.ConfigMapVolumeSource, bool) {
func getVolumeSource(spec *volume.Spec) (*v1.ConfigMapVolumeSource, bool) {
var readOnly bool
var volumeSource *api.ConfigMapVolumeSource
var volumeSource *v1.ConfigMapVolumeSource
if spec.Volume != nil && spec.Volume.ConfigMap != nil {
volumeSource = spec.Volume.ConfigMap
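For reference, the v1 shapes consumed by makePayload compose like this in a volume definition (a minimal sketch; names and mode are illustrative):

mode := int32(0644)
vol := v1.Volume{
	Name: "config",
	VolumeSource: v1.VolumeSource{
		ConfigMap: &v1.ConfigMapVolumeSource{
			LocalObjectReference: v1.LocalObjectReference{Name: "my-config"},
			DefaultMode:          &mode,
			// each KeyToPath projects one ConfigMap key to a relative file path
			Items: []v1.KeyToPath{{Key: "foo", Path: "path/to/foo.txt"}},
		},
	},
}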

----------------------------------------

@@ -25,9 +25,9 @@ import (
"strings"
"testing"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/empty_dir"
@@ -39,15 +39,15 @@ func TestMakePayload(t *testing.T) {
caseMappingMode := int32(0400)
cases := []struct {
name string
mappings []api.KeyToPath
configMap *api.ConfigMap
mappings []v1.KeyToPath
configMap *v1.ConfigMap
mode int32
payload map[string]util.FileProjection
success bool
}{
{
name: "no overrides",
configMap: &api.ConfigMap{
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
@@ -62,13 +62,13 @@
},
{
name: "basic 1",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/foo.txt",
},
},
configMap: &api.ConfigMap{
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
@@ -82,13 +82,13 @@
},
{
name: "subdirs",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/1/2/3/foo.txt",
},
},
configMap: &api.ConfigMap{
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
@@ -102,13 +102,13 @@
},
{
name: "subdirs 2",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/1/2/3/foo.txt",
},
},
configMap: &api.ConfigMap{
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
@@ -122,7 +122,7 @@
},
{
name: "subdirs 3",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/1/2/3/foo.txt",
@@ -132,7 +132,7 @@
Path: "another/path/to/the/esteemed/bar.bin",
},
},
configMap: &api.ConfigMap{
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
@@ -147,13 +147,13 @@
},
{
name: "non existent key",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "zab",
Path: "path/to/foo.txt",
},
},
configMap: &api.ConfigMap{
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
@@ -164,7 +164,7 @@
},
{
name: "mapping with Mode",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "foo.txt",
@@ -176,7 +176,7 @@
Mode: &caseMappingMode,
},
},
configMap: &api.ConfigMap{
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
@@ -191,7 +191,7 @@
},
{
name: "mapping with defaultMode",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "foo.txt",
@@ -201,7 +201,7 @@
Path: "bar.bin",
},
},
configMap: &api.ConfigMap{
configMap: &v1.ConfigMap{
Data: map[string]string{
"foo": "foo",
"bar": "bar",
@@ -260,7 +260,7 @@ func TestCanSupport(t *testing.T) {
if plugin.GetPluginName() != configMapPluginName {
t.Errorf("Wrong name: %s", plugin.GetPluginName())
}
if !plugin.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: ""}}}}}) {
if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: ""}}}}}) {
t.Errorf("Expected true")
}
if plugin.CanSupport(&volume.Spec{}) {
@@ -290,7 +290,7 @@ func TestPlugin(t *testing.T) {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@@ -354,7 +354,7 @@ func TestPluginReboot(t *testing.T) {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@@ -387,12 +387,12 @@ func TestPluginReboot(t *testing.T) {
doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
func volumeSpec(volumeName, configMapName string, defaultMode int32) *api.Volume {
return &api.Volume{
func volumeSpec(volumeName, configMapName string, defaultMode int32) *v1.Volume {
return &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
ConfigMap: &api.ConfigMapVolumeSource{
LocalObjectReference: api.LocalObjectReference{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: configMapName,
},
DefaultMode: &defaultMode,
@@ -401,9 +401,9 @@ func volumeSpec(volumeName, configMapName string, defaultMode int32) *api.Volume
}
}
func configMap(namespace, name string) api.ConfigMap {
return api.ConfigMap{
ObjectMeta: api.ObjectMeta{
func configMap(namespace, name string) v1.ConfigMap {
return v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
Namespace: namespace,
Name: name,
},
@@ -415,7 +415,7 @@ func configMap(namespace, name string) api.ConfigMap {
}
}
func doTestConfigMapDataInVolume(volumePath string, configMap api.ConfigMap, t *testing.T) {
func doTestConfigMapDataInVolume(volumePath string, configMap v1.ConfigMap, t *testing.T) {
for key, value := range configMap.Data {
configMapDataHostPath := path.Join(volumePath, key)
if _, err := os.Stat(configMapDataHostPath); err != nil {

----------------------------------------

@@ -22,7 +22,7 @@ import (
"sort"
"strings"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/types"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
@@ -51,7 +51,7 @@ var _ volume.VolumePlugin = &downwardAPIPlugin{}
func wrappedVolumeSpec() volume.Spec {
return volume.Spec{
Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}},
Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}}},
}
}
@@ -82,7 +82,7 @@ func (plugin *downwardAPIPlugin) RequiresRemount() bool {
return true
}
func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
v := &downwardAPIVolume{
volName: spec.Name(),
items: spec.Volume.DownwardAPI.Items,
@@ -108,10 +108,10 @@ func (plugin *downwardAPIPlugin) NewUnmounter(volName string, podUID types.UID)
}
func (plugin *downwardAPIPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
downwardAPIVolume := &api.Volume{
downwardAPIVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{},
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{},
},
}
return volume.NewSpecFromVolume(downwardAPIVolume), nil
@@ -120,9 +120,9 @@ func (plugin *downwardAPIPlugin) ConstructVolumeSpec(volumeName, mountPath strin
// downwardAPIVolume retrieves downward API data and places it into the volume on the host.
type downwardAPIVolume struct {
volName string
items []api.DownwardAPIVolumeFile
pod *api.Pod
podUID types.UID // TODO: remove this redundancy as soon NewUnmounter func will have *api.POD and not only types.UID
items []v1.DownwardAPIVolumeFile
pod *v1.Pod
podUID types.UID // TODO: remove this redundancy as soon NewUnmounter func will have *v1.POD and not only types.UID
plugin *downwardAPIPlugin
volume.MetricsNil
}
@@ -131,7 +131,7 @@ type downwardAPIVolume struct {
// and dumps it in files
type downwardAPIVolumeMounter struct {
*downwardAPIVolume
source api.DownwardAPIVolumeSource
source v1.DownwardAPIVolumeSource
opts *volume.VolumeOptions
}
@@ -286,9 +286,9 @@ func (b *downwardAPIVolumeMounter) getMetaDir() string {
return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName)), b.volName)
}
func getVolumeSource(spec *volume.Spec) (*api.DownwardAPIVolumeSource, bool) {
func getVolumeSource(spec *volume.Spec) (*v1.DownwardAPIVolumeSource, bool) {
var readOnly bool
var volumeSource *api.DownwardAPIVolumeSource
var volumeSource *v1.DownwardAPIVolumeSource
if spec.Volume != nil && spec.Volume.DownwardAPI != nil {
volumeSource = spec.Volume.DownwardAPI

----------------------------------------

@@ -23,9 +23,9 @@ import (
"path"
"testing"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/types"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -90,8 +90,8 @@ func TestLabels(t *testing.T) {
"key1": "value1",
"key2": "value2"}
clientset := fake.NewSimpleClientset(&api.Pod{
ObjectMeta: api.ObjectMeta{
clientset := fake.NewSimpleClientset(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
Labels: labels,
@@ -104,20 +104,20 @@
pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
defaultMode := int32(0644)
volumeSpec := &api.Volume{
volumeSpec := &v1.Volume{
Name: testVolumeName,
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
DefaultMode: &defaultMode,
Items: []api.DownwardAPIVolumeFile{
{Path: "labels", FieldRef: &api.ObjectFieldSelector{
Items: []v1.DownwardAPIVolumeFile{
{Path: "labels", FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.labels"}}}},
},
}
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: testPodUID, Labels: labels}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
@@ -170,19 +170,19 @@ func TestAnnotations(t *testing.T) {
"a2": "value2"}
defaultMode := int32(0644)
volumeSpec := &api.Volume{
volumeSpec := &v1.Volume{
Name: testVolumeName,
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
DefaultMode: &defaultMode,
Items: []api.DownwardAPIVolumeFile{
{Path: "annotations", FieldRef: &api.ObjectFieldSelector{
Items: []v1.DownwardAPIVolumeFile{
{Path: "annotations", FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.annotations"}}}},
},
}
clientset := fake.NewSimpleClientset(&api.Pod{
ObjectMeta: api.ObjectMeta{
clientset := fake.NewSimpleClientset(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
Annotations: annotations,
@@ -197,7 +197,7 @@
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Annotations: annotations}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: testPodUID, Annotations: annotations}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@@ -235,19 +235,19 @@ func TestName(t *testing.T) {
)
defaultMode := int32(0644)
volumeSpec := &api.Volume{
volumeSpec := &v1.Volume{
Name: testVolumeName,
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
DefaultMode: &defaultMode,
Items: []api.DownwardAPIVolumeFile{
{Path: "name_file_name", FieldRef: &api.ObjectFieldSelector{
Items: []v1.DownwardAPIVolumeFile{
{Path: "name_file_name", FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name"}}}},
},
}
clientset := fake.NewSimpleClientset(&api.Pod{
ObjectMeta: api.ObjectMeta{
clientset := fake.NewSimpleClientset(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
},
@@ -261,7 +261,7 @@
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Name: testName}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: testPodUID, Name: testName}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@@ -300,19 +300,19 @@ func TestNamespace(t *testing.T) {
)
defaultMode := int32(0644)
volumeSpec := &api.Volume{
volumeSpec := &v1.Volume{
Name: testVolumeName,
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
DefaultMode: &defaultMode,
Items: []api.DownwardAPIVolumeFile{
{Path: "namespace_file_name", FieldRef: &api.ObjectFieldSelector{
Items: []v1.DownwardAPIVolumeFile{
{Path: "namespace_file_name", FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.namespace"}}}},
},
}
clientset := fake.NewSimpleClientset(&api.Pod{
ObjectMeta: api.ObjectMeta{
clientset := fake.NewSimpleClientset(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
},
@@ -326,7 +326,7 @@
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Namespace: testNamespace}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: testPodUID, Namespace: testNamespace}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@@ -367,8 +367,8 @@ func TestWriteTwiceNoUpdate(t *testing.T) {
"key1": "value1",
"key2": "value2"}
clientset := fake.NewSimpleClientset(&api.Pod{
ObjectMeta: api.ObjectMeta{
clientset := fake.NewSimpleClientset(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
Labels: labels,
@@ -380,20 +380,20 @@
pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
defaultMode := int32(0644)
volumeSpec := &api.Volume{
volumeSpec := &v1.Volume{
Name: testVolumeName,
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
DefaultMode: &defaultMode,
Items: []api.DownwardAPIVolumeFile{
{Path: "labels", FieldRef: &api.ObjectFieldSelector{
Items: []v1.DownwardAPIVolumeFile{
{Path: "labels", FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.labels"}}}},
},
}
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: testPodUID, Labels: labels}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
@@ -455,8 +455,8 @@ func TestWriteTwiceWithUpdate(t *testing.T) {
"key1": "value1",
"key2": "value2"}
clientset := fake.NewSimpleClientset(&api.Pod{
ObjectMeta: api.ObjectMeta{
clientset := fake.NewSimpleClientset(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
Labels: labels,
@@ -468,20 +468,20 @@
pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
defaultMode := int32(0644)
volumeSpec := &api.Volume{
volumeSpec := &v1.Volume{
Name: testVolumeName,
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
DefaultMode: &defaultMode,
Items: []api.DownwardAPIVolumeFile{
{Path: "labels", FieldRef: &api.ObjectFieldSelector{
Items: []v1.DownwardAPIVolumeFile{
{Path: "labels", FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.labels"}}}},
},
}
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: testPodUID, Labels: labels}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
@@ -562,8 +562,8 @@ func TestWriteWithUnixPath(t *testing.T) {
"a1": "value1",
"a2": "value2"}
clientset := fake.NewSimpleClientset(&api.Pod{
ObjectMeta: api.ObjectMeta{
clientset := fake.NewSimpleClientset(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
Labels: labels,
@@ -576,22 +576,22 @@
pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
defaultMode := int32(0644)
volumeSpec := &api.Volume{
volumeSpec := &v1.Volume{
Name: testVolumeName,
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
DefaultMode: &defaultMode,
Items: []api.DownwardAPIVolumeFile{
{Path: "this/is/mine/labels", FieldRef: &api.ObjectFieldSelector{
Items: []v1.DownwardAPIVolumeFile{
{Path: "this/is/mine/labels", FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.labels"}},
{Path: "this/is/yours/annotations", FieldRef: &api.ObjectFieldSelector{
{Path: "this/is/yours/annotations", FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.annotations"}},
}}},
}
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels, Annotations: annotations}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: testPodUID, Labels: labels, Annotations: annotations}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
@ -640,8 +640,8 @@ func TestWriteWithUnixPathBadPath(t *testing.T) {
"key2": "value2",
}
clientset := fake.NewSimpleClientset(&api.Pod{
ObjectMeta: api.ObjectMeta{
clientset := fake.NewSimpleClientset(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
Labels: labels,
@ -658,15 +658,15 @@ func TestWriteWithUnixPathBadPath(t *testing.T) {
}
defaultMode := int32(0644)
volumeSpec := &api.Volume{
volumeSpec := &v1.Volume{
Name: testVolumeName,
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
DefaultMode: &defaultMode,
Items: []api.DownwardAPIVolumeFile{
Items: []v1.DownwardAPIVolumeFile{
{
Path: "this//labels",
FieldRef: &api.ObjectFieldSelector{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.labels",
},
},
@ -675,7 +675,7 @@ func TestWriteWithUnixPathBadPath(t *testing.T) {
},
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: testPodUID, Labels: labels}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
@ -710,19 +710,19 @@ func TestDefaultMode(t *testing.T) {
)
defaultMode := int32(0644)
volumeSpec := &api.Volume{
volumeSpec := &v1.Volume{
Name: testVolumeName,
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
DefaultMode: &defaultMode,
Items: []api.DownwardAPIVolumeFile{
{Path: "name_file_name", FieldRef: &api.ObjectFieldSelector{
Items: []v1.DownwardAPIVolumeFile{
{Path: "name_file_name", FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name"}}}},
},
}
clientset := fake.NewSimpleClientset(&api.Pod{
ObjectMeta: api.ObjectMeta{
clientset := fake.NewSimpleClientset(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
},
@ -736,7 +736,7 @@ func TestDefaultMode(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Name: testName}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: testPodUID, Name: testName}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@ -776,14 +776,14 @@ func TestItemMode(t *testing.T) {
defaultMode := int32(0644)
itemMode := int32(0400)
volumeSpec := &api.Volume{
volumeSpec := &v1.Volume{
Name: testVolumeName,
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
DefaultMode: &defaultMode,
Items: []api.DownwardAPIVolumeFile{
Items: []v1.DownwardAPIVolumeFile{
{
Path: "name_file_name", FieldRef: &api.ObjectFieldSelector{FieldPath: "metadata.name"},
Path: "name_file_name", FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
Mode: &itemMode,
},
},
@ -791,8 +791,8 @@ func TestItemMode(t *testing.T) {
},
}
clientset := fake.NewSimpleClientset(&api.Pod{
ObjectMeta: api.ObjectMeta{
clientset := fake.NewSimpleClientset(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
},
@ -806,7 +806,7 @@ func TestItemMode(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Name: testName}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: testPodUID, Name: testName}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)

View File


@ -22,7 +22,7 @@ import (
"path"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
@ -89,12 +89,12 @@ func (plugin *emptyDirPlugin) RequiresRemount() bool {
return false
}
func (plugin *emptyDirPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *emptyDirPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(), &realMountDetector{plugin.host.GetMounter()}, opts)
}
func (plugin *emptyDirPlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface, mountDetector mountDetector, opts volume.VolumeOptions) (volume.Mounter, error) {
medium := api.StorageMediumDefault
func (plugin *emptyDirPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, mounter mount.Interface, mountDetector mountDetector, opts volume.VolumeOptions) (volume.Mounter, error) {
medium := v1.StorageMediumDefault
if spec.Volume.EmptyDir != nil { // Support a non-specified source as EmptyDir.
medium = spec.Volume.EmptyDir.Medium
}
@ -116,9 +116,9 @@ func (plugin *emptyDirPlugin) NewUnmounter(volName string, podUID types.UID) (vo
func (plugin *emptyDirPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface, mountDetector mountDetector) (volume.Unmounter, error) {
ed := &emptyDir{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{UID: podUID}},
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: podUID}},
volName: volName,
medium: api.StorageMediumDefault, // might be changed later
medium: v1.StorageMediumDefault, // might be changed later
mounter: mounter,
mountDetector: mountDetector,
plugin: plugin,
@ -128,10 +128,10 @@ func (plugin *emptyDirPlugin) newUnmounterInternal(volName string, podUID types.
}
func (plugin *emptyDirPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
emptyDirVolume := &api.Volume{
emptyDirVolume := &v1.Volume{
Name: volName,
VolumeSource: api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}
return volume.NewSpecFromVolume(emptyDirVolume), nil
@ -157,9 +157,9 @@ const (
// EmptyDir volumes are temporary directories exposed to the pod.
// These do not persist beyond the lifetime of a pod.
type emptyDir struct {
pod *api.Pod
pod *v1.Pod
volName string
medium api.StorageMedium
medium v1.StorageMedium
mounter mount.Interface
mountDetector mountDetector
plugin *emptyDirPlugin
@ -200,17 +200,17 @@ func (ed *emptyDir) SetUpAt(dir string, fsGroup *int64) error {
// medium is memory, and a mountpoint is present, then the volume is
// ready.
if volumeutil.IsReady(ed.getMetaDir()) {
if ed.medium == api.StorageMediumMemory && !notMnt {
if ed.medium == v1.StorageMediumMemory && !notMnt {
return nil
} else if ed.medium == api.StorageMediumDefault {
} else if ed.medium == v1.StorageMediumDefault {
return nil
}
}
switch ed.medium {
case api.StorageMediumDefault:
case v1.StorageMediumDefault:
err = ed.setupDir(dir)
case api.StorageMediumMemory:
case v1.StorageMediumMemory:
err = ed.setupTmpfs(dir)
default:
err = fmt.Errorf("unknown storage medium %q", ed.medium)
@ -305,7 +305,7 @@ func (ed *emptyDir) TearDownAt(dir string) error {
return err
}
if isMnt && medium == mediumMemory {
ed.medium = api.StorageMediumMemory
ed.medium = v1.StorageMediumMemory
return ed.teardownTmpfs(dir)
}
// assume StorageMediumDefault
@ -341,9 +341,9 @@ func (ed *emptyDir) getMetaDir() string {
return path.Join(ed.plugin.host.GetPodPluginDir(ed.pod.UID, strings.EscapeQualifiedNameForDisk(emptyDirPluginName)), ed.volName)
}
func getVolumeSource(spec *volume.Spec) (*api.EmptyDirVolumeSource, bool) {
func getVolumeSource(spec *volume.Spec) (*v1.EmptyDirVolumeSource, bool) {
var readOnly bool
var volumeSource *api.EmptyDirVolumeSource
var volumeSource *v1.EmptyDirVolumeSource
if spec.Volume != nil && spec.Volume.EmptyDir != nil {
volumeSource = spec.Volume.EmptyDir


@ -23,7 +23,7 @@ import (
"path"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@ -55,10 +55,10 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/empty-dir" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
@ -74,13 +74,13 @@ func (fake *fakeMountDetector) GetMountMedium(path string) (storageMedium, bool,
func TestPluginEmptyRootContext(t *testing.T) {
doTestPlugin(t, pluginTestConfig{
medium: api.StorageMediumDefault,
medium: v1.StorageMediumDefault,
expectedSetupMounts: 0,
expectedTeardownMounts: 0})
}
type pluginTestConfig struct {
medium api.StorageMedium
medium v1.StorageMedium
idempotent bool
expectedSetupMounts int
shouldBeMountedBeforeTeardown bool
@ -101,14 +101,14 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
plug = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)
volumeName = "test-volume"
spec = &api.Volume{
spec = &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: config.medium}},
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: config.medium}},
}
physicalMounter = mount.FakeMounter{}
mountDetector = fakeMountDetector{}
pod = &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod = &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
)
if config.idempotent {
@ -171,7 +171,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
// Make an unmounter for the volume
teardownMedium := mediumUnknown
if config.medium == api.StorageMediumMemory {
if config.medium == v1.StorageMediumMemory {
teardownMedium = mediumMemory
}
unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
@ -211,10 +211,10 @@ func TestPluginBackCompat(t *testing.T) {
plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@ -240,10 +240,10 @@ func TestMetrics(t *testing.T) {
plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir)
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)


@ -21,7 +21,7 @@ import (
"strconv"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
@ -77,14 +77,14 @@ func (plugin *fcPlugin) RequiresRemount() bool {
return false
}
func (plugin *fcPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
func (plugin *fcPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
}
}
func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter())
}
@ -142,10 +142,10 @@ func (plugin *fcPlugin) execCommand(command string, args []string) ([]byte, erro
}
func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
fcVolume := &api.Volume{
fcVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
FC: &api.FCVolumeSource{},
VolumeSource: v1.VolumeSource{
FC: &v1.FCVolumeSource{},
},
}
return volume.NewSpecFromVolume(fcVolume), nil
@ -225,7 +225,7 @@ func (c *fcDiskUnmounter) TearDownAt(dir string) error {
return diskTearDown(c.manager, *c, dir, c.mounter)
}
func getVolumeSource(spec *volume.Spec) (*api.FCVolumeSource, bool, error) {
func getVolumeSource(spec *volume.Spec) (*v1.FCVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.FC != nil {
return spec.Volume.FC, spec.Volume.FC.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
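The getVolumeSource helpers follow one shape across all the plugins touched by this commit: check the directly-embedded volume source first, then the PersistentVolume source, and error out otherwise. The hunk is truncated here; a sketch of the complete fc helper, with the final error message assumed by analogy with the other plugins:

    func getVolumeSource(spec *volume.Spec) (*v1.FCVolumeSource, bool, error) {
        if spec.Volume != nil && spec.Volume.FC != nil {
            // volume used directly in a pod; ReadOnly is set by the pod author
            return spec.Volume.FC, spec.Volume.FC.ReadOnly, nil
        } else if spec.PersistentVolume != nil &&
            spec.PersistentVolume.Spec.FC != nil {
            // volume reached through a PV/PVC binding
            return spec.PersistentVolume.Spec.FC, spec.PersistentVolume.Spec.FC.ReadOnly, nil
        }
        return nil, false, fmt.Errorf("Spec does not reference a FibreChannel volume type")
    }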


@ -21,8 +21,8 @@ import (
"os"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@ -47,7 +47,7 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/fc" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
@ -66,12 +66,12 @@ func TestGetAccessModes(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", api.ReadWriteOnce, api.ReadOnlyMany)
if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
}
}
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
@ -200,10 +200,10 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
func TestPluginVolume(t *testing.T) {
lun := int32(0)
vol := &api.Volume{
vol := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
FC: &api.FCVolumeSource{
VolumeSource: v1.VolumeSource{
FC: &v1.FCVolumeSource{
TargetWWNs: []string{"some_wwn"},
FSType: "ext4",
Lun: &lun,
@ -215,13 +215,13 @@ func TestPluginVolume(t *testing.T) {
func TestPluginPersistentVolume(t *testing.T) {
lun := int32(0)
vol := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
vol := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "vol1",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
FC: &api.FCVolumeSource{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &v1.FCVolumeSource{
TargetWWNs: []string{"some_wwn"},
FSType: "ext4",
Lun: &lun,
@ -240,34 +240,34 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
defer os.RemoveAll(tmpDir)
lun := int32(0)
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
FC: &api.FCVolumeSource{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &v1.FCVolumeSource{
TargetWWNs: []string{"some_wwn"},
FSType: "ext4",
Lun: &lun,
},
},
ClaimRef: &api.ObjectReference{
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
@ -279,7 +279,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {


@ -25,7 +25,7 @@ import (
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
@ -97,15 +97,15 @@ func (plugin *flexVolumePlugin) RequiresRemount() bool {
}
// GetAccessModes gets the allowed access modes for this plugin.
func (plugin *flexVolumePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
func (plugin *flexVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
}
}
// NewMounter is the mounter routine to build the volume.
func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
fv, _, err := getVolumeSource(spec)
if err != nil {
return nil, err
@ -131,7 +131,7 @@ func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ vo
}
// newMounterInternal is the internal mounter routine to build the volume.
func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, manager flexVolumeManager, mounter mount.Interface, runner exec.Interface, secrets map[string]string) (volume.Mounter, error) {
func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, manager flexVolumeManager, mounter mount.Interface, runner exec.Interface, secrets map[string]string) (volume.Mounter, error) {
source, _, err := getVolumeSource(spec)
if err != nil {
return nil, err
@ -180,10 +180,10 @@ func (plugin *flexVolumePlugin) newUnmounterInternal(volName string, podUID type
}
func (plugin *flexVolumePlugin) ConstructVolumeSpec(volumeName, sourceName string) (*volume.Spec, error) {
flexVolume := &api.Volume{
flexVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
FlexVolume: &api.FlexVolumeSource{
VolumeSource: v1.VolumeSource{
FlexVolume: &v1.FlexVolumeSource{
Driver: sourceName,
},
},
@ -420,7 +420,7 @@ func (f *flexVolumeUnmounter) TearDownAt(dir string) error {
return nil
}
func getVolumeSource(spec *volume.Spec) (*api.FlexVolumeSource, bool, error) {
func getVolumeSource(spec *volume.Spec) (*v1.FlexVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.FlexVolume != nil {
return spec.Volume.FlexVolume, spec.Volume.FlexVolume.ReadOnly, nil
} else if spec.PersistentVolume != nil &&


@ -25,7 +25,7 @@ import (
"testing"
"text/template"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
@ -190,13 +190,13 @@ func TestCanSupport(t *testing.T) {
if plugin.GetPluginName() != "kubernetes.io/fakeAttacher" {
t.Errorf("Wrong name: %s", plugin.GetPluginName())
}
if !plugin.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{FlexVolume: &api.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}) {
if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}) {
t.Errorf("Expected true")
}
if !plugin.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{FlexVolume: &api.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}}) {
if !plugin.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}}) {
t.Errorf("Expected true")
}
if plugin.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
if plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
@ -216,12 +216,12 @@ func TestGetAccessModes(t *testing.T) {
if err != nil {
t.Fatalf("Can't find the plugin by name")
}
if !contains(plugin.GetAccessModes(), api.ReadWriteOnce) || !contains(plugin.GetAccessModes(), api.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", api.ReadWriteOnce, api.ReadOnlyMany)
if !contains(plugin.GetAccessModes(), v1.ReadWriteOnce) || !contains(plugin.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
}
}
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
@ -239,7 +239,7 @@ func doTestPluginAttachDetach(t *testing.T, spec *volume.Spec, tmpDir string) {
t.Errorf("Can't find the plugin by name")
}
fake := &mount.FakeMounter{}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
secretMap := make(map[string]string)
secretMap["flexsecret"] = base64.StdEncoding.EncodeToString([]byte("foo"))
mounter, err := plugin.(*flexVolumePlugin).newMounterInternal(spec, pod, &flexVolumeUtil{}, fake, exec.New(), secretMap)
@ -320,7 +320,7 @@ func doTestPluginMountUnmount(t *testing.T, spec *volume.Spec, tmpDir string) {
t.Errorf("Can't find the plugin by name")
}
fake := &mount.FakeMounter{}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
// Use nil secret to test for nil secret case.
mounter, err := plugin.(*flexVolumePlugin).newMounterInternal(spec, pod, &flexVolumeUtil{}, fake, exec.New(), nil)
volumePath := mounter.GetPath()
@ -374,9 +374,9 @@ func TestPluginVolumeAttacher(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
vol := &api.Volume{
vol := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{FlexVolume: &api.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher", ReadOnly: false}},
VolumeSource: v1.VolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher", ReadOnly: false}},
}
doTestPluginAttachDetach(t, volume.NewSpecFromVolume(vol), tmpDir)
}
@ -388,9 +388,9 @@ func TestPluginVolumeMounter(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
vol := &api.Volume{
vol := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{FlexVolume: &api.FlexVolumeSource{Driver: "kubernetes.io/fakeMounter", ReadOnly: false}},
VolumeSource: v1.VolumeSource{FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeMounter", ReadOnly: false}},
}
doTestPluginMountUnmount(t, volume.NewSpecFromVolume(vol), tmpDir)
}
@ -402,13 +402,13 @@ func TestPluginPersistentVolume(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
vol := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
vol := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "vol1",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
FlexVolume: &api.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher", ReadOnly: false},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FlexVolume: &v1.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher", ReadOnly: false},
},
},
}


@ -23,7 +23,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/env"
"k8s.io/kubernetes/pkg/util/mount"
@ -49,7 +49,7 @@ type flockerVolume struct {
datasetName string
// dataset uuid
datasetUUID string
//pod *api.Pod
//pod *v1.Pod
flockerClient flockerApi.Clientable
manager volumeManager
plugin *flockerPlugin
@ -111,13 +111,13 @@ func (p *flockerPlugin) RequiresRemount() bool {
return false
}
func (plugin *flockerPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
func (plugin *flockerPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
func (p *flockerPlugin) getFlockerVolumeSource(spec *volume.Spec) (*api.FlockerVolumeSource, bool) {
func (p *flockerPlugin) getFlockerVolumeSource(spec *volume.Spec) (*v1.FlockerVolumeSource, bool) {
// AFAIK this will always be r/w, but perhaps for the future it will be needed
readOnly := false
@ -127,7 +127,7 @@ func (p *flockerPlugin) getFlockerVolumeSource(spec *volume.Spec) (*api.FlockerV
return spec.PersistentVolume.Spec.Flocker, readOnly
}
func (plugin *flockerPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *flockerPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &FlockerUtil{}, plugin.host.GetMounter())
}
@ -172,10 +172,10 @@ func (p *flockerPlugin) newUnmounterInternal(volName string, podUID types.UID, m
}
func (p *flockerPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
flockerVolume := &api.Volume{
flockerVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
Flocker: &api.FlockerVolumeSource{
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{
DatasetName: volumeName,
},
},
@ -392,7 +392,7 @@ func (b *flockerVolumeMounter) updateDatasetPrimary(datasetUUID string, primaryU
}
func getVolumeSource(spec *volume.Spec) (*api.FlockerVolumeSource, bool, error) {
func getVolumeSource(spec *volume.Spec) (*v1.FlockerVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.Flocker != nil {
return spec.Volume.Flocker, spec.ReadOnly, nil
} else if spec.PersistentVolume != nil &&


@ -21,7 +21,7 @@ import (
"os"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@ -144,10 +144,10 @@ func TestPlugin(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
Flocker: &api.FlockerVolumeSource{
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{
DatasetUUID: "uuid1",
},
},
@ -181,24 +181,24 @@ func TestCanSupport(t *testing.T) {
specs := map[*volume.Spec]bool{
&volume.Spec{
Volume: &api.Volume{
VolumeSource: api.VolumeSource{
Flocker: &api.FlockerVolumeSource{},
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{},
},
},
}: true,
&volume.Spec{
PersistentVolume: &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
Flocker: &api.FlockerVolumeSource{},
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Flocker: &v1.FlockerVolumeSource{},
},
},
},
}: true,
&volume.Spec{
Volume: &api.Volume{
VolumeSource: api.VolumeSource{},
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{},
},
}: false,
}
@ -215,9 +215,9 @@ func TestGetFlockerVolumeSource(t *testing.T) {
p := flockerPlugin{}
spec := &volume.Spec{
Volume: &api.Volume{
VolumeSource: api.VolumeSource{
Flocker: &api.FlockerVolumeSource{},
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{},
},
},
}
@ -226,10 +226,10 @@ func TestGetFlockerVolumeSource(t *testing.T) {
assert.Equal(spec.Volume.Flocker, vs)
spec = &volume.Spec{
PersistentVolume: &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
Flocker: &api.FlockerVolumeSource{},
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Flocker: &v1.FlockerVolumeSource{},
},
},
},
@ -247,16 +247,16 @@ func TestNewMounterDatasetName(t *testing.T) {
assert.NoError(err)
spec := &volume.Spec{
Volume: &api.Volume{
VolumeSource: api.VolumeSource{
Flocker: &api.FlockerVolumeSource{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{
DatasetName: "something",
},
},
},
}
_, err = plug.NewMounter(spec, &api.Pod{}, volume.VolumeOptions{})
_, err = plug.NewMounter(spec, &v1.Pod{}, volume.VolumeOptions{})
assert.NoError(err)
}
@ -268,16 +268,16 @@ func TestNewMounterDatasetUUID(t *testing.T) {
assert.NoError(err)
spec := &volume.Spec{
Volume: &api.Volume{
VolumeSource: api.VolumeSource{
Flocker: &api.FlockerVolumeSource{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{
DatasetUUID: "uuid1",
},
},
},
}
mounter, err := plug.NewMounter(spec, &api.Pod{}, volume.VolumeOptions{})
mounter, err := plug.NewMounter(spec, &v1.Pod{}, volume.VolumeOptions{})
assert.NoError(err)
assert.NotNil(mounter, "got a nil mounter")
@ -349,7 +349,7 @@ func TestSetUpAtInternal(t *testing.T) {
plug, err := plugMgr.FindPluginByName(flockerPluginName)
assert.NoError(err)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
b := flockerVolumeMounter{flockerVolume: &flockerVolume{pod: pod, plugin: plug.(*flockerPlugin)}}
b.client = newMockFlockerClient("dataset-id", "primary-uid", mockPath)


@ -20,7 +20,7 @@ import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/rand"
"k8s.io/kubernetes/pkg/volume"
@ -71,7 +71,7 @@ func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID
node := nodes[rand.Intn(len(nodes))]
glog.V(2).Infof("selected flocker node with UUID '%s' to provision dataset", node.UUID)
capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
volumeSizeGB = int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
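RoundUpSize converts the claim's byte request into whole allocation units, rounding up so the provisioned dataset is never smaller than what was asked for. A sketch of the arithmetic, assuming it is plain ceiling division on int64:

    // assumption: RoundUpSize is ceiling division
    func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
        return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
    }

    // e.g. a 3Gi claim: 3221225472 bytes with a 1 GiB unit -> exactly 3
    // a 3 GB (3e9 byte) claim: 3000000000 / 1073741824 = 2.79... -> rounds up to 3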


@ -20,7 +20,7 @@ import (
"fmt"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
@ -31,10 +31,10 @@ func TestFlockerUtil_CreateVolume(t *testing.T) {
assert := assert.New(t)
// test CreateVolume happy path
pvc := volumetest.CreateTestPVC("3Gi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
pvc := volumetest.CreateTestPVC("3Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
options := volume.VolumeOptions{
PVC: pvc,
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
fakeFlockerClient := newFakeFlockerClient()


@ -19,8 +19,8 @@ package flocker
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/volume"
)
@ -52,7 +52,7 @@ type flockerVolumeProvisioner struct {
var _ volume.Provisioner = &flockerVolumeProvisioner{}
func (c *flockerVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
if len(c.options.Parameters) > 0 {
return nil, fmt.Errorf("Provisioning failed: Specified at least one unsupported parameter")
@ -67,22 +67,22 @@ func (c *flockerVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
return nil, err
}
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "flocker-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: api.PersistentVolumeSource{
Flocker: &api.FlockerVolumeSource{
PersistentVolumeSource: v1.PersistentVolumeSource{
Flocker: &v1.FlockerVolumeSource{
DatasetUUID: datasetUUID,
},
},


@ -20,8 +20,8 @@ import (
"fmt"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
@ -47,10 +47,10 @@ func newTestableProvisioner(assert *assert.Assertions, options volume.VolumeOpti
func TestProvision(t *testing.T) {
assert := assert.New(t)
pvc := volumetest.CreateTestPVC("3Gi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
pvc := volumetest.CreateTestPVC("3Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
options := volume.VolumeOptions{
PVC: pvc,
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner := newTestableProvisioner(assert, options)
@ -58,7 +58,7 @@ func TestProvision(t *testing.T) {
persistentSpec, err := provisioner.Provision()
assert.NoError(err, "Provision() failed: ", err)
cap := persistentSpec.Spec.Capacity[api.ResourceStorage]
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
assert.Equal(int64(3*1024*1024*1024), cap.Value())
@ -75,7 +75,7 @@ func TestProvision(t *testing.T) {
// parameters are not supported
options = volume.VolumeOptions{
PVC: pvc,
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
Parameters: map[string]string{
"not-supported-params": "test123",
},
@ -89,7 +89,7 @@ func TestProvision(t *testing.T) {
pvc.Spec.Selector = &unversioned.LabelSelector{MatchLabels: map[string]string{"key": "value"}}
options = volume.VolumeOptions{
PVC: pvc,
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner = newTestableProvisioner(assert, options)


@ -21,7 +21,7 @@ import (
"fmt"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
@ -224,9 +224,9 @@ func newDetacher(testcase *testcase) *gcePersistentDiskDetacher {
func createVolSpec(name string, readOnly bool) *volume.Spec {
return &volume.Spec{
Volume: &api.Volume{
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: name,
ReadOnly: readOnly,
},
@ -237,10 +237,10 @@ func createVolSpec(name string, readOnly bool) *volume.Spec {
func createPVSpec(name string, readOnly bool) *volume.Spec {
return &volume.Spec{
PersistentVolume: &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: name,
ReadOnly: readOnly,
},


@ -23,8 +23,8 @@ import (
"strconv"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
@ -80,20 +80,20 @@ func (plugin *gcePersistentDiskPlugin) RequiresRemount() bool {
return false
}
func (plugin *gcePersistentDiskPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
func (plugin *gcePersistentDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
}
}
func (plugin *gcePersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *gcePersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &GCEDiskUtil{}, plugin.host.GetMounter())
}
func getVolumeSource(
spec *volume.Spec) (*api.GCEPersistentDiskVolumeSource, bool, error) {
spec *volume.Spec) (*v1.GCEPersistentDiskVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
return spec.Volume.GCEPersistentDisk, spec.Volume.GCEPersistentDisk.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
@ -186,10 +186,10 @@ func (plugin *gcePersistentDiskPlugin) ConstructVolumeSpec(volumeName, mountPath
if err != nil {
return nil, err
}
gceVolume := &api.Volume{
gceVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: sourceName,
},
},
@ -381,28 +381,28 @@ type gcePersistentDiskProvisioner struct {
var _ volume.Provisioner = &gcePersistentDiskProvisioner{}
func (c *gcePersistentDiskProvisioner) Provision() (*api.PersistentVolume, error) {
func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
if err != nil {
return nil, err
}
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "gce-pd-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: api.PersistentVolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: volumeID,
Partition: 0,
ReadOnly: false,


@ -22,8 +22,8 @@ import (
"path"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@ -47,10 +47,10 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/gce-pd" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}}}}}) {
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
@ -68,12 +68,12 @@ func TestGetAccessModes(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", api.ReadWriteOnce, api.ReadOnlyMany)
if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
}
}
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
@ -111,10 +111,10 @@ func TestPlugin(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "pd",
FSType: "ext4",
},
@ -174,8 +174,8 @@ func TestPlugin(t *testing.T) {
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{})
persistentSpec, err := provisioner.Provision()
@ -186,7 +186,7 @@ func TestPlugin(t *testing.T) {
if persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName != "test-gce-volume-name" {
t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName)
}
cap := persistentSpec.Spec.Capacity[api.ResourceStorage]
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*1024*1024*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
@ -208,30 +208,30 @@ func TestPlugin(t *testing.T) {
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
},
ClaimRef: &api.ObjectReference{
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
@ -248,7 +248,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {


@ -24,7 +24,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/util/exec"
@ -78,7 +78,7 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
}
name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
// GCE works with gigabytes, convert to GiB with rounding up
requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)


@ -22,7 +22,7 @@ import (
"path"
"strings"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
@ -43,7 +43,7 @@ var _ volume.VolumePlugin = &gitRepoPlugin{}
func wrappedVolumeSpec() volume.Spec {
return volume.Spec{
Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
}
}
@ -81,7 +81,7 @@ func (plugin *gitRepoPlugin) RequiresRemount() bool {
return false
}
func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return &gitRepoVolumeMounter{
gitRepoVolume: &gitRepoVolume{
volName: spec.Name(),
@ -108,10 +108,10 @@ func (plugin *gitRepoPlugin) NewUnmounter(volName string, podUID types.UID) (vol
}
func (plugin *gitRepoPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
gitVolume := &api.Volume{
gitVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
GitRepo: &api.GitRepoVolumeSource{},
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{},
},
}
return volume.NewSpecFromVolume(gitVolume), nil
@ -137,7 +137,7 @@ func (gr *gitRepoVolume) GetPath() string {
type gitRepoVolumeMounter struct {
*gitRepoVolume
pod api.Pod
pod v1.Pod
source string
revision string
target string
@ -263,9 +263,9 @@ func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error {
return wrapped.TearDownAt(dir)
}
func getVolumeSource(spec *volume.Spec) (*api.GitRepoVolumeSource, bool) {
func getVolumeSource(spec *volume.Spec) (*v1.GitRepoVolumeSource, bool) {
var readOnly bool
var volumeSource *api.GitRepoVolumeSource
var volumeSource *v1.GitRepoVolumeSource
if spec.Volume != nil && spec.Volume.GitRepo != nil {
volumeSource = spec.Volume.GitRepo


@ -25,7 +25,7 @@ import (
"strings"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/volume"
@ -54,7 +54,7 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/git-repo" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{GitRepo: &v1.GitRepoVolumeSource{}}}}) {
t.Errorf("Expected true")
}
}
@ -73,16 +73,16 @@ func TestPlugin(t *testing.T) {
scenarios := []struct {
name string
vol *api.Volume
vol *v1.Volume
expecteds []expectedCommand
isExpectedFailure bool
}{
{
name: "target-dir",
vol: &api.Volume{
vol: &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
GitRepo: &api.GitRepoVolumeSource{
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
Revision: revision,
Directory: "target_dir",
@ -107,10 +107,10 @@ func TestPlugin(t *testing.T) {
},
{
name: "target-dir-no-revision",
vol: &api.Volume{
vol: &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
GitRepo: &api.GitRepoVolumeSource{
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
Directory: "target_dir",
},
@ -126,10 +126,10 @@ func TestPlugin(t *testing.T) {
},
{
name: "only-git-clone",
vol: &api.Volume{
vol: &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
GitRepo: &api.GitRepoVolumeSource{
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
},
},
@ -144,10 +144,10 @@ func TestPlugin(t *testing.T) {
},
{
name: "no-target-dir",
vol: &api.Volume{
vol: &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
GitRepo: &api.GitRepoVolumeSource{
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
Revision: revision,
Directory: "",
@ -172,10 +172,10 @@ func TestPlugin(t *testing.T) {
},
{
name: "current-dir",
vol: &api.Volume{
vol: &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
GitRepo: &api.GitRepoVolumeSource{
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
Revision: revision,
Directory: ".",
@ -214,7 +214,7 @@ func TestPlugin(t *testing.T) {
func doTestPlugin(scenario struct {
name string
vol *api.Volume
vol *v1.Volume
expecteds []expectedCommand
isExpectedFailure bool
}, t *testing.T) []error {
@ -231,7 +231,7 @@ func doTestPlugin(scenario struct {
fmt.Errorf("Can't find the plugin by name"))
return allErrs
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.NewMounter(volume.NewSpecFromVolume(scenario.vol), pod, volume.VolumeOptions{})
if err != nil {
@ -311,7 +311,7 @@ func doTestPlugin(scenario struct {
func doTestSetUp(scenario struct {
name string
vol *api.Volume
vol *v1.Volume
expecteds []expectedCommand
isExpectedFailure bool
}, mounter volume.Mounter) []error {


@ -20,22 +20,22 @@ import (
"fmt"
"os"
"path"
"runtime"
dstrings "strings"
"github.com/golang/glog"
gcli "github.com/heketi/heketi/client/api/go-client"
gapi "github.com/heketi/heketi/pkg/glusterfs/api"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
"runtime"
)
// This is the primary entrypoint for volume plugins.
@ -104,15 +104,15 @@ func (plugin *glusterfsPlugin) RequiresRemount() bool {
return false
}
func (plugin *glusterfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
api.ReadWriteMany,
func (plugin *glusterfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
}
}
func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
source, _ := plugin.getGlusterVolumeSource(spec)
ep_name := source.EndpointsName
// PVC/POD is in same ns.
@ -126,7 +126,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ vol
return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(), exec.New())
}
func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*api.GlusterfsVolumeSource, bool) {
func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*v1.GlusterfsVolumeSource, bool) {
// Glusterfs volumes used directly in a pod have a ReadOnly flag set by the pod author.
// Glusterfs volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
if spec.Volume != nil && spec.Volume.Glusterfs != nil {
@ -136,7 +136,7 @@ func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*api.G
}
}
func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *api.Endpoints, pod *api.Pod, mounter mount.Interface, exe exec.Interface) (volume.Mounter, error) {
func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endpoints, pod *v1.Pod, mounter mount.Interface, exe exec.Interface) (volume.Mounter, error) {
source, readOnly := plugin.getGlusterVolumeSource(spec)
return &glusterfsMounter{
glusterfs: &glusterfs{
@ -159,7 +159,7 @@ func (plugin *glusterfsPlugin) newUnmounterInternal(volName string, podUID types
return &glusterfsUnmounter{&glusterfs{
volName: volName,
mounter: mounter,
pod: &api.Pod{ObjectMeta: api.ObjectMeta{UID: podUID}},
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: podUID}},
plugin: plugin,
}}, nil
}
@ -170,10 +170,10 @@ func (plugin *glusterfsPlugin) execCommand(command string, args []string) ([]byt
}
func (plugin *glusterfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
glusterfsVolume := &api.Volume{
glusterfsVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
Glusterfs: &api.GlusterfsVolumeSource{
VolumeSource: v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: volumeName,
Path: volumeName,
},
@ -185,7 +185,7 @@ func (plugin *glusterfsPlugin) ConstructVolumeSpec(volumeName, mountPath string)
// Glusterfs volumes represent a bare host file or directory mount of an Glusterfs export.
type glusterfs struct {
volName string
pod *api.Pod
pod *v1.Pod
mounter mount.Interface
plugin *glusterfsPlugin
volume.MetricsNil
@ -193,7 +193,7 @@ type glusterfs struct {
type glusterfsMounter struct {
*glusterfs
hosts *api.Endpoints
hosts *v1.Endpoints
path string
readOnly bool
exe exec.Interface
@ -352,7 +352,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
}
func getVolumeSource(
spec *volume.Spec) (*api.GlusterfsVolumeSource, bool, error) {
spec *volume.Spec) (*v1.GlusterfsVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.Glusterfs != nil {
return spec.Volume.Glusterfs, spec.Volume.Glusterfs.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
@ -416,7 +416,7 @@ func (plugin *glusterfsPlugin) newDeleterInternal(spec *volume.Spec) (volume.Del
type glusterfsVolumeDeleter struct {
*glusterfsMounter
provisioningConfig
spec *api.PersistentVolume
spec *v1.PersistentVolume
}
func (d *glusterfsVolumeDeleter) GetPath() string {
@ -479,7 +479,7 @@ func (d *glusterfsVolumeDeleter) Delete() error {
return nil
}
func (r *glusterfsVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
var err error
if r.options.PVC.Spec.Selector != nil {
glog.V(4).Infof("glusterfs: not able to parse your claim Selector")
@ -499,21 +499,21 @@ func (r *glusterfsVolumeProvisioner) Provision() (*api.PersistentVolume, error)
glog.Errorf("glusterfs: create volume err: %v.", err)
return nil, fmt.Errorf("glusterfs: create volume err: %v.", err)
}
pv := new(api.PersistentVolume)
pv := new(v1.PersistentVolume)
pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs
pv.Spec.PersistentVolumeReclaimPolicy = r.options.PersistentVolumeReclaimPolicy
pv.Spec.AccessModes = r.options.PVC.Spec.AccessModes
if len(pv.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = r.plugin.GetAccessModes()
}
pv.Spec.Capacity = api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
pv.Spec.Capacity = v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
}
return pv, nil
}
func (p *glusterfsVolumeProvisioner) CreateVolume() (r *api.GlusterfsVolumeSource, size int, err error) {
capacity := p.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
func (p *glusterfsVolumeProvisioner) CreateVolume() (r *v1.GlusterfsVolumeSource, size int, err error) {
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
glog.V(2).Infof("glusterfs: create volume of size: %d bytes and configuration %+v", volSizeBytes, p.provisioningConfig)
@@ -573,30 +573,30 @@ func (p *glusterfsVolumeProvisioner) CreateVolume() (r *api.GlusterfsVolumeSourc
return nil, 0, fmt.Errorf("failed to create endpoint/service %v", err)
}
glog.V(3).Infof("glusterfs: dynamic ep %v and svc : %v ", endpoint, service)
return &api.GlusterfsVolumeSource{
return &v1.GlusterfsVolumeSource{
EndpointsName: endpoint.Name,
Path: volume.Name,
ReadOnly: false,
}, sz, nil
}
func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *api.Endpoints, service *api.Service, err error) {
func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *v1.Endpoints, service *v1.Service, err error) {
addrlist := make([]api.EndpointAddress, len(hostips))
addrlist := make([]v1.EndpointAddress, len(hostips))
for i, v := range hostips {
addrlist[i].IP = v
}
endpoint = &api.Endpoints{
ObjectMeta: api.ObjectMeta{
endpoint = &v1.Endpoints{
ObjectMeta: v1.ObjectMeta{
Namespace: namespace,
Name: epServiceName,
Labels: map[string]string{
"gluster.kubernetes.io/provisioned-for-pvc": pvcname,
},
},
Subsets: []api.EndpointSubset{{
Subsets: []v1.EndpointSubset{{
Addresses: addrlist,
Ports: []api.EndpointPort{{Port: 1, Protocol: "TCP"}},
Ports: []v1.EndpointPort{{Port: 1, Protocol: "TCP"}},
}},
}
_, err = p.plugin.host.GetKubeClient().Core().Endpoints(namespace).Create(endpoint)
@@ -608,16 +608,16 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS
glog.Errorf("glusterfs: failed to create endpoint %v", err)
return nil, nil, fmt.Errorf("error creating endpoint %v", err)
}
service = &api.Service{
ObjectMeta: api.ObjectMeta{
service = &v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: epServiceName,
Namespace: namespace,
Labels: map[string]string{
"gluster.kubernetes.io/provisioned-for-pvc": pvcname,
},
},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{Protocol: "TCP", Port: 1}}}}
_, err = p.plugin.host.GetKubeClient().Core().Services(namespace).Create(service)
if err != nil && errors.IsAlreadyExists(err) {
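
The hunk above ends on the pattern that makes provisioning retries safe: create the object, then treat AlreadyExists as success. A hedged sketch of that pattern in isolation; ensureService is a hypothetical helper, and the import set is assumed to match this file's:

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api/errors"
        "k8s.io/kubernetes/pkg/api/v1"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    )

    // ensureService creates svc, treating an existing object with the same
    // name as success so a retried provision does not fail spuriously.
    func ensureService(c clientset.Interface, svc *v1.Service) error {
        _, err := c.Core().Services(svc.Namespace).Create(svc)
        if err != nil && !errors.IsAlreadyExists(err) {
            return fmt.Errorf("error creating service: %v", err)
        }
        return nil
    }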


@@ -22,8 +22,8 @@ import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
@@ -50,10 +50,10 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/glusterfs" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{}}}}) {
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
@@ -72,12 +72,12 @@ func TestGetAccessModes(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) || !contains(plug.GetAccessModes(), api.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany)
if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) || !contains(plug.GetAccessModes(), v1.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
}
}
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
@@ -99,8 +99,8 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
ep := &api.Endpoints{ObjectMeta: api.ObjectMeta{Name: "foo"}, Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}}}}}
ep := &v1.Endpoints{ObjectMeta: v1.ObjectMeta{Name: "foo"}, Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}}}}
var fcmd exec.FakeCmd
fcmd = exec.FakeCmd{
CombinedOutputScript: []exec.FakeCombinedOutputAction{
@@ -115,7 +115,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
},
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, &mount.FakeMounter{}, &fake)
volumePath := mounter.GetPath()
if err != nil {
@@ -157,21 +157,21 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
}
func TestPluginVolume(t *testing.T) {
vol := &api.Volume{
vol := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}},
VolumeSource: v1.VolumeSource{Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolume(t *testing.T) {
vol := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
vol := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "vol1",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
},
},
}
@@ -186,41 +186,41 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
},
ClaimRef: &api.ObjectReference{
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
ep := &api.Endpoints{
ObjectMeta: api.ObjectMeta{
ep := &v1.Endpoints{
ObjectMeta: v1.ObjectMeta{
Namespace: "nsA",
Name: "ep",
},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 80, Protocol: api.ProtocolTCP}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}},
}},
}
@@ -232,7 +232,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
// The readOnly bool is supplied by the persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
@@ -241,7 +241,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
}
func TestParseClassParameters(t *testing.T) {
secret := api.Secret{
secret := v1.Secret{
Type: "kubernetes.io/glusterfs",
Data: map[string][]byte{
"data": []byte("mypassword"),
@@ -251,7 +251,7 @@ func TestParseClassParameters(t *testing.T) {
tests := []struct {
name string
parameters map[string]string
secret *api.Secret
secret *v1.Secret
expectError bool
expectConfig *provisioningConfig
}{


@@ -21,7 +21,7 @@ import (
"os"
"regexp"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/uuid"
@@ -83,13 +83,13 @@ func (plugin *hostPathPlugin) RequiresRemount() bool {
return false
}
func (plugin *hostPathPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
func (plugin *hostPathPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
func (plugin *hostPathPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *hostPathPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
hostPathVolumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
@@ -122,10 +122,10 @@ func (plugin *hostPathPlugin) NewProvisioner(options volume.VolumeOptions) (volu
}
func (plugin *hostPathPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
hostPathVolume := &api.Volume{
hostPathVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: volumeName,
},
},
@@ -249,11 +249,11 @@ func (r *hostPathRecycler) Recycle() error {
if err != nil {
return err
}
pod := templateClone.(*api.Pod)
pod := templateClone.(*v1.Pod)
// overrides
pod.Spec.ActiveDeadlineSeconds = &r.timeout
pod.Spec.Volumes[0].VolumeSource = api.VolumeSource{
HostPath: &api.HostPathVolumeSource{
pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: r.path,
},
}
@@ -270,25 +270,25 @@ type hostPathProvisioner struct {
// Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) {
func (r *hostPathProvisioner) Provision() (*v1.PersistentVolume, error) {
fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())
capacity := r.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
capacity := r.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: r.options.PVName,
Annotations: map[string]string{
"kubernetes.io/createdby": "hostpath-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
AccessModes: r.options.PVC.Spec.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): capacity,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): capacity,
},
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{
PersistentVolumeSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: fullpath,
},
},
@@ -326,7 +326,7 @@ func (r *hostPathDeleter) Delete() error {
}
func getVolumeSource(
spec *volume.Spec) (*api.HostPathVolumeSource, bool, error) {
spec *volume.Spec) (*v1.HostPathVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.HostPath != nil {
return spec.Volume.HostPath, spec.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
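
As the Provision comment above says, this provisioner only materializes a local directory and is unusable beyond a single node. A sketch of that essential step; provisionHostPathDir, the MkdirAll call, and the 0750 mode are assumptions for illustration, while the path format is taken from Provision above:

    import (
        "fmt"
        "os"

        "k8s.io/kubernetes/pkg/util/uuid"
    )

    // provisionHostPathDir creates the backing directory a returned
    // PersistentVolume would point at. Development and testing only.
    func provisionHostPathDir() (string, error) {
        fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())
        if err := os.MkdirAll(fullpath, 0750); err != nil {
            return "", err
        }
        return fullpath, nil
    }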


@@ -23,9 +23,9 @@ import (
"os"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/uuid"
@@ -44,13 +44,13 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/host-path" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{}}}}}) {
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
@@ -63,8 +63,8 @@ func TestGetAccessModes(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if len(plug.GetAccessModes()) != 1 || plug.GetAccessModes()[0] != api.ReadWriteOnce {
t.Errorf("Expected %s PersistentVolumeAccessMode", api.ReadWriteOnce)
if len(plug.GetAccessModes()) != 1 || plug.GetAccessModes()[0] != v1.ReadWriteOnce {
t.Errorf("Expected %s PersistentVolumeAccessMode", v1.ReadWriteOnce)
}
}
@@ -73,7 +73,7 @@ func TestRecycler(t *testing.T) {
pluginHost := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.VolumeConfig{}}}, pluginHost)
spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/foo"}}}}}
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/foo"}}}}}
plug, err := plugMgr.FindRecyclablePluginBySpec(spec)
if err != nil {
t.Errorf("Can't find the plugin by name")
@@ -99,7 +99,7 @@ func TestDeleter(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: tempPath}}}}}
plug, err := plugMgr.FindDeletablePluginBySpec(spec)
if err != nil {
t.Errorf("Can't find the plugin by name")
@@ -132,7 +132,7 @@ func TestDeleterTempDir(t *testing.T) {
for name, test := range tests {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: test.path}}}}}
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: test.path}}}}}
plug, _ := plugMgr.FindDeletablePluginBySpec(spec)
deleter, _ := plug.NewDeleter(spec)
err := deleter.Delete()
@@ -153,14 +153,14 @@ func TestProvisioner(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{ProvisioningEnabled: true}),
volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: tempPath}}}}}
plug, err := plugMgr.FindCreatablePluginBySpec(spec)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("1Gi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
PVC: volumetest.CreateTestPVC("1Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
creater, err := plug.NewProvisioner(options)
if err != nil {
@@ -174,15 +174,15 @@ func TestProvisioner(t *testing.T) {
t.Errorf("Expected pv.Spec.HostPath.Path to not be empty: %#v", pv)
}
expectedCapacity := resource.NewQuantity(1*1024*1024*1024, resource.BinarySI)
actualCapacity := pv.Spec.Capacity[api.ResourceStorage]
actualCapacity := pv.Spec.Capacity[v1.ResourceStorage]
expectedAmt := expectedCapacity.Value()
actualAmt := actualCapacity.Value()
if expectedAmt != actualAmt {
t.Errorf("Expected capacity %+v but got %+v", expectedAmt, actualAmt)
}
if pv.Spec.PersistentVolumeReclaimPolicy != api.PersistentVolumeReclaimDelete {
t.Errorf("Expected reclaim policy %+v but got %+v", api.PersistentVolumeReclaimDelete, pv.Spec.PersistentVolumeReclaimPolicy)
if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
t.Errorf("Expected reclaim policy %+v but got %+v", v1.PersistentVolumeReclaimDelete, pv.Spec.PersistentVolumeReclaimPolicy)
}
os.RemoveAll(pv.Spec.HostPath.Path)
@@ -196,11 +196,11 @@ func TestPlugin(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/vol1"}},
VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/vol1"}},
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@@ -232,30 +232,30 @@ func TestPlugin(t *testing.T) {
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{Path: "foo"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "foo"},
},
ClaimRef: &api.ObjectReference{
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
@@ -267,7 +267,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
// The readOnly bool is supplied by the persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {


@@ -22,7 +22,7 @@ import (
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
@@ -82,14 +82,14 @@ func (plugin *iscsiPlugin) RequiresRemount() bool {
return false
}
func (plugin *iscsiPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
func (plugin *iscsiPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
}
}
func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &ISCSIUtil{}, plugin.host.GetMounter())
}
@@ -147,10 +147,10 @@ func (plugin *iscsiPlugin) execCommand(command string, args []string) ([]byte, e
}
func (plugin *iscsiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
iscsiVolume := &api.Volume{
iscsiVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
ISCSI: &api.ISCSIVolumeSource{
VolumeSource: v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: volumeName,
IQN: volumeName,
},
@@ -240,7 +240,7 @@ func portalMounter(portal string) string {
return portal
}
func getVolumeSource(spec *volume.Spec) (*api.ISCSIVolumeSource, bool, error) {
func getVolumeSource(spec *volume.Spec) (*v1.ISCSIVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.ISCSI != nil {
return spec.Volume.ISCSI, spec.Volume.ISCSI.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
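
portalMounter appears above only as a truncated hunk; its likely job, sketched here as an assumption rather than the confirmed implementation, is to default the standard iSCSI port when the portal omits one:

    import "strings"

    // portalMounter normalizes "10.0.0.1" to "10.0.0.1:3260"; portals that
    // already carry a port are returned unchanged.
    func portalMounter(portal string) string {
        if !strings.Contains(portal, ":") {
            portal = portal + ":3260"
        }
        return portal
    }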


@@ -21,8 +21,8 @@ import (
"os"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -47,7 +47,7 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/iscsi" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
@@ -66,12 +66,12 @@ func TestGetAccessModes(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", api.ReadWriteOnce, api.ReadOnlyMany)
if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
}
}
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
@@ -199,10 +199,10 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
}
func TestPluginVolume(t *testing.T) {
vol := &api.Volume{
vol := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
ISCSI: &api.ISCSIVolumeSource{
VolumeSource: v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: "127.0.0.1:3260",
IQN: "iqn.2014-12.server:storage.target01",
FSType: "ext4",
@@ -214,13 +214,13 @@ func TestPluginVolume(t *testing.T) {
}
func TestPluginPersistentVolume(t *testing.T) {
vol := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
vol := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "vol1",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
ISCSI: &api.ISCSIVolumeSource{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: "127.0.0.1:3260",
IQN: "iqn.2014-12.server:storage.target01",
FSType: "ext4",
@@ -239,35 +239,35 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
ISCSI: &api.ISCSIVolumeSource{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: "127.0.0.1:3260",
IQN: "iqn.2014-12.server:storage.target01",
FSType: "ext4",
Lun: 0,
},
},
ClaimRef: &api.ObjectReference{
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
@@ -279,7 +279,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
// The readOnly bool is supplied by the persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {


@@ -22,7 +22,7 @@ import (
"runtime"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
@@ -87,19 +87,19 @@ func (plugin *nfsPlugin) RequiresRemount() bool {
return false
}
func (plugin *nfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
api.ReadWriteMany,
func (plugin *nfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
}
}
func (plugin *nfsPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *nfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter())
}
func (plugin *nfsPlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface) (volume.Mounter, error) {
func (plugin *nfsPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) {
source, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
@@ -126,7 +126,7 @@ func (plugin *nfsPlugin) newUnmounterInternal(volName string, podUID types.UID,
return &nfsUnmounter{&nfs{
volName: volName,
mounter: mounter,
pod: &api.Pod{ObjectMeta: api.ObjectMeta{UID: podUID}},
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: podUID}},
plugin: plugin,
}}, nil
}
@@ -136,10 +136,10 @@ func (plugin *nfsPlugin) NewRecycler(pvName string, spec *volume.Spec, eventReco
}
func (plugin *nfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
nfsVolume := &api.Volume{
nfsVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
NFS: &api.NFSVolumeSource{
VolumeSource: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Path: volumeName,
},
},
@@ -150,7 +150,7 @@ func (plugin *nfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*vol
// NFS volumes represent a bare host file or directory mount of an NFS export.
type nfs struct {
volName string
pod *api.Pod
pod *v1.Pod
mounter mount.Interface
plugin *nfsPlugin
volume.MetricsNil
@@ -337,12 +337,12 @@ func (r *nfsRecycler) Recycle() error {
if err != nil {
return err
}
pod := templateClone.(*api.Pod)
pod := templateClone.(*v1.Pod)
// overrides
pod.Spec.ActiveDeadlineSeconds = &r.timeout
pod.GenerateName = "pv-recycler-nfs-"
pod.Spec.Volumes[0].VolumeSource = api.VolumeSource{
NFS: &api.NFSVolumeSource{
pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: r.server,
Path: r.path,
},
@@ -350,7 +350,7 @@ func (r *nfsRecycler) Recycle() error {
return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient(), r.eventRecorder)
}
func getVolumeSource(spec *volume.Spec) (*api.NFSVolumeSource, bool, error) {
func getVolumeSource(spec *volume.Spec) (*v1.NFSVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.NFS != nil {
return spec.Volume.NFS, spec.Volume.NFS.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
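
Both the hostPath and NFS recyclers above follow the same recipe: clone a template pod, then override the one field the template deliberately leaves empty. A hedged sketch of that calling convention using the template constructor defined in plugins.go later in this diff; nfsRecyclerPod and its server/path parameters are illustrative only:

    import (
        "k8s.io/kubernetes/pkg/api/v1"
        "k8s.io/kubernetes/pkg/volume"
    )

    // nfsRecyclerPod performs the mandatory override; a recycler pod left
    // with the template's empty VolumeSource will fail, as its comment warns.
    func nfsRecyclerPod(server, path string) *v1.Pod {
        pod := volume.NewPersistentVolumeRecyclerPodTemplate()
        pod.GenerateName = "pv-recycler-nfs-"
        pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
            NFS: &v1.NFSVolumeSource{Server: server, Path: path},
        }
        return pod
    }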


@@ -21,8 +21,8 @@ import (
"os"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -47,13 +47,13 @@ func TestCanSupport(t *testing.T) {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{NFS: &api.NFSVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{NFS: &v1.NFSVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{NFS: &api.NFSVolumeSource{}}}}}) {
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{NFS: &v1.NFSVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
@@ -72,8 +72,8 @@ func TestGetAccessModes(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) || !contains(plug.GetAccessModes(), api.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany)
if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) || !contains(plug.GetAccessModes(), v1.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
}
}
@@ -87,7 +87,7 @@ func TestRecycler(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins([]volume.VolumePlugin{&nfsPlugin{nil, volume.VolumeConfig{}}}, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{NFS: &api.NFSVolumeSource{Path: "/foo"}}}}}
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{NFS: &v1.NFSVolumeSource{Path: "/foo"}}}}}
plug, err := plugMgr.FindRecyclablePluginBySpec(spec)
if err != nil {
t.Errorf("Can't find the plugin by name")
@@ -116,7 +116,7 @@ func (r *mockRecycler) Recycle() error {
return nil
}
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
@@ -139,7 +139,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
t.Errorf("Can't find the plugin by name")
}
fake := &mount.FakeMounter{}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*nfsPlugin).newMounterInternal(spec, pod, fake)
volumePath := mounter.GetPath()
if err != nil {
@@ -202,21 +202,21 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
}
func TestPluginVolume(t *testing.T) {
vol := &api.Volume{
vol := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{NFS: &api.NFSVolumeSource{Server: "localhost", Path: "/somepath", ReadOnly: false}},
VolumeSource: v1.VolumeSource{NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/somepath", ReadOnly: false}},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolume(t *testing.T) {
vol := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
vol := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "vol1",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
NFS: &api.NFSVolumeSource{Server: "localhost", Path: "/somepath", ReadOnly: false},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/somepath", ReadOnly: false},
},
},
}
@@ -231,30 +231,30 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
NFS: &api.NFSVolumeSource{},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{},
},
ClaimRef: &api.ObjectReference{
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
@@ -266,7 +266,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
// The readOnly bool is supplied by the persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {


@@ -20,7 +20,7 @@ import (
"errors"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider/providers/photon"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -187,9 +187,9 @@ func newDetacher(testcase *testcase) *photonPersistentDiskDetacher {
func createVolSpec(name string, readOnly bool) *volume.Spec {
return &volume.Spec{
Volume: &api.Volume{
VolumeSource: api.VolumeSource{
PhotonPersistentDisk: &api.PhotonPersistentDiskVolumeSource{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{
PdID: name,
},
},
@@ -199,10 +199,10 @@ func createVolSpec(name string, readOnly bool) *volume.Spec {
func createPVSpec(name string, readOnly bool) *volume.Spec {
return &volume.Spec{
PersistentVolume: &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
PhotonPersistentDisk: &api.PhotonPersistentDiskVolumeSource{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{
PdID: name,
},
},


@@ -22,8 +22,8 @@ import (
"path"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
@@ -77,7 +77,7 @@ func (plugin *photonPersistentDiskPlugin) RequiresRemount() bool {
return false
}
func (plugin *photonPersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *photonPersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, &PhotonDiskUtil{}, plugin.host.GetMounter())
}
@@ -120,10 +120,10 @@ func (plugin *photonPersistentDiskPlugin) newUnmounterInternal(volName string, p
}
func (plugin *photonPersistentDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
photonPersistentDisk := &api.Volume{
photonPersistentDisk := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
PhotonPersistentDisk: &api.PhotonPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{
PdID: volumeName,
},
},
@@ -292,9 +292,9 @@ func (ppd *photonPersistentDisk) GetPath() string {
}
// TODO: support more access modes for PhotonController persistent disk
func (plugin *photonPersistentDiskPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
func (plugin *photonPersistentDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
@@ -346,28 +346,28 @@ func (plugin *photonPersistentDiskPlugin) newProvisionerInternal(options volume.
}, nil
}
func (p *photonPersistentDiskProvisioner) Provision() (*api.PersistentVolume, error) {
func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
pdID, sizeGB, err := p.manager.CreateVolume(p)
if err != nil {
return nil, err
}
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: p.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "photon-volume-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
AccessModes: p.options.PVC.Spec.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: api.PersistentVolumeSource{
PhotonPersistentDisk: &api.PhotonPersistentDiskVolumeSource{
PersistentVolumeSource: v1.PersistentVolumeSource{
PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{
PdID: pdID,
FSType: "ext4",
},
@@ -382,7 +382,7 @@ func (p *photonPersistentDiskProvisioner) Provision() (*api.PersistentVolume, er
}
func getVolumeSource(
spec *volume.Spec) (*api.PhotonPersistentDiskVolumeSource, bool, error) {
spec *volume.Spec) (*v1.PhotonPersistentDiskVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.PhotonPersistentDisk != nil {
return spec.Volume.PhotonPersistentDisk, spec.ReadOnly, nil
} else if spec.PersistentVolume != nil &&


@@ -22,7 +22,7 @@ import (
"path"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -46,10 +46,10 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/photon-pd" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{PhotonPersistentDisk: &api.PhotonPersistentDiskVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{PhotonPersistentDisk: &api.PhotonPersistentDiskVolumeSource{}}}}}) {
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
@@ -68,15 +68,15 @@ func TestGetAccessModes(t *testing.T) {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), api.ReadWriteOnce) {
t.Errorf("Expected to support AccessModeTypes: %s", api.ReadWriteOnce)
if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) {
t.Errorf("Expected to support AccessModeTypes: %s", v1.ReadWriteOnce)
}
if contains(plug.GetAccessModes(), api.ReadOnlyMany) {
t.Errorf("Expected not to support AccessModeTypes: %s", api.ReadOnlyMany)
if contains(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected not to support AccessModeTypes: %s", v1.ReadOnlyMany)
}
}
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
@@ -112,10 +112,10 @@ func TestPlugin(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
PhotonPersistentDisk: &api.PhotonPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{
PdID: "pdid",
FSType: "ext4",
},
@@ -175,8 +175,8 @@ func TestPlugin(t *testing.T) {
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("10Gi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
PVC: volumetest.CreateTestPVC("10Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*photonPersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{})
persistentSpec, err := provisioner.Provision()
@@ -187,7 +187,7 @@ func TestPlugin(t *testing.T) {
if persistentSpec.Spec.PersistentVolumeSource.PhotonPersistentDisk.PdID != "test-photon-pd-id" {
t.Errorf("Provision() returned unexpected persistent disk ID: %s", persistentSpec.Spec.PersistentVolumeSource.PhotonPersistentDisk.PdID)
}
cap := persistentSpec.Spec.Capacity[api.ResourceStorage]
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 10*1024*1024*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
@@ -217,10 +217,10 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
PhotonPersistentDisk: &api.PhotonPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{
PdID: "pdid",
FSType: "ext4",
},


@@ -24,7 +24,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/photon"
"k8s.io/kubernetes/pkg/volume"
@@ -87,7 +87,7 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd
return "", 0, err
}
capacity := p.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// PhotonController works in GB; convert bytes to GB, rounding up
volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
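
volume.RoundUpSize is ceiling division over the allocation unit; the body sketched below is the standard trick and is an assumption of this note, not part of the diff, followed by two worked values:

    // roundUpSize: ceiling division, (bytes + unit - 1) / unit.
    func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
        return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
    }

    // A 1.5 GB request still allocates 2 GB:
    //   roundUpSize(1610612736, 1024*1024*1024) == 2
    // while an exact 10 GiB request stays at 10:
    //   roundUpSize(10*1024*1024*1024, 1024*1024*1024) == 10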


@@ -23,8 +23,8 @@ import (
"sync"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
@@ -40,7 +40,7 @@ type VolumeOptions struct {
// many kinds of provisioners.
// Reclamation policy for a persistent volume
PersistentVolumeReclaimPolicy api.PersistentVolumeReclaimPolicy
PersistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy
// PV.Name of the appropriate PersistentVolume. Used to generate cloud
// volume name.
PVName string
@@ -48,7 +48,7 @@ type VolumeOptions struct {
// Provisioners *must* create a PV that would be matched by this PVC,
// i.e. with required capacity, accessMode, labels matching PVC.Selector and
// so on.
PVC *api.PersistentVolumeClaim
PVC *v1.PersistentVolumeClaim
// Unique name of Kubernetes cluster.
ClusterName string
// Tags to attach to the real volume in the cloud provider - e.g. AWS EBS
@@ -90,12 +90,12 @@ type VolumePlugin interface {
// NewMounter creates a new volume.Mounter from an API specification.
// Ownership of the spec pointer is *not* transferred.
// - spec: The api.Volume spec
// - spec: The v1.Volume spec
// - pod: The enclosing pod
NewMounter(spec *Spec, podRef *api.Pod, opts VolumeOptions) (Mounter, error)
NewMounter(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (Mounter, error)
// NewUnmounter creates a new volume.Unmounter from recoverable state.
// - name: The volume name, as per the api.Volume spec.
// - name: The volume name, as per the v1.Volume spec.
// - podUID: The UID of the enclosing pod
NewUnmounter(name string, podUID types.UID) (Unmounter, error)
@@ -111,7 +111,7 @@ type VolumePlugin interface {
type PersistentVolumePlugin interface {
VolumePlugin
// GetAccessModes describes the ways a given volume can be accessed/mounted.
GetAccessModes() []api.PersistentVolumeAccessMode
GetAccessModes() []v1.PersistentVolumeAccessMode
}
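
Callers that hold only the base VolumePlugin reach GetAccessModes through a type assertion; a minimal sketch in this package's terms, with accessModesFor hypothetical:

    // accessModesFor narrows a plugin to PersistentVolumePlugin when possible.
    func accessModesFor(plug VolumePlugin) []v1.PersistentVolumeAccessMode {
        if pvPlug, ok := plug.(PersistentVolumePlugin); ok {
            return pvPlug.GetAccessModes()
        }
        return nil // the plugin does not back PersistentVolumes
    }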
// RecyclableVolumePlugin is an extended interface of VolumePlugin and is used
@@ -190,7 +190,7 @@ type VolumeHost interface {
// the provided spec. This is used to implement volume plugins which
// "wrap" other plugins. For example, the "secret" volume is
// implemented in terms of the "emptyDir" volume.
NewWrapperMounter(volName string, spec Spec, pod *api.Pod, opts VolumeOptions) (Mounter, error)
NewWrapperMounter(volName string, spec Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error)
// NewWrapperUnmounter finds an appropriate plugin with which to handle
// the provided spec. See comments on NewWrapperMounter for more
@@ -213,7 +213,7 @@ type VolumeHost interface {
GetHostIP() (net.IP, error)
// Returns node allocatable
GetNodeAllocatable() (api.ResourceList, error)
GetNodeAllocatable() (v1.ResourceList, error)
}
// VolumePluginMgr tracks registered plugins.
@@ -224,8 +224,8 @@ type VolumePluginMgr struct {
// Spec is an internal representation of a volume. All API volume types translate to Spec.
type Spec struct {
Volume *api.Volume
PersistentVolume *api.PersistentVolume
Volume *v1.Volume
PersistentVolume *v1.PersistentVolume
ReadOnly bool
}
@@ -269,7 +269,7 @@ type VolumeConfig struct {
// which override specific properties of the pod in accordance with that
// plugin. See NewPersistentVolumeRecyclerPodTemplate for the properties
// that are expected to be overridden.
RecyclerPodTemplate *api.Pod
RecyclerPodTemplate *v1.Pod
// RecyclerMinimumTimeout is the minimum amount of time in seconds for the
// recycler pod's ActiveDeadlineSeconds attribute. Added to the minimum
@@ -296,15 +296,15 @@ type VolumeConfig struct {
ProvisioningEnabled bool
}
// NewSpecFromVolume creates an Spec from an api.Volume
func NewSpecFromVolume(vs *api.Volume) *Spec {
// NewSpecFromVolume creates a Spec from a v1.Volume
func NewSpecFromVolume(vs *v1.Volume) *Spec {
return &Spec{
Volume: vs,
}
}
// NewSpecFromPersistentVolume creates an Spec from an api.PersistentVolume
func NewSpecFromPersistentVolume(pv *api.PersistentVolume, readOnly bool) *Spec {
// NewSpecFromPersistentVolume creates a Spec from a v1.PersistentVolume
func NewSpecFromPersistentVolume(pv *v1.PersistentVolume, readOnly bool) *Spec {
return &Spec{
PersistentVolume: pv,
ReadOnly: readOnly,
@@ -526,32 +526,32 @@ func (pm *VolumePluginMgr) FindAttachablePluginByName(name string) (AttachableVo
// before failing. Recommended. Default is 60 seconds.
//
// See HostPath and NFS for working recycler examples
func NewPersistentVolumeRecyclerPodTemplate() *api.Pod {
func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod {
timeout := int64(60)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
GenerateName: "pv-recycler-",
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
ActiveDeadlineSeconds: &timeout,
RestartPolicy: api.RestartPolicyNever,
Volumes: []api.Volume{
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "vol",
// IMPORTANT! All plugins using this template MUST
// override pod.Spec.Volumes[0].VolumeSource. Recycler
// implementations without a valid VolumeSource will fail.
VolumeSource: api.VolumeSource{},
VolumeSource: v1.VolumeSource{},
},
},
Containers: []api.Container{
Containers: []v1.Container{
{
Name: "pv-recycler",
Image: "gcr.io/google_containers/busybox",
Command: []string{"/bin/sh"},
Args: []string{"-c", "test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \"$(ls -A /scrub)\" || exit 1"},
VolumeMounts: []api.VolumeMount{
VolumeMounts: []v1.VolumeMount{
{
Name: "vol",
MountPath: "/scrub",
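
Stepping back from the (truncated) template literal above: the Spec constructors earlier in this file are the usual entry point. A hedged sketch of the end-to-end flow, with mounterFor hypothetical and FindPluginBySpec assumed to be the VolumePluginMgr lookup used elsewhere in the tree:

    // mounterFor wraps an API volume in a Spec, locates a supporting plugin,
    // and asks that plugin for a Mounter.
    func mounterFor(mgr *VolumePluginMgr, vol *v1.Volume, pod *v1.Pod) (Mounter, error) {
        spec := NewSpecFromVolume(vol)
        plug, err := mgr.FindPluginBySpec(spec)
        if err != nil {
            return nil, err
        }
        return plug.NewMounter(spec, pod, VolumeOptions{})
    }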


@@ -19,13 +19,13 @@ package volume
import (
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
func TestSpecSourceConverters(t *testing.T) {
v := &api.Volume{
v := &v1.Volume{
Name: "foo",
VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}},
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
}
converted := NewSpecFromVolume(v)
@@ -36,10 +36,10 @@ func TestSpecSourceConverters(t *testing.T) {
t.Errorf("Expected %v but got %v", v.Name, converted.Name())
}
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{Name: "bar"},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{}},
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{Name: "bar"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}},
},
}


@@ -24,8 +24,8 @@ import (
"github.com/golang/glog"
"github.com/pborman/uuid"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
@@ -117,15 +117,15 @@ func (plugin *quobytePlugin) RequiresRemount() bool {
return false
}
func (plugin *quobytePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
api.ReadWriteMany,
func (plugin *quobytePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
}
}
func getVolumeSource(spec *volume.Spec) (*api.QuobyteVolumeSource, bool, error) {
func getVolumeSource(spec *volume.Spec) (*v1.QuobyteVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.Quobyte != nil {
return spec.Volume.Quobyte, spec.Volume.Quobyte.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
@@ -137,10 +137,10 @@ func getVolumeSource(spec *volume.Spec) (*api.QuobyteVolumeSource, bool, error)
}
func (plugin *quobytePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
quobyteVolume := &api.Volume{
quobyteVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
Quobyte: &api.QuobyteVolumeSource{
VolumeSource: v1.VolumeSource{
Quobyte: &v1.QuobyteVolumeSource{
Volume: volumeName,
},
},
@@ -148,11 +148,11 @@ func (plugin *quobytePlugin) ConstructVolumeSpec(volumeName, mountPath string) (
return volume.NewSpecFromVolume(quobyteVolume), nil
}
func (plugin *quobytePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *quobytePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter())
}
func (plugin *quobytePlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface) (volume.Mounter, error) {
func (plugin *quobytePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) {
source, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
@@ -182,7 +182,7 @@ func (plugin *quobytePlugin) newUnmounterInternal(volName string, podUID types.U
&quobyte{
volName: volName,
mounter: mounter,
pod: &api.Pod{ObjectMeta: api.ObjectMeta{UID: podUID}},
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: podUID}},
plugin: plugin,
},
}, nil
@@ -191,7 +191,7 @@ func (plugin *quobytePlugin) newUnmounterInternal(volName string, podUID types.U
// Quobyte volumes represent a bare host directory mount of a Quobyte export.
type quobyte struct {
volName string
pod *api.Pod
pod *v1.Pod
user string
group string
volume string
@@ -293,7 +293,7 @@ func (unmounter *quobyteUnmounter) TearDownAt(dir string) error {
type quobyteVolumeDeleter struct {
*quobyteMounter
pv *api.PersistentVolume
pv *v1.PersistentVolume
}
func (plugin *quobytePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
@@ -346,7 +346,7 @@ type quobyteVolumeProvisioner struct {
options volume.VolumeOptions
}
func (provisioner *quobyteVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
if provisioner.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("claim Selector is not supported")
}
@@ -393,15 +393,15 @@ func (provisioner *quobyteVolumeProvisioner) Provision() (*api.PersistentVolume,
if err != nil {
return nil, err
}
pv := new(api.PersistentVolume)
pv := new(v1.PersistentVolume)
pv.Spec.PersistentVolumeSource.Quobyte = vol
pv.Spec.PersistentVolumeReclaimPolicy = provisioner.options.PersistentVolumeReclaimPolicy
pv.Spec.AccessModes = provisioner.options.PVC.Spec.AccessModes
if len(pv.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = provisioner.plugin.GetAccessModes()
}
pv.Spec.Capacity = api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
pv.Spec.Capacity = v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
}
return pv, nil
}
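
Note the defaulting in Provision above: a claim that requests no access modes receives everything the plugin supports, the same fallback the glusterfs provisioner uses earlier in this diff. In isolation, with defaultAccessModes hypothetical and the v1/volume imports taken from this file:

    // defaultAccessModes fills an empty mode list from the plugin's full set.
    func defaultAccessModes(pv *v1.PersistentVolume, plug volume.PersistentVolumePlugin) {
        if len(pv.Spec.AccessModes) == 0 {
            pv.Spec.AccessModes = plug.GetAccessModes()
        }
    }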


@@ -21,8 +21,8 @@ import (
"os"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@ -46,10 +46,10 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/quobyte" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{}}}}) {
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
@ -68,12 +68,12 @@ func TestGetAccessModes(t *testing.T) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) || !contains(plug.GetAccessModes(), api.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany)
if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) || !contains(plug.GetAccessModes(), v1.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
}
}
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
@ -96,7 +96,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*quobytePlugin).newMounterInternal(spec, pod, &mount.FakeMounter{})
volumePath := mounter.GetPath()
if err != nil {
@ -126,23 +126,23 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
}
func TestPluginVolume(t *testing.T) {
vol := &api.Volume{
vol := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
Quobyte: &api.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "root", Group: "root"},
VolumeSource: v1.VolumeSource{
Quobyte: &v1.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "root", Group: "root"},
},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolume(t *testing.T) {
vol := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
vol := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "vol1",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
Quobyte: &api.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "root", Group: "root"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Quobyte: &v1.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "root", Group: "root"},
},
},
}
@ -151,30 +151,30 @@ func TestPluginPersistentVolume(t *testing.T) {
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
Quobyte: &api.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "root", Group: "root"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Quobyte: &v1.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "root", Group: "root"},
},
ClaimRef: &api.ObjectReference{
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
@ -191,7 +191,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
// The readOnly bool is supplied by the persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {

View File

@ -21,7 +21,7 @@ import (
"path"
"strings"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/volume"
"github.com/golang/glog"
@ -32,8 +32,8 @@ type quobyteVolumeManager struct {
config *quobyteAPIConfig
}
func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner) (quobyte *api.QuobyteVolumeSource, size int, err error) {
capacity := provisioner.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner) (quobyte *v1.QuobyteVolumeSource, size int, err error) {
capacity := provisioner.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volumeSize := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024))
// Quobyte has the concept of Volumes which don't have a specific size (they can grow without limit);
// to simulate a size constraint we could set a Quota here
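The GiB rounding above is easy to misread, so here is a minimal sketch of the round-up behavior. roundUpSize is a local reimplementation for illustration only; the plugin itself calls volume.RoundUpSize with a 1024*1024*1024-byte allocation unit, as shown in the hunk above.

package main

import "fmt"

// roundUpSize mirrors the documented behavior: divide by the allocation
// unit and round any remainder up to the next whole unit.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
    return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
    requested := int64(1536) * 1024 * 1024              // a 1.5Gi PVC request
    fmt.Println(roundUpSize(requested, 1024*1024*1024)) // prints 2
}

Any fractional unit therefore bumps the result up to the next whole GiB, which is why a 1.5Gi claim ends up as a 2GiB volume size.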
@ -46,11 +46,11 @@ func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProv
}
if _, err := manager.createQuobyteClient().CreateVolume(volumeRequest); err != nil {
return &api.QuobyteVolumeSource{}, volumeSize, err
return &v1.QuobyteVolumeSource{}, volumeSize, err
}
glog.V(4).Infof("Created Quobyte volume %s", provisioner.volume)
return &api.QuobyteVolumeSource{
return &v1.QuobyteVolumeSource{
Registry: provisioner.registry,
Volume: provisioner.volume,
User: provisioner.user,

View File

@ -26,7 +26,7 @@ import (
"os"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
@ -39,7 +39,7 @@ type diskManager interface {
// Detaches the disk from the kubelet's host machine.
DetachDisk(disk rbdUnmounter, mntPath string) error
// Creates a rbd image
CreateImage(provisioner *rbdVolumeProvisioner) (r *api.RBDVolumeSource, volumeSizeGB int, err error)
CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDVolumeSource, volumeSizeGB int, err error)
// Deletes a rbd image
DeleteImage(deleter *rbdVolumeDeleter) error
}

View File

@ -21,9 +21,9 @@ import (
dstrings "strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
@ -86,14 +86,14 @@ func (plugin *rbdPlugin) RequiresRemount() bool {
return false
}
func (plugin *rbdPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
func (plugin *rbdPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
}
}
func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
var secret string
var err error
source, _ := plugin.getRBDVolumeSource(spec)
@ -109,7 +109,7 @@ func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.Vo
return plugin.newMounterInternal(spec, pod.UID, &RBDUtil{}, plugin.host.GetMounter(), secret)
}
func (plugin *rbdPlugin) getRBDVolumeSource(spec *volume.Spec) (*api.RBDVolumeSource, bool) {
func (plugin *rbdPlugin) getRBDVolumeSource(spec *volume.Spec) (*v1.RBDVolumeSource, bool) {
// rbd volumes used directly in a pod have a ReadOnly flag set by the pod author.
// rbd volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
if spec.Volume != nil && spec.Volume.RBD != nil {
@ -165,10 +165,10 @@ func (plugin *rbdPlugin) newUnmounterInternal(volName string, podUID types.UID,
}
func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
rbdVolume := &api.Volume{
rbdVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
RBD: &api.RBDVolumeSource{
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{},
},
},
@ -244,7 +244,7 @@ type rbdVolumeProvisioner struct {
options volume.VolumeOptions
}
func (r *rbdVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
if r.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("claim Selector is not supported")
}
@ -311,8 +311,8 @@ func (r *rbdVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
return nil, fmt.Errorf("rbd: create volume failed, err: %v", err)
}
glog.Infof("successfully created rbd image %q", image)
pv := new(api.PersistentVolume)
rbd.SecretRef = new(api.LocalObjectReference)
pv := new(v1.PersistentVolume)
rbd.SecretRef = new(v1.LocalObjectReference)
rbd.SecretRef.Name = secretName
rbd.RadosUser = r.Id
pv.Spec.PersistentVolumeSource.RBD = rbd
@ -321,8 +321,8 @@ func (r *rbdVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
if len(pv.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = r.plugin.GetAccessModes()
}
pv.Spec.Capacity = api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", sizeMB)),
pv.Spec.Capacity = v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", sizeMB)),
}
return pv, nil
}
@ -424,7 +424,7 @@ func (plugin *rbdPlugin) execCommand(command string, args []string) ([]byte, err
}
func getVolumeSource(
spec *volume.Spec) (*api.RBDVolumeSource, bool, error) {
spec *volume.Spec) (*v1.RBDVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.RBD != nil {
return spec.Volume.RBD, spec.Volume.RBD.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
@ -435,7 +435,7 @@ func getVolumeSource(
return nil, false, fmt.Errorf("Spec does not reference a RBD volume type")
}
func parsePodSecret(pod *api.Pod, secretName string, kubeClient clientset.Interface) (string, error) {
func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (string, error) {
secret, err := volutil.GetSecretForPod(pod, secretName, kubeClient)
if err != nil {
glog.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName)

View File

@ -21,8 +21,8 @@ import (
"os"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@ -47,7 +47,7 @@ func TestCanSupport(t *testing.T) {
if plug.GetPluginName() != "kubernetes.io/rbd" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
@ -87,7 +87,7 @@ func (fake *fakeDiskManager) DetachDisk(c rbdUnmounter, mntPath string) error {
return nil
}
func (fake *fakeDiskManager) CreateImage(provisioner *rbdVolumeProvisioner) (r *api.RBDVolumeSource, volumeSizeGB int, err error) {
func (fake *fakeDiskManager) CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDVolumeSource, volumeSizeGB int, err error) {
return nil, 0, fmt.Errorf("not implemented")
}
@ -155,10 +155,10 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
}
func TestPluginVolume(t *testing.T) {
vol := &api.Volume{
vol := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
RBD: &api.RBDVolumeSource{
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
@ -168,13 +168,13 @@ func TestPluginVolume(t *testing.T) {
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolume(t *testing.T) {
vol := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
vol := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "vol1",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
RBD: &api.RBDVolumeSource{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
@ -193,34 +193,34 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
RBD: &api.RBDVolumeSource{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
ClaimRef: &api.ObjectReference{
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
@ -232,7 +232,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
// The readOnly bool is supplied by the persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {

View File

@ -34,7 +34,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/node"
@ -313,8 +313,8 @@ func (util *RBDUtil) DetachDisk(c rbdUnmounter, mntPath string) error {
return nil
}
func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *api.RBDVolumeSource, size int, err error) {
capacity := p.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDVolumeSource, size int, err error) {
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// convert to MiB, the unit rbd defaults to
sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024))
@ -342,7 +342,7 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *api.RBDVolumeSourc
return nil, 0, err
}
return &api.RBDVolumeSource{
return &v1.RBDVolumeSource{
CephMonitors: p.rbdMounter.Mon,
RBDImage: p.rbdMounter.Image,
RBDPool: p.rbdMounter.Pool,

View File

@ -22,7 +22,7 @@ import (
"runtime"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
ioutil "k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
@ -49,7 +49,7 @@ var _ volume.VolumePlugin = &secretPlugin{}
func wrappedVolumeSpec() volume.Spec {
return volume.Spec{
Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}},
Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}}},
}
}
@ -83,7 +83,7 @@ func (plugin *secretPlugin) RequiresRemount() bool {
return true
}
func (plugin *secretPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *secretPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return &secretVolumeMounter{
secretVolume: &secretVolume{
spec.Name(),
@ -113,10 +113,10 @@ func (plugin *secretPlugin) NewUnmounter(volName string, podUID types.UID) (volu
}
func (plugin *secretPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
secretVolume := &api.Volume{
secretVolume := &v1.Volume{
Name: volName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: volName,
},
},
@ -144,8 +144,8 @@ func (sv *secretVolume) GetPath() string {
type secretVolumeMounter struct {
*secretVolume
source api.SecretVolumeSource
pod api.Pod
source v1.SecretVolumeSource
pod v1.Pod
opts *volume.VolumeOptions
}
@ -232,7 +232,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
return nil
}
func makePayload(mappings []api.KeyToPath, secret *api.Secret, defaultMode *int32) (map[string]volumeutil.FileProjection, error) {
func makePayload(mappings []v1.KeyToPath, secret *v1.Secret, defaultMode *int32) (map[string]volumeutil.FileProjection, error) {
if defaultMode == nil {
return nil, fmt.Errorf("No defaultMode used, not even the default value for it")
}
@ -267,7 +267,7 @@ func makePayload(mappings []api.KeyToPath, secret *api.Secret, defaultMode *int3
return payload, nil
}
func totalSecretBytes(secret *api.Secret) int {
func totalSecretBytes(secret *v1.Secret) int {
totalSize := 0
for _, bytes := range secret.Data {
totalSize += len(bytes)
@ -303,9 +303,9 @@ func (c *secretVolumeUnmounter) TearDownAt(dir string) error {
return wrapped.TearDownAt(dir)
}
func getVolumeSource(spec *volume.Spec) (*api.SecretVolumeSource, bool) {
func getVolumeSource(spec *volume.Spec) (*v1.SecretVolumeSource, bool) {
var readOnly bool
var volumeSource *api.SecretVolumeSource
var volumeSource *v1.SecretVolumeSource
if spec.Volume != nil && spec.Volume.Secret != nil {
volumeSource = spec.Volume.Secret

View File

@ -26,9 +26,9 @@ import (
"strings"
"testing"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/empty_dir"
@ -42,15 +42,15 @@ func TestMakePayload(t *testing.T) {
caseMappingMode := int32(0400)
cases := []struct {
name string
mappings []api.KeyToPath
secret *api.Secret
mappings []v1.KeyToPath
secret *v1.Secret
mode int32
payload map[string]util.FileProjection
success bool
}{
{
name: "no overrides",
secret: &api.Secret{
secret: &v1.Secret{
Data: map[string][]byte{
"foo": []byte("foo"),
"bar": []byte("bar"),
@ -65,13 +65,13 @@ func TestMakePayload(t *testing.T) {
},
{
name: "basic 1",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/foo.txt",
},
},
secret: &api.Secret{
secret: &v1.Secret{
Data: map[string][]byte{
"foo": []byte("foo"),
"bar": []byte("bar"),
@ -85,13 +85,13 @@ func TestMakePayload(t *testing.T) {
},
{
name: "subdirs",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/1/2/3/foo.txt",
},
},
secret: &api.Secret{
secret: &v1.Secret{
Data: map[string][]byte{
"foo": []byte("foo"),
"bar": []byte("bar"),
@ -105,13 +105,13 @@ func TestMakePayload(t *testing.T) {
},
{
name: "subdirs 2",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/1/2/3/foo.txt",
},
},
secret: &api.Secret{
secret: &v1.Secret{
Data: map[string][]byte{
"foo": []byte("foo"),
"bar": []byte("bar"),
@ -125,7 +125,7 @@ func TestMakePayload(t *testing.T) {
},
{
name: "subdirs 3",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "path/to/1/2/3/foo.txt",
@ -135,7 +135,7 @@ func TestMakePayload(t *testing.T) {
Path: "another/path/to/the/esteemed/bar.bin",
},
},
secret: &api.Secret{
secret: &v1.Secret{
Data: map[string][]byte{
"foo": []byte("foo"),
"bar": []byte("bar"),
@ -150,13 +150,13 @@ func TestMakePayload(t *testing.T) {
},
{
name: "non existent key",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "zab",
Path: "path/to/foo.txt",
},
},
secret: &api.Secret{
secret: &v1.Secret{
Data: map[string][]byte{
"foo": []byte("foo"),
"bar": []byte("bar"),
@ -167,7 +167,7 @@ func TestMakePayload(t *testing.T) {
},
{
name: "mapping with Mode",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "foo.txt",
@ -179,7 +179,7 @@ func TestMakePayload(t *testing.T) {
Mode: &caseMappingMode,
},
},
secret: &api.Secret{
secret: &v1.Secret{
Data: map[string][]byte{
"foo": []byte("foo"),
"bar": []byte("bar"),
@ -194,7 +194,7 @@ func TestMakePayload(t *testing.T) {
},
{
name: "mapping with defaultMode",
mappings: []api.KeyToPath{
mappings: []v1.KeyToPath{
{
Key: "foo",
Path: "foo.txt",
@ -204,7 +204,7 @@ func TestMakePayload(t *testing.T) {
Path: "bar.bin",
},
},
secret: &api.Secret{
secret: &v1.Secret{
Data: map[string][]byte{
"foo": []byte("foo"),
"bar": []byte("bar"),
@ -263,7 +263,7 @@ func TestCanSupport(t *testing.T) {
if plugin.GetPluginName() != secretPluginName {
t.Errorf("Wrong name: %s", plugin.GetPluginName())
}
if !plugin.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: ""}}}}) {
if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: ""}}}}) {
t.Errorf("Expected true")
}
if plugin.CanSupport(&volume.Spec{}) {
@ -292,7 +292,7 @@ func TestPlugin(t *testing.T) {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@ -365,7 +365,7 @@ func TestPluginReboot(t *testing.T) {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@ -397,11 +397,11 @@ func TestPluginReboot(t *testing.T) {
doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
func volumeSpec(volumeName, secretName string, defaultMode int32) *api.Volume {
return &api.Volume{
func volumeSpec(volumeName, secretName string, defaultMode int32) *v1.Volume {
return &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
DefaultMode: &defaultMode,
},
@ -409,9 +409,9 @@ func volumeSpec(volumeName, secretName string, defaultMode int32) *api.Volume {
}
}
func secret(namespace, name string) api.Secret {
return api.Secret{
ObjectMeta: api.ObjectMeta{
func secret(namespace, name string) v1.Secret {
return v1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: namespace,
Name: name,
},
@ -423,7 +423,7 @@ func secret(namespace, name string) api.Secret {
}
}
func doTestSecretDataInVolume(volumePath string, secret api.Secret, t *testing.T) {
func doTestSecretDataInVolume(volumePath string, secret v1.Secret, t *testing.T) {
for key, value := range secret.Data {
secretDataHostPath := path.Join(volumePath, key)
if _, err := os.Stat(secretDataHostPath); err != nil {

View File

@ -27,9 +27,9 @@ import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/io"
@ -86,7 +86,7 @@ func (f *fakeVolumeHost) GetWriter() io.Writer {
return f.writer
}
func (f *fakeVolumeHost) NewWrapperMounter(volName string, spec Spec, pod *api.Pod, opts VolumeOptions) (Mounter, error) {
func (f *fakeVolumeHost) NewWrapperMounter(volName string, spec Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error) {
// The name of the wrapper volume is set to "wrapped_{wrapped_volume_name}"
wrapperVolumeName := "wrapped_" + volName
if spec.Volume != nil {
@ -122,8 +122,8 @@ func (f *fakeVolumeHost) GetHostIP() (net.IP, error) {
return nil, fmt.Errorf("GetHostIP() not implemented")
}
func (f *fakeVolumeHost) GetNodeAllocatable() (api.ResourceList, error) {
return api.ResourceList{}, nil
func (f *fakeVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) {
return v1.ResourceList{}, nil
}
func ProbeVolumePlugins(config VolumeConfig) []VolumePlugin {
@ -196,7 +196,7 @@ func (plugin *FakeVolumePlugin) RequiresRemount() bool {
return false
}
func (plugin *FakeVolumePlugin) NewMounter(spec *Spec, pod *api.Pod, opts VolumeOptions) (Mounter, error) {
func (plugin *FakeVolumePlugin) NewMounter(spec *Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error) {
plugin.Lock()
defer plugin.Unlock()
volume := plugin.getFakeVolume(&plugin.Mounters)
@ -283,8 +283,8 @@ func (plugin *FakeVolumePlugin) NewProvisioner(options VolumeOptions) (Provision
return &FakeProvisioner{options, plugin.Host}, nil
}
func (plugin *FakeVolumePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{}
func (plugin *FakeVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{}
}
func (plugin *FakeVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*Spec, error) {
@ -473,24 +473,24 @@ type FakeProvisioner struct {
Host VolumeHost
}
func (fc *FakeProvisioner) Provision() (*api.PersistentVolume, error) {
func (fc *FakeProvisioner) Provision() (*v1.PersistentVolume, error) {
fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: fc.Options.PVName,
Annotations: map[string]string{
"kubernetes.io/createdby": "fakeplugin-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: fc.Options.PersistentVolumeReclaimPolicy,
AccessModes: fc.Options.PVC.Spec.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): fc.Options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)],
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): fc.Options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
},
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{
PersistentVolumeSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: fullpath,
},
},
@ -735,17 +735,17 @@ func GetTestVolumePluginMgr(
}
// CreateTestPVC returns a provisionable PVC for tests
func CreateTestPVC(capacity string, accessModes []api.PersistentVolumeAccessMode) *api.PersistentVolumeClaim {
claim := api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
func CreateTestPVC(capacity string, accessModes []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "dummy",
Namespace: "default",
},
Spec: api.PersistentVolumeClaimSpec{
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: accessModes,
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(capacity),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(capacity),
},
},
},

View File

@ -20,8 +20,8 @@ import (
"fmt"
"reflect"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/watch"
@ -51,13 +51,13 @@ type RecycleEventRecorder func(eventtype, message string)
// pod - the pod defined by a volume plugin to recycle the volume. pod.Name
// will be overwritten with a unique name based on PV.Name.
// client - kube client for API operations.
func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder))
}
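For orientation, a hedged sketch of how a volume plugin's recycler might call this helper. The wrapper name, the pod template, and the client are illustrative assumptions, not the project's actual recycler code; only the RecycleVolumeByWatchingPodUntilCompletion signature and the RecycleEventRecorder type come from the source above.

package volume

import (
    "github.com/golang/glog"

    "k8s.io/kubernetes/pkg/api/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// recycleWithLogging is a hypothetical wrapper: it forwards recycler pod
// events to the log and blocks until the recycler pod succeeds or fails.
func recycleWithLogging(pvName string, recyclerPod *v1.Pod, client clientset.Interface) error {
    recorder := func(eventtype, message string) {
        glog.V(3).Infof("recycler %q: %s: %s", pvName, eventtype, message)
    }
    return RecycleVolumeByWatchingPodUntilCompletion(pvName, recyclerPod, client, recorder)
}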
// Same as RecycleVolumeByWatchingPodUntilCompletion above, except 'recyclerClient' is a narrower pod API
// interface to ease testing
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.Pod, recyclerClient recyclerClient) error {
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
// Generate unique name for the recycler pod - we need to get "already
@ -83,7 +83,7 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.P
return fmt.Errorf("unexpected error creating recycler pod: %+v\n", err)
}
}
defer func(pod *api.Pod) {
defer func(pod *v1.Pod) {
glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
if err := recyclerClient.DeletePod(pod.Name, pod.Namespace); err != nil {
glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
@ -95,17 +95,17 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.P
for {
event := <-podCh
switch event.Object.(type) {
case *api.Pod:
case *v1.Pod:
// POD changed
pod := event.Object.(*api.Pod)
pod := event.Object.(*v1.Pod)
glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
switch event.Type {
case watch.Added, watch.Modified:
if pod.Status.Phase == api.PodSucceeded {
if pod.Status.Phase == v1.PodSucceeded {
// Recycle succeeded.
return nil
}
if pod.Status.Phase == api.PodFailed {
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Message != "" {
return fmt.Errorf(pod.Status.Message)
} else {
@ -120,9 +120,9 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.P
return fmt.Errorf("recycler pod watcher failed")
}
case *api.Event:
case *v1.Event:
// Event received
podEvent := event.Object.(*api.Event)
podEvent := event.Object.(*v1.Event)
glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
if event.Type == watch.Added {
recyclerClient.Event(podEvent.Type, podEvent.Message)
@ -134,8 +134,8 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.P
// recyclerClient abstracts access to a Pod by providing a narrower interface.
// This makes it easier to mock a client for testing.
type recyclerClient interface {
CreatePod(pod *api.Pod) (*api.Pod, error)
GetPod(name, namespace string) (*api.Pod, error)
CreatePod(pod *v1.Pod) (*v1.Pod, error)
GetPod(name, namespace string) (*v1.Pod, error)
DeletePod(name, namespace string) error
// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
// to close the reflector backing the watch. The caller is responsible for
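Since the stated purpose of recyclerClient is easy mocking, a minimal test double could look like the sketch below. It assumes the interface also carries the WatchPod and Event methods implemented by realRecyclerClient further down, and that the surrounding file's imports (fmt, v1, watch) are in scope; the fake itself is hypothetical, not the project's test code.

// fakeRecyclerClient is a hypothetical in-memory recyclerClient for tests.
type fakeRecyclerClient struct {
    pods    map[string]*v1.Pod // keyed by "namespace/name"
    events  []string           // eventtype/message pairs recorded via Event
    watchCh chan watch.Event   // events to hand back from WatchPod
}

func (c *fakeRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
    c.pods[pod.Namespace+"/"+pod.Name] = pod
    return pod, nil
}

func (c *fakeRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
    if pod, ok := c.pods[namespace+"/"+name]; ok {
        return pod, nil
    }
    return nil, fmt.Errorf("pod %s/%s not found", namespace, name)
}

func (c *fakeRecyclerClient) DeletePod(name, namespace string) error {
    delete(c.pods, namespace+"/"+name)
    return nil
}

func (c *fakeRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
    return c.watchCh, nil
}

func (c *fakeRecyclerClient) Event(eventtype, message string) {
    c.events = append(c.events, eventtype+": "+message)
}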
@ -157,11 +157,11 @@ type realRecyclerClient struct {
recorder RecycleEventRecorder
}
func (c *realRecyclerClient) CreatePod(pod *api.Pod) (*api.Pod, error) {
func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
return c.client.Core().Pods(pod.Namespace).Create(pod)
}
func (c *realRecyclerClient) GetPod(name, namespace string) (*api.Pod, error) {
func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
return c.client.Core().Pods(namespace).Get(name)
}
@ -175,8 +175,8 @@ func (c *realRecyclerClient) Event(eventtype, message string) {
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
podSelector, _ := fields.ParseSelector("metadata.name=" + name)
options := api.ListOptions{
FieldSelector: podSelector,
options := v1.ListOptions{
FieldSelector: podSelector.String(),
Watch: true,
}
@ -186,8 +186,8 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s
}
eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
eventWatch, err := c.client.Core().Events(namespace).Watch(api.ListOptions{
FieldSelector: eventSelector,
eventWatch, err := c.client.Core().Events(namespace).Watch(v1.ListOptions{
FieldSelector: eventSelector.String(),
Watch: true,
})
if err != nil {
@ -229,9 +229,9 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s
// recycle operation. The calculated return value is either the
// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
// greater.
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *api.PersistentVolume) int64 {
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
giQty := resource.MustParse("1Gi")
pvQty := pv.Spec.Capacity[api.ResourceStorage]
pvQty := pv.Spec.Capacity[v1.ResourceStorage]
giSize := giQty.Value()
pvSize := pvQty.Value()
timeout := (pvSize / giSize) * int64(timeoutIncrement)
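A quick worked example of the rule in the doc comment (the minimum-floor branch is truncated out of the hunk above, so this sketch reconstructs it from the comment): with minimumTimeout = 60 and timeoutIncrement = 30, a 100Gi PV yields 100 * 30 = 3000 seconds, while a 1Gi PV falls back to the 60-second minimum.

package main

import "fmt"

// calculateTimeout mirrors the documented rule: timeoutIncrement seconds
// per Gi of capacity, floored at minimumTimeout (a sketch, not the real code).
func calculateTimeout(minimumTimeout, timeoutIncrement int, pvSizeGi int64) int64 {
    timeout := pvSizeGi * int64(timeoutIncrement)
    if timeout < int64(minimumTimeout) {
        return int64(minimumTimeout)
    }
    return timeout
}

func main() {
    fmt.Println(calculateTimeout(60, 30, 100)) // 3000: the per-Gi increment dominates
    fmt.Println(calculateTimeout(60, 30, 1))   // 60: the minimum wins
}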

View File

@ -29,7 +29,7 @@ import (
"sync"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
k8sRuntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/volume/util/types"
@ -40,7 +40,7 @@ const (
emptyUniquePodName types.UniquePodName = types.UniquePodName("")
// emptyUniqueVolumeName is a UniqueVolumeName for empty string
emptyUniqueVolumeName api.UniqueVolumeName = api.UniqueVolumeName("")
emptyUniqueVolumeName v1.UniqueVolumeName = v1.UniqueVolumeName("")
)
// NestedPendingOperations defines the supported set of operations.
@ -55,7 +55,7 @@ type NestedPendingOperations interface {
// concatenation of volumeName and podName is removed from the list of
// executing operations allowing a new operation to be started with the
// volumeName without error.
Run(volumeName api.UniqueVolumeName, podName types.UniquePodName, operationFunc func() error) error
Run(volumeName v1.UniqueVolumeName, podName types.UniquePodName, operationFunc func() error) error
// Wait blocks until all operations are completed. This is typically
// necessary during tests - the test should wait until all operations finish
@ -64,7 +64,7 @@ type NestedPendingOperations interface {
// IsOperationPending returns true if an operation for the given volumeName and podName is pending,
// otherwise it returns false
IsOperationPending(volumeName api.UniqueVolumeName, podName types.UniquePodName) bool
IsOperationPending(volumeName v1.UniqueVolumeName, podName types.UniquePodName) bool
}
// NewNestedPendingOperations returns a new instance of NestedPendingOperations.
@ -85,14 +85,14 @@ type nestedPendingOperations struct {
}
type operation struct {
volumeName api.UniqueVolumeName
volumeName v1.UniqueVolumeName
podName types.UniquePodName
operationPending bool
expBackoff exponentialbackoff.ExponentialBackoff
}
func (grm *nestedPendingOperations) Run(
volumeName api.UniqueVolumeName,
volumeName v1.UniqueVolumeName,
podName types.UniquePodName,
operationFunc func() error) error {
grm.lock.Lock()
@ -141,7 +141,7 @@ func (grm *nestedPendingOperations) Run(
}
func (grm *nestedPendingOperations) IsOperationPending(
volumeName api.UniqueVolumeName,
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) bool {
grm.lock.RLock()
@ -156,7 +156,7 @@ func (grm *nestedPendingOperations) IsOperationPending(
// This is an internal function and the caller should acquire and release the lock
func (grm *nestedPendingOperations) isOperationExists(
volumeName api.UniqueVolumeName,
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) (bool, int) {
// If volumeName is empty, the operation can be executed concurrently
@ -184,7 +184,7 @@ func (grm *nestedPendingOperations) isOperationExists(
}
func (grm *nestedPendingOperations) getOperation(
volumeName api.UniqueVolumeName,
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) (uint, error) {
// Assumes lock has been acquired by caller.
@ -201,7 +201,7 @@ func (grm *nestedPendingOperations) getOperation(
func (grm *nestedPendingOperations) deleteOperation(
// Assumes lock has been acquired by caller.
volumeName api.UniqueVolumeName,
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) {
opIndex := -1
@ -219,7 +219,7 @@ func (grm *nestedPendingOperations) deleteOperation(
}
func (grm *nestedPendingOperations) operationComplete(
volumeName api.UniqueVolumeName, podName types.UniquePodName, err *error) {
volumeName v1.UniqueVolumeName, podName types.UniquePodName, err *error) {
// Deferred operations are executed in Last-In-First-Out order. In this case
// the lock is acquired first when operationComplete begins, and is
// released when the method finishes; after the lock is released, cond is
@ -272,7 +272,7 @@ func (grm *nestedPendingOperations) Wait() {
}
func getOperationName(
volumeName api.UniqueVolumeName, podName types.UniquePodName) string {
volumeName v1.UniqueVolumeName, podName types.UniquePodName) string {
podNameStr := ""
if podName != emptyUniquePodName {
podNameStr = fmt.Sprintf(" (%q)", podName)

View File

@ -21,7 +21,7 @@ import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume/util/types"
)
@ -46,7 +46,7 @@ const (
func Test_NewGoRoutineMap_Positive_SingleOp(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation := func() error { return nil }
// Act
@ -61,8 +61,8 @@ func Test_NewGoRoutineMap_Positive_SingleOp(t *testing.T) {
func Test_NewGoRoutineMap_Positive_TwoOps(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volume1Name := api.UniqueVolumeName("volume1-name")
volume2Name := api.UniqueVolumeName("volume2-name")
volume1Name := v1.UniqueVolumeName("volume1-name")
volume2Name := v1.UniqueVolumeName("volume2-name")
operation := func() error { return nil }
// Act
@ -82,7 +82,7 @@ func Test_NewGoRoutineMap_Positive_TwoOps(t *testing.T) {
func Test_NewGoRoutineMap_Positive_TwoSubOps(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation1PodName := types.UniquePodName("operation1-podname")
operation2PodName := types.UniquePodName("operation2-podname")
operation := func() error { return nil }
@ -104,7 +104,7 @@ func Test_NewGoRoutineMap_Positive_TwoSubOps(t *testing.T) {
func Test_NewGoRoutineMap_Positive_SingleOpWithExpBackoff(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation := func() error { return nil }
// Act
@ -119,7 +119,7 @@ func Test_NewGoRoutineMap_Positive_SingleOpWithExpBackoff(t *testing.T) {
func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletes(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateCallbackFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, operation1)
@ -151,7 +151,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletes(t *testing.T) {
func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletesWithExpBackoff(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateCallbackFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, operation1)
@ -183,7 +183,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletesWithExpBackoff(t *
func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanics(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation1 := generatePanicFunc()
err1 := grm.Run(volumeName, "" /* operationSubName */, operation1)
if err1 != nil {
@ -213,7 +213,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanics(t *testing.T) {
func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanicsWithExpBackoff(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation1 := generatePanicFunc()
err1 := grm.Run(volumeName, "" /* operationSubName */, operation1)
if err1 != nil {
@ -243,7 +243,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanicsWithExpBackoff(t *tes
func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletes(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, operation1)
@ -267,7 +267,7 @@ func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletes(t *testing.T) {
func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes2(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operationPodName := types.UniquePodName("operation-podname")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
@ -292,7 +292,7 @@ func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes2(t *testing.T
func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operationPodName := types.UniquePodName("operation-podname")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
@ -317,7 +317,7 @@ func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes(t *testing.T)
func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletesWithExpBackoff(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, operation1)
@ -341,7 +341,7 @@ func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletesWithExpBackoff(t
func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, operation1)
@ -385,7 +385,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) {
func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletesWithExpBackoff(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, operation1)
@ -468,7 +468,7 @@ func Test_NewGoRoutineMap_Positive_Wait(t *testing.T) {
// Test that Wait() really blocks until the last operation succeeds
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err := grm.Run(volumeName, "" /* operationSubName */, operation1)
@ -497,7 +497,7 @@ func Test_NewGoRoutineMap_Positive_WaitWithExpBackoff(t *testing.T) {
// Test that Wait() really blocks until the last operation succeeds
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := api.UniqueVolumeName("volume-name")
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err := grm.Run(volumeName, "" /* operationSubName */, operation1)

View File

@ -25,9 +25,9 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
kevents "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/types"
@ -112,12 +112,12 @@ type OperationExecutor interface {
// IsOperationPending returns true if an operation for the given volumeName and podName is pending,
// otherwise it returns false
IsOperationPending(volumeName api.UniqueVolumeName, podName volumetypes.UniquePodName) bool
IsOperationPending(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) bool
}
// NewOperationExecutor returns a new instance of OperationExecutor.
func NewOperationExecutor(
kubeClient internalclientset.Interface,
kubeClient clientset.Interface,
volumePluginMgr *volume.VolumePluginMgr,
recorder record.EventRecorder,
checkNodeCapabilitiesBeforeMount bool) OperationExecutor {
@ -136,16 +136,16 @@ func NewOperationExecutor(
// state of the world cache after successful mount/unmount.
type ActualStateOfWorldMounterUpdater interface {
// Marks the specified volume as mounted to the specified pod
MarkVolumeAsMounted(podName volumetypes.UniquePodName, podUID types.UID, volumeName api.UniqueVolumeName, mounter volume.Mounter, outerVolumeSpecName string, volumeGidValue string) error
MarkVolumeAsMounted(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, outerVolumeSpecName string, volumeGidValue string) error
// Marks the specified volume as unmounted from the specified pod
MarkVolumeAsUnmounted(podName volumetypes.UniquePodName, volumeName api.UniqueVolumeName) error
MarkVolumeAsUnmounted(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error
// Marks the specified volume as having been globally mounted.
MarkDeviceAsMounted(volumeName api.UniqueVolumeName) error
MarkDeviceAsMounted(volumeName v1.UniqueVolumeName) error
// Marks the specified volume as having its global mount unmounted.
MarkDeviceAsUnmounted(volumeName api.UniqueVolumeName) error
MarkDeviceAsUnmounted(volumeName v1.UniqueVolumeName) error
}
// ActualStateOfWorldAttacherUpdater defines a set of operations updating the
@ -158,25 +158,25 @@ type ActualStateOfWorldAttacherUpdater interface {
// TODO: in the future, we should be able to remove the volumeName
// argument to this method -- since it is used only for attachable
// volumes. See issue 29695.
MarkVolumeAsAttached(volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error
MarkVolumeAsAttached(volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error
// Marks the specified volume as detached from the specified node
MarkVolumeAsDetached(volumeName api.UniqueVolumeName, nodeName types.NodeName)
MarkVolumeAsDetached(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// Marks desire to detach the specified volume (remove the volume from the node's
// volumesToReportedAsAttached list)
RemoveVolumeFromReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName) error
RemoveVolumeFromReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) error
// Unmarks the desire to detach for the specified volume (add the volume back to
// the node's volumesToReportedAsAttached list)
AddVolumeToReportAsAttached(volumeName api.UniqueVolumeName, nodeName types.NodeName)
AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
}
// VolumeToAttach represents a volume that should be attached to a node.
type VolumeToAttach struct {
// VolumeName is the unique identifier for the volume that should be
// attached.
VolumeName api.UniqueVolumeName
VolumeName v1.UniqueVolumeName
// VolumeSpec is a volume spec containing the specification for the volume
// that should be attached.
@ -190,7 +190,7 @@ type VolumeToAttach struct {
// volume and are scheduled to the underlying node. The key in the map is
// the name of the pod and the value is a pod object containing more
// information about the pod.
ScheduledPods []*api.Pod
ScheduledPods []*v1.Pod
}
// VolumeToMount represents a volume that should be attached to this node and
@ -198,7 +198,7 @@ type VolumeToAttach struct {
type VolumeToMount struct {
// VolumeName is the unique identifier for the volume that should be
// mounted.
VolumeName api.UniqueVolumeName
VolumeName v1.UniqueVolumeName
// PodName is the unique identifier for the pod that the volume should be
// mounted to after it is attached.
@ -215,7 +215,7 @@ type VolumeToMount struct {
OuterVolumeSpecName string
// Pod to mount the volume to. Used to create NewMounter.
Pod *api.Pod
Pod *v1.Pod
// PluginIsAttachable indicates that the plugin for this volume implements
// the volume.Attacher interface
@ -236,7 +236,7 @@ type VolumeToMount struct {
// AttachedVolume represents a volume that is attached to a node.
type AttachedVolume struct {
// VolumeName is the unique identifier for the volume that is attached.
VolumeName api.UniqueVolumeName
VolumeName v1.UniqueVolumeName
// VolumeSpec is the volume spec containing the specification for the
// volume that is attached.
@ -260,7 +260,7 @@ type MountedVolume struct {
PodName volumetypes.UniquePodName
// VolumeName is the unique identifier of the volume mounted to the pod.
VolumeName api.UniqueVolumeName
VolumeName v1.UniqueVolumeName
// InnerVolumeSpecName is the volume.Spec.Name() of the volume. If the
// volume was referenced through a persistent volume claim, this contains
@ -361,7 +361,7 @@ type MountedVolume struct {
type operationExecutor struct {
// Used to fetch objects from the API server like Node in the
// VerifyControllerAttachedVolume operation.
kubeClient internalclientset.Interface
kubeClient clientset.Interface
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
@ -380,7 +380,7 @@ type operationExecutor struct {
checkNodeCapabilitiesBeforeMount bool
}
func (oe *operationExecutor) IsOperationPending(volumeName api.UniqueVolumeName, podName volumetypes.UniquePodName) bool {
func (oe *operationExecutor) IsOperationPending(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) bool {
return oe.pendingOperations.IsOperationPending(volumeName, podName)
}
@ -502,7 +502,7 @@ func (oe *operationExecutor) generateVolumesAreAttachedFunc(
volumesPerPlugin := make(map[string][]*volume.Spec)
// volumeSpecMap maps from a volume spec to its unique volumeName which will be used
// when calling MarkVolumeAsDetached
volumeSpecMap := make(map[*volume.Spec]api.UniqueVolumeName)
volumeSpecMap := make(map[*volume.Spec]v1.UniqueVolumeName)
// Iterate over each volume spec and put it into a map indexed by the pluginName
for _, volumeAttached := range attachedVolumes {
volumePlugin, err :=
@ -607,7 +607,7 @@ func (oe *operationExecutor) generateAttachVolumeFunc(
volumeToAttach.NodeName,
attachErr)
for _, pod := range volumeToAttach.ScheduledPods {
oe.recorder.Eventf(pod, api.EventTypeWarning, kevents.FailedMountVolume, err.Error())
oe.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedMountVolume, err.Error())
}
return err
}
@ -620,7 +620,7 @@ func (oe *operationExecutor) generateAttachVolumeFunc(
// Update actual state of world
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
api.UniqueVolumeName(""), volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath)
v1.UniqueVolumeName(""), volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath)
if addVolumeNodeErr != nil {
// On failure, return error. Caller will log and retry.
return fmt.Errorf(
@ -857,7 +857,7 @@ func (oe *operationExecutor) generateMountVolumeFunc(
volumeToMount.PodName,
volumeToMount.Pod.UID,
err)
oe.recorder.Eventf(volumeToMount.Pod, api.EventTypeWarning, kevents.FailedMountVolume, err.Error())
oe.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, err.Error())
return err
}
@ -887,7 +887,7 @@ func (oe *operationExecutor) generateMountVolumeFunc(
if oe.checkNodeCapabilitiesBeforeMount {
if canMountErr := volumeMounter.CanMount(); canMountErr != nil {
errMsg := fmt.Sprintf("Unable to mount volume %v (spec.Name: %v) on pod %v (UID: %v). Verify that your node machine has the required components before attempting to mount this volume type. %s", volumeToMount.VolumeName, volumeToMount.VolumeSpec.Name(), volumeToMount.Pod.Name, volumeToMount.Pod.UID, canMountErr.Error())
oe.recorder.Eventf(volumeToMount.Pod, api.EventTypeWarning, kevents.FailedMountVolume, errMsg)
oe.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, errMsg)
glog.Errorf(errMsg)
return fmt.Errorf(errMsg)
}
@ -904,7 +904,7 @@ func (oe *operationExecutor) generateMountVolumeFunc(
volumeToMount.PodName,
volumeToMount.Pod.UID,
mountErr)
oe.recorder.Eventf(volumeToMount.Pod, api.EventTypeWarning, kevents.FailedMountVolume, err.Error())
oe.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, err.Error())
return err
}
@ -1190,7 +1190,7 @@ func (oe *operationExecutor) generateVerifyControllerAttachedVolumeFunc(
for _, attachedVolume := range node.Status.VolumesAttached {
if attachedVolume.Name == volumeToMount.VolumeName {
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
api.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath)
v1.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath)
glog.Infof("Controller successfully attached volume %q (spec.Name: %q) pod %q (UID: %q) devicePath: %q",
volumeToMount.VolumeName,
volumeToMount.VolumeSpec.Name(),

View File

@ -22,9 +22,9 @@ import (
"path"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/storage"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/mount"
)
@ -113,7 +113,7 @@ func PathExists(path string) (bool, error) {
}
// GetSecretForPod locates the secret by name in the pod's namespace and returns it as a map
func GetSecretForPod(pod *api.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) {
func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) {
secret := make(map[string]string)
if kubeClient == nil {
return secret, fmt.Errorf("Cannot get kube client")
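A usage sketch for GetSecretForPod from inside a volume plugin; the secret name, the key, and the util alias for this package are illustrative assumptions:

// Sketch only: resolve per-pod credentials before mounting.
secretMap, err := util.GetSecretForPod(pod, "ceph-secret", plugin.host.GetKubeClient())
if err != nil {
	return err
}
adminKey, ok := secretMap["key"]
if !ok {
	return fmt.Errorf("secret %q has no %q entry", "ceph-secret", "key")
}
_ = adminKey // would feed the plugin's mount options here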
@ -138,7 +138,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl
if err != nil {
return secret, err
}
if secrets.Type != api.SecretType(volumePluginName) {
if secrets.Type != v1.SecretType(volumePluginName) {
return secret, fmt.Errorf("Cannot get secret of type %s", volumePluginName)
}
for name, data := range secrets.Data {
@ -147,7 +147,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl
return secret, nil
}
func GetClassForVolume(kubeClient clientset.Interface, pv *api.PersistentVolume) (*storage.StorageClass, error) {
func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) {
// TODO: replace with a real attribute after beta
className, found := pv.Annotations["volume.beta.kubernetes.io/storage-class"]
if !found {
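Until the storage-class attribute graduates from beta, the class name rides on the PV as the annotation checked above. A minimal sketch of the whole lookup, assuming the release_1_5 clientset surface imported in this file:

// Sketch only: annotation-based StorageClass resolution.
className, found := pv.Annotations["volume.beta.kubernetes.io/storage-class"]
if !found {
	return nil, fmt.Errorf("volume has no storage class annotation")
}
return kubeClient.Storage().StorageClasses().Get(className)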

View File

@ -21,7 +21,7 @@ package volumehelper
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/types"
)
@ -38,7 +38,7 @@ const (
)
// GetUniquePodName returns a unique identifier to reference a pod by
func GetUniquePodName(pod *api.Pod) types.UniquePodName {
func GetUniquePodName(pod *v1.Pod) types.UniquePodName {
return types.UniquePodName(pod.UID)
}
@ -48,15 +48,15 @@ func GetUniquePodName(pod *api.Pod) types.UniquePodName {
// The returned name can be used to uniquely reference the volume, for example,
// to prevent operations (attach/detach or mount/unmount) from being triggered
// on the same volume.
func GetUniqueVolumeName(pluginName, volumeName string) api.UniqueVolumeName {
return api.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName))
func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName {
return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName))
}
// GetUniqueVolumeNameForNonAttachableVolume returns the unique volume name
// for a non-attachable volume.
func GetUniqueVolumeNameForNonAttachableVolume(
podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) api.UniqueVolumeName {
return api.UniqueVolumeName(
podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName {
return v1.UniqueVolumeName(
fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name()))
}
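The two helpers above differ only in whether the pod is baked into the identifier. A self-contained sketch of the resulting strings (plugin, pod, and volume names are illustrative):

package main

import "fmt"

func main() {
	// Attachable volumes: one identity per plugin/volume pair, shared by
	// every pod that mounts it.
	fmt.Printf("%s/%s\n", "kubernetes.io/aws-ebs", "vol-0a12bc")
	// Output: kubernetes.io/aws-ebs/vol-0a12bc

	// Non-attachable volumes: the pod name is included, since the same
	// spec mounted by two pods must not collide.
	fmt.Printf("%s/%v-%s\n", "kubernetes.io/empty-dir", "pod-uid-1234", "cache")
	// Output: kubernetes.io/empty-dir/pod-uid-1234-cache
}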
@ -67,7 +67,7 @@ func GetUniqueVolumeNameForNonAttachableVolume(
// If the given plugin does not support the volume spec, this returns an error.
func GetUniqueVolumeNameFromSpec(
volumePlugin volume.VolumePlugin,
volumeSpec *volume.Spec) (api.UniqueVolumeName, error) {
volumeSpec *volume.Spec) (v1.UniqueVolumeName, error) {
if volumePlugin == nil {
return "", fmt.Errorf(
"volumePlugin should not be nil. volumeSpec.Name=%q",

View File

@ -24,14 +24,15 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/watch"
)
type testcase struct {
// Input of the test
name string
existingPod *api.Pod
createPod *api.Pod
existingPod *v1.Pod
createPod *v1.Pod
// eventSequence is a list of events that are simulated during recycling. Each
// entry is either an event generated by a recycler pod or a state change of
// the pod (see newPodEvent and newEvent below).
@ -44,7 +45,7 @@ type testcase struct {
expectedError string
}
func newPodEvent(eventtype watch.EventType, name string, phase api.PodPhase, message string) watch.Event {
func newPodEvent(eventtype watch.EventType, name string, phase v1.PodPhase, message string) watch.Event {
return watch.Event{
Type: eventtype,
Object: newPod(name, phase, message),
@ -54,9 +55,9 @@ func newPodEvent(eventtype watch.EventType, name string, phase api.PodPhase, mes
func newEvent(eventtype, message string) watch.Event {
return watch.Event{
Type: watch.Added,
Object: &api.Event{
ObjectMeta: api.ObjectMeta{
Namespace: api.NamespaceDefault,
Object: &v1.Event{
ObjectMeta: v1.ObjectMeta{
Namespace: v1.NamespaceDefault,
},
Reason: "MockEvent",
Message: message,
@ -65,13 +66,13 @@ func newEvent(eventtype, message string) watch.Event {
}
}
func newPod(name string, phase api.PodPhase, message string) *api.Pod {
return &api.Pod{
ObjectMeta: api.ObjectMeta{
Namespace: api.NamespaceDefault,
func newPod(name string, phase v1.PodPhase, message string) *v1.Pod {
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Namespace: v1.NamespaceDefault,
Name: name,
},
Status: api.PodStatus{
Status: v1.PodStatus{
Phase: phase,
Message: message,
},
@ -83,70 +84,70 @@ func TestRecyclerPod(t *testing.T) {
{
// Test recycler success with some events
name: "RecyclerSuccess",
createPod: newPod("podRecyclerSuccess", api.PodPending, ""),
createPod: newPod("podRecyclerSuccess", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod gets Running and Succeeded
newPodEvent(watch.Added, "podRecyclerSuccess", api.PodPending, ""),
newEvent(api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"),
newEvent(api.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""),
newEvent(api.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""),
newEvent(api.EventTypeNormal, "Created container with docker id 83d929aeac82"),
newEvent(api.EventTypeNormal, "Started container with docker id 83d929aeac82"),
newPodEvent(watch.Modified, "podRecyclerSuccess", api.PodRunning, ""),
newPodEvent(watch.Modified, "podRecyclerSuccess", api.PodSucceeded, ""),
newPodEvent(watch.Added, "podRecyclerSuccess", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"),
newEvent(v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""),
newEvent(v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""),
newEvent(v1.EventTypeNormal, "Created container with docker id 83d929aeac82"),
newEvent(v1.EventTypeNormal, "Started container with docker id 83d929aeac82"),
newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodRunning, ""),
newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodSucceeded, ""),
},
expectedEvents: []mockEvent{
{api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"},
{api.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""},
{api.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""},
{api.EventTypeNormal, "Created container with docker id 83d929aeac82"},
{api.EventTypeNormal, "Started container with docker id 83d929aeac82"},
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"},
{v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""},
{v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""},
{v1.EventTypeNormal, "Created container with docker id 83d929aeac82"},
{v1.EventTypeNormal, "Started container with docker id 83d929aeac82"},
},
expectedError: "",
},
{
// Test recycler failure with some events
name: "RecyclerFailure",
createPod: newPod("podRecyclerFailure", api.PodPending, ""),
createPod: newPod("podRecyclerFailure", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod gets Running and then Fails
newPodEvent(watch.Added, "podRecyclerFailure", api.PodPending, ""),
newEvent(api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"),
newEvent(api.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"),
newEvent(api.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"recycler-for-podRecyclerFailure\"/\"default\". list of unattached/unmounted"),
newPodEvent(watch.Modified, "podRecyclerFailure", api.PodRunning, ""),
newPodEvent(watch.Modified, "podRecyclerFailure", api.PodFailed, "Pod was active on the node longer than specified deadline"),
newPodEvent(watch.Added, "podRecyclerFailure", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"),
newEvent(v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"),
newEvent(v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"recycler-for-podRecyclerFailure\"/\"default\". list of unattached/unmounted"),
newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodRunning, ""),
newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodFailed, "Pod was active on the node longer than specified deadline"),
},
expectedEvents: []mockEvent{
{api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"},
{api.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"},
{api.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"recycler-for-podRecyclerFailure\"/\"default\". list of unattached/unmounted"},
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"},
{v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"},
{v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"recycler-for-podRecyclerFailure\"/\"default\". list of unattached/unmounted"},
},
expectedError: "Pod was active on the node longer than specified deadline",
},
{
// Recycler pod gets deleted
name: "RecyclerDeleted",
createPod: newPod("podRecyclerDeleted", api.PodPending, ""),
createPod: newPod("podRecyclerDeleted", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod is created and then deleted
newPodEvent(watch.Added, "podRecyclerDeleted", api.PodPending, ""),
newEvent(api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"),
newPodEvent(watch.Deleted, "podRecyclerDeleted", api.PodPending, ""),
newPodEvent(watch.Added, "podRecyclerDeleted", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"),
newPodEvent(watch.Deleted, "podRecyclerDeleted", v1.PodPending, ""),
},
expectedEvents: []mockEvent{
{api.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"},
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"},
},
expectedError: "recycler pod was deleted",
},
{
// Another recycler pod is already running
name: "RecyclerRunning",
existingPod: newPod("podOldRecycler", api.PodRunning, ""),
createPod: newPod("podNewRecycler", api.PodFailed, "mock message"),
existingPod: newPod("podOldRecycler", v1.PodRunning, ""),
createPod: newPod("podNewRecycler", v1.PodFailed, "mock message"),
eventSequence: []watch.Event{
// Old pod succeeds
newPodEvent(watch.Modified, "podOldRecycler", api.PodSucceeded, ""),
newPodEvent(watch.Modified, "podOldRecycler", v1.PodSucceeded, ""),
},
// No error = the old pod succeeded. If the new pod had been used, there
// would be an error with "mock message".
@ -155,11 +156,11 @@ func TestRecyclerPod(t *testing.T) {
{
// Another recycler pod is already running and fails
name: "FailedRecyclerRunning",
existingPod: newPod("podOldRecycler", api.PodRunning, ""),
createPod: newPod("podNewRecycler", api.PodFailed, "mock message"),
existingPod: newPod("podOldRecycler", v1.PodRunning, ""),
createPod: newPod("podNewRecycler", v1.PodFailed, "mock message"),
eventSequence: []watch.Event{
// Old pod failure
newPodEvent(watch.Modified, "podOldRecycler", api.PodFailed, "Pod was active on the node longer than specified deadline"),
newPodEvent(watch.Modified, "podOldRecycler", v1.PodFailed, "Pod was active on the node longer than specified deadline"),
},
// If the new pod had been used, there would be an error with "mock message".
expectedError: "Pod was active on the node longer than specified deadline",
@ -205,7 +206,7 @@ func TestRecyclerPod(t *testing.T) {
}
type mockRecyclerClient struct {
pod *api.Pod
pod *v1.Pod
deletedCalled bool
receivedEvents []mockEvent
events []watch.Event
@ -215,7 +216,7 @@ type mockEvent struct {
eventtype, message string
}
func (c *mockRecyclerClient) CreatePod(pod *api.Pod) (*api.Pod, error) {
func (c *mockRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
if c.pod == nil {
c.pod = pod
return c.pod, nil
@ -224,7 +225,7 @@ func (c *mockRecyclerClient) CreatePod(pod *api.Pod) (*api.Pod, error) {
return nil, errors.NewAlreadyExists(api.Resource("pods"), pod.Name)
}
func (c *mockRecyclerClient) GetPod(name, namespace string) (*api.Pod, error) {
func (c *mockRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
if c.pod != nil {
return c.pod, nil
} else {
@ -252,10 +253,10 @@ func (c *mockRecyclerClient) Event(eventtype, message string) {
}
func TestCalculateTimeoutForVolume(t *testing.T) {
pv := &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("500M"),
pv := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("500M"),
},
},
}
@ -265,13 +266,13 @@ func TestCalculateTimeoutForVolume(t *testing.T) {
t.Errorf("Expected 50 for timeout but got %v", timeout)
}
pv.Spec.Capacity[api.ResourceStorage] = resource.MustParse("2Gi")
pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("2Gi")
timeout = CalculateTimeoutForVolume(50, 30, pv)
if timeout != 60 {
t.Errorf("Expected 60 for timeout but got %v", timeout)
}
pv.Spec.Capacity[api.ResourceStorage] = resource.MustParse("150Gi")
pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("150Gi")
timeout = CalculateTimeoutForVolume(50, 30, pv)
if timeout != 4500 {
t.Errorf("Expected 4500 for timeout but got %v", timeout)

View File

@ -25,8 +25,8 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
)
@ -149,7 +149,7 @@ type Provisioner interface {
// Provision creates the resource by allocating the underlying volume in a
// storage system. This method should block until completion and return a
// PersistentVolume representing the created storage resource.
Provision() (*api.PersistentVolume, error)
Provision() (*v1.PersistentVolume, error)
}
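A sketch of how a caller consumes this contract; the clientset call assumes the release_1_5 surface used elsewhere in this change:

// Sketch only: Provision blocks until the backing storage exists; the
// caller still owns submitting the returned object to the API server.
pv, err := provisioner.Provision()
if err != nil {
	return err
}
_, err = kubeClient.Core().PersistentVolumes().Create(pv)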
// Deleter removes the resource from the underlying storage provider. Calls

View File

@ -20,7 +20,7 @@ import (
"errors"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
@ -187,9 +187,9 @@ func newDetacher(testcase *testcase) *vsphereVMDKDetacher {
func createVolSpec(name string) *volume.Spec {
return &volume.Spec{
Volume: &api.Volume{
VolumeSource: api.VolumeSource{
VsphereVolume: &api.VsphereVirtualDiskVolumeSource{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: name,
},
},
@ -199,10 +199,10 @@ func createVolSpec(name string) *volume.Spec {
func createPVSpec(name string) *volume.Spec {
return &volume.Spec{
PersistentVolume: &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
VsphereVolume: &api.VsphereVirtualDiskVolumeSource{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: name,
},
},

View File

@ -22,8 +22,8 @@ import (
"path"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
@ -77,7 +77,7 @@ func (plugin *vsphereVolumePlugin) RequiresRemount() bool {
return false
}
func (plugin *vsphereVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
func (plugin *vsphereVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, &VsphereDiskUtil{}, plugin.host.GetMounter())
}
@ -119,10 +119,10 @@ func (plugin *vsphereVolumePlugin) newUnmounterInternal(volName string, podUID t
}
func (plugin *vsphereVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
vsphereVolume := &api.Volume{
vsphereVolume := &v1.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
VsphereVolume: &api.VsphereVirtualDiskVolumeSource{
VolumeSource: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumeName,
},
},
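A usage sketch for the reconstruction path above, as exercised after a kubelet restart; the mount path shape is an assumption and the pod UID is elided:

// Sketch only: rebuild a volume.Spec for an already-mounted volume.
spec, err := plugin.ConstructVolumeSpec(
	"vol1",
	"/var/lib/kubelet/pods/<uid>/volumes/kubernetes.io~vsphere-volume/vol1",
)
if err != nil {
	return err
}
// spec.Volume.VsphereVolume.VolumePath is now "vol1".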
@ -285,9 +285,9 @@ func (vv *vsphereVolume) GetPath() string {
}
// vSphere Persistent Volume Plugin
func (plugin *vsphereVolumePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
func (plugin *vsphereVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
@ -341,28 +341,28 @@ func (plugin *vsphereVolumePlugin) newProvisionerInternal(options volume.VolumeO
}, nil
}
func (v *vsphereVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
vmDiskPath, sizeKB, err := v.manager.CreateVolume(v)
if err != nil {
return nil, err
}
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: v.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "vsphere-volume-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: v.options.PersistentVolumeReclaimPolicy,
AccessModes: v.options.PVC.Spec.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", sizeKB)),
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", sizeKB)),
},
PersistentVolumeSource: api.PersistentVolumeSource{
VsphereVolume: &api.VsphereVirtualDiskVolumeSource{
PersistentVolumeSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: vmDiskPath,
FSType: "ext4",
},
@ -377,7 +377,7 @@ func (v *vsphereVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
}
func getVolumeSource(
spec *volume.Spec) (*api.VsphereVirtualDiskVolumeSource, bool, error) {
spec *volume.Spec) (*v1.VsphereVirtualDiskVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.VsphereVolume != nil {
return spec.Volume.VsphereVolume, spec.ReadOnly, nil
} else if spec.PersistentVolume != nil &&

View File

@ -22,7 +22,7 @@ import (
"path"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
@ -47,11 +47,11 @@ func TestCanSupport(t *testing.T) {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{VsphereVolume: &api.VsphereVirtualDiskVolumeSource{}}}}) {
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{VsphereVolume: &api.VsphereVirtualDiskVolumeSource{}}}}}) {
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
@ -90,10 +90,10 @@ func TestPlugin(t *testing.T) {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
spec := &v1.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
VsphereVolume: &api.VsphereVirtualDiskVolumeSource{
VolumeSource: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: "[local] test-volume-name.vmdk",
FSType: "ext4",
},
@ -142,8 +142,8 @@ func TestPlugin(t *testing.T) {
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*vsphereVolumePlugin).newProvisionerInternal(options, &fakePDManager{})
persistentSpec, err := provisioner.Provision()
@ -155,7 +155,7 @@ func TestPlugin(t *testing.T) {
t.Errorf("Provision() returned unexpected path %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath)
}
cap := persistentSpec.Spec.Capacity[api.ResourceStorage]
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)

View File

@ -23,7 +23,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/volume"
@ -58,7 +58,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPa
return "", 0, err
}
capacity := v.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// vSphere works with KiB, so convert the byte count to KiB, rounding up
volSizeKB := int(volume.RoundUpSize(volSizeBytes, 1024))
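RoundUpSize converts the requested byte count into whole allocation units without ever under-allocating. A self-contained sketch of that rounding (the helper in pkg/volume is believed to be this one-liner, but treat it as an assumption):

package main

import "fmt"

// roundUpSize returns how many allocationUnitBytes-sized units are
// needed to hold volumeSizeBytes, rounding up.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	fmt.Println(roundUpSize(100*1024*1024, 1024)) // 102400 KiB for a 100Mi claim
	fmt.Println(roundUpSize(1024*1024+1, 1024))   // 1025: one extra byte rounds up
}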