mirror of https://github.com/k3s-io/k3s
Fixing all the "composite literal uses unkeyed fields" Vet errors.
parent 590038dcf1
commit 32d569d6c7
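Every hunk in this commit makes the same mechanical change: struct literals that filled their fields positionally are rewritten to name each field, which is what go vet's "composite literal uses unkeyed fields" check asks for. A minimal sketch of the pattern, using metaDuration as an illustrative stand-in for the unversioned.Duration wrapper (not the real k8s.io definition):

package main

import (
	"fmt"
	"time"
)

// metaDuration is an illustrative stand-in for unversioned.Duration:
// a thin wrapper holding a single time.Duration field.
type metaDuration struct {
	Duration time.Duration
}

func main() {
	// Unkeyed: compiles, but go vet reports
	// "composite literal uses unkeyed fields".
	before := metaDuration{5 * time.Minute}

	// Keyed: the form this commit converts everything to.
	after := metaDuration{Duration: 5 * time.Minute}

	fmt.Println(before.Duration == after.Duration) // prints: true
}

Besides silencing vet, the keyed form keeps these literals compiling if fields are later added to the structs or reordered.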
@@ -56,19 +56,19 @@ func NewCMServer() *CMServer {
 LookupCacheSizeForRC: 4096,
 LookupCacheSizeForRS: 4096,
 LookupCacheSizeForDaemonSet: 1024,
-ServiceSyncPeriod: unversioned.Duration{5 * time.Minute},
-NodeSyncPeriod: unversioned.Duration{10 * time.Second},
-ResourceQuotaSyncPeriod: unversioned.Duration{5 * time.Minute},
-NamespaceSyncPeriod: unversioned.Duration{5 * time.Minute},
-PVClaimBinderSyncPeriod: unversioned.Duration{10 * time.Minute},
-HorizontalPodAutoscalerSyncPeriod: unversioned.Duration{30 * time.Second},
-DeploymentControllerSyncPeriod: unversioned.Duration{30 * time.Second},
-MinResyncPeriod: unversioned.Duration{12 * time.Hour},
+ServiceSyncPeriod: unversioned.Duration{Duration: 5 * time.Minute},
+NodeSyncPeriod: unversioned.Duration{Duration: 10 * time.Second},
+ResourceQuotaSyncPeriod: unversioned.Duration{Duration: 5 * time.Minute},
+NamespaceSyncPeriod: unversioned.Duration{Duration: 5 * time.Minute},
+PVClaimBinderSyncPeriod: unversioned.Duration{Duration: 10 * time.Minute},
+HorizontalPodAutoscalerSyncPeriod: unversioned.Duration{Duration: 30 * time.Second},
+DeploymentControllerSyncPeriod: unversioned.Duration{Duration: 30 * time.Second},
+MinResyncPeriod: unversioned.Duration{Duration: 12 * time.Hour},
 RegisterRetryCount: 10,
-PodEvictionTimeout: unversioned.Duration{5 * time.Minute},
-NodeMonitorGracePeriod: unversioned.Duration{40 * time.Second},
-NodeStartupGracePeriod: unversioned.Duration{60 * time.Second},
-NodeMonitorPeriod: unversioned.Duration{5 * time.Second},
+PodEvictionTimeout: unversioned.Duration{Duration: 5 * time.Minute},
+NodeMonitorGracePeriod: unversioned.Duration{Duration: 40 * time.Second},
+NodeStartupGracePeriod: unversioned.Duration{Duration: 60 * time.Second},
+NodeMonitorPeriod: unversioned.Duration{Duration: 5 * time.Second},
 ClusterName: "kubernetes",
 TerminatedPodGCThreshold: 12500,
 VolumeConfiguration: componentconfig.VolumeConfiguration{
@@ -92,7 +92,7 @@ func NewCMServer() *CMServer {
 // AddFlags adds flags for a specific CMServer to the specified FlagSet
 func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 fs.IntVar(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on")
-fs.Var(componentconfig.IPVar{&s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
+fs.Var(componentconfig.IPVar{Val: &s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
 fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.")
 fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
 fs.IntVar(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
@@ -60,15 +60,15 @@ func NewProxyConfig() *ProxyServerConfig {
 
 // AddFlags adds flags for a specific ProxyServer to the specified FlagSet
 func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
-fs.Var(componentconfig.IPVar{&s.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)")
+fs.Var(componentconfig.IPVar{Val: &s.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)")
 fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
 fs.IntVar(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port to bind the health check server. Use 0 to disable.")
-fs.Var(componentconfig.IPVar{&s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
+fs.Var(componentconfig.IPVar{Val: &s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
 fs.IntVar(s.OOMScoreAdj, "oom-score-adj", util.IntPtrDerefOr(s.OOMScoreAdj, qos.KubeProxyOOMScoreAdj), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]")
 fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).")
 fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.")
 fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
-fs.Var(componentconfig.PortRangeVar{&s.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.")
+fs.Var(componentconfig.PortRangeVar{Val: &s.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.")
 fs.StringVar(&s.HostnameOverride, "hostname-override", s.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
 fs.Var(&s.Mode, "proxy-mode", "Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the '"+ExperimentalProxyModeAnnotation+"' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.")
 fs.IntVar(s.IPTablesMasqueradeBit, "iptables-masquerade-bit", util.IntPtrDerefOr(s.IPTablesMasqueradeBit, 14), "If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].")
@@ -68,7 +68,7 @@ func NewKubeletServer() *KubeletServer {
 KubeletConfiguration: componentconfig.KubeletConfiguration{
 Address: "0.0.0.0",
 CAdvisorPort: 4194,
-VolumeStatsAggPeriod: unversioned.Duration{time.Minute},
+VolumeStatsAggPeriod: unversioned.Duration{Duration: time.Minute},
 CertDirectory: "/var/run/kubernetes",
 CgroupRoot: "",
 ConfigureCBR0: false,
@@ -80,14 +80,14 @@ func NewKubeletServer() *KubeletServer {
 EnableCustomMetrics: false,
 EnableDebuggingHandlers: true,
 EnableServer: true,
-FileCheckFrequency: unversioned.Duration{20 * time.Second},
+FileCheckFrequency: unversioned.Duration{Duration: 20 * time.Second},
 HealthzBindAddress: "127.0.0.1",
 HealthzPort: 10248,
 HostNetworkSources: kubetypes.AllSource,
 HostPIDSources: kubetypes.AllSource,
 HostIPCSources: kubetypes.AllSource,
-HTTPCheckFrequency: unversioned.Duration{20 * time.Second},
-ImageMinimumGCAge: unversioned.Duration{2 * time.Minute},
+HTTPCheckFrequency: unversioned.Duration{Duration: 20 * time.Second},
+ImageMinimumGCAge: unversioned.Duration{Duration: 2 * time.Minute},
 ImageGCHighThresholdPercent: 90,
 ImageGCLowThresholdPercent: 80,
 LowDiskSpaceThresholdMB: 256,
@@ -96,12 +96,12 @@ func NewKubeletServer() *KubeletServer {
 MaxPerPodContainerCount: 2,
 MaxOpenFiles: 1000000,
 MaxPods: 110,
-MinimumGCAge: unversioned.Duration{1 * time.Minute},
+MinimumGCAge: unversioned.Duration{Duration: 1 * time.Minute},
 NetworkPluginDir: "/usr/libexec/kubernetes/kubelet-plugins/net/exec/",
 NetworkPluginName: "",
 NonMasqueradeCIDR: "10.0.0.0/8",
 VolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
-NodeStatusUpdateFrequency: unversioned.Duration{10 * time.Second},
+NodeStatusUpdateFrequency: unversioned.Duration{Duration: 10 * time.Second},
 NodeLabels: make(map[string]string),
 OOMScoreAdj: qos.KubeletOOMScoreAdj,
 LockFilePath: "",
@@ -118,14 +118,14 @@ func NewKubeletServer() *KubeletServer {
 RootDirectory: defaultRootDir,
 RuntimeCgroups: "",
 SerializeImagePulls: true,
-StreamingConnectionIdleTimeout: unversioned.Duration{4 * time.Hour},
-SyncFrequency: unversioned.Duration{1 * time.Minute},
+StreamingConnectionIdleTimeout: unversioned.Duration{Duration: 4 * time.Hour},
+SyncFrequency: unversioned.Duration{Duration: 1 * time.Minute},
 SystemCgroups: "",
 ReconcileCIDR: true,
 KubeAPIQPS: 5.0,
 KubeAPIBurst: 10,
 ExperimentalFlannelOverlay: experimentalFlannelOverlay,
-OutOfDiskTransitionFrequency: unversioned.Duration{5 * time.Minute},
+OutOfDiskTransitionFrequency: unversioned.Duration{Duration: 5 * time.Minute},
 HairpinMode: componentconfig.PromiscuousBridge,
 BabysitDaemons: false,
 },
@@ -141,7 +141,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 fs.StringVar(&s.ManifestURL, "manifest-url", s.ManifestURL, "URL for accessing the container manifest")
 fs.StringVar(&s.ManifestURLHeader, "manifest-url-header", s.ManifestURLHeader, "HTTP header to use when accessing the manifest URL, with the key separated from the value with a ':', as in 'key:value'")
 fs.BoolVar(&s.EnableServer, "enable-server", s.EnableServer, "Enable the Kubelet's server")
-fs.Var(componentconfig.IPVar{&s.Address}, "address", "The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)")
+fs.Var(componentconfig.IPVar{Val: &s.Address}, "address", "The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)")
 fs.UintVar(&s.Port, "port", s.Port, "The port for the Kubelet to serve on.")
 fs.UintVar(&s.ReadOnlyPort, "read-only-port", s.ReadOnlyPort, "The read-only port for the Kubelet to serve on with no authentication/authorization (set to 0 to disable)")
 fs.StringVar(&s.TLSCertFile, "tls-cert-file", s.TLSCertFile, ""+
@@ -173,7 +173,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 fs.Var(&s.KubeConfig, "kubeconfig", "Path to a kubeconfig file, specifying how to authenticate to API server (the master location is set by the api-servers flag).")
 fs.UintVar(&s.CAdvisorPort, "cadvisor-port", s.CAdvisorPort, "The port of the localhost cAdvisor endpoint")
 fs.IntVar(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port of the localhost healthz endpoint")
-fs.Var(componentconfig.IPVar{&s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the healthz server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
+fs.Var(componentconfig.IPVar{Val: &s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the healthz server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
 fs.IntVar(&s.OOMScoreAdj, "oom-score-adj", s.OOMScoreAdj, "The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]")
 fs.StringSliceVar(&s.APIServerList, "api-servers", []string{}, "List of Kubernetes API servers for publishing events, and reading pods and services. (ip:port), comma separated.")
 fs.BoolVar(&s.RegisterNode, "register-node", s.RegisterNode, "Register the node with the apiserver (defaults to true if --api-servers is set)")
@@ -608,7 +608,7 @@ func RunKubelet(kcfg *KubeletConfig) error {
 eventBroadcaster.StartLogging(glog.V(3).Infof)
 if kcfg.EventClient != nil {
 glog.V(4).Infof("Sending events to api server.")
-eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kcfg.EventClient.Events("")})
+eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kcfg.EventClient.Events("")})
 } else {
 glog.Warning("No api server defined - no events will be sent to API server.")
 }
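The hunk above, like several later ones, wraps an events client in a sink struct via EventSinkImpl{Interface: ...}. A hedged sketch of the same shape, to show why even a one-field wrapper around an interface gets a named field (eventSink and EventInterface are illustrative stand-ins, not the real client-go types):

package main

import "fmt"

// EventInterface stands in for the events client interface.
type EventInterface interface {
	Create(msg string) error
}

// eventSink mimics the shape of EventSinkImpl: a struct whose only
// field is the interface it delegates to.
type eventSink struct {
	Interface EventInterface
}

type fakeEvents struct{}

func (fakeEvents) Create(msg string) error {
	fmt.Println("event:", msg)
	return nil
}

func main() {
	// eventSink{fakeEvents{}} would also compile, but vet flags it;
	// naming the field keeps the literal valid if the sink grows more fields.
	sink := eventSink{Interface: fakeEvents{}}
	_ = sink.Interface.Create("node registered")
}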
@@ -33,5 +33,5 @@ func Version(version string) string {
 }
 
 func GroupVersion(gv unversioned.GroupVersion) unversioned.GroupVersion {
-return unversioned.GroupVersion{Group(gv.Group), Version(gv.Version)}
+return unversioned.GroupVersion{Group: Group(gv.Group), Version: Version(gv.Version)}
 }
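The GroupVersion hunk just above also shows the safety argument for keyed fields: with two fields of the same type, an unkeyed literal that swaps its arguments still compiles. A small illustration with a hypothetical two-string struct (the real unversioned.GroupVersion definition is not reproduced here):

package main

import "fmt"

// gv mirrors the shape of unversioned.GroupVersion for illustration only.
type gv struct {
	Group   string
	Version string
}

func main() {
	// Arguments accidentally swapped: the compiler cannot catch this,
	// and only go vet's unkeyed-fields warning hints at the risk.
	oops := gv{"v1", "extensions"}

	// Keyed fields make the intent explicit and order-independent.
	ok := gv{Group: "extensions", Version: "v1"}

	fmt.Println(oops, ok)
}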
@ -97,9 +97,9 @@ func main() {
|
|||
// We may change the output path later.
|
||||
arguments.OutputPackagePath = "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/testoutput"
|
||||
arguments.CustomArgs = clientgenargs.Args{
|
||||
[]unversioned.GroupVersion{{"testgroup", ""}},
|
||||
[]unversioned.GroupVersion{{Group: "testgroup", Version: ""}},
|
||||
map[unversioned.GroupVersion]string{
|
||||
unversioned.GroupVersion{"testgroup", ""}: "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/testdata/apis/testgroup",
|
||||
unversioned.GroupVersion{Group: "testgroup", Version: ""}: "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/testdata/apis/testgroup",
|
||||
},
|
||||
"test_internalclientset",
|
||||
"k8s.io/kubernetes/cmd/libs/go2idl/client-gen/testoutput/clientset_generated/",
|
||||
|
|
|
@ -109,7 +109,7 @@ func TestUpdateStatusTestType(t *testing.T) {
|
|||
"name": "baz",
|
||||
},
|
||||
},
|
||||
Status: testgroup.TestTypeStatus{"I'm in good status"},
|
||||
Status: testgroup.TestTypeStatus{Blah: "I'm in good status"},
|
||||
}
|
||||
c := DecoratedSimpleClient{
|
||||
simpleClient: simple.Client{
|
||||
|
|
|
@ -51,7 +51,7 @@ func calledOnce(h bool, ret runtime.Object, err error) (<-chan struct{}, func(co
|
|||
|
||||
func TestRegister_withUnknownNode(t *testing.T) {
|
||||
fc := &core.Fake{}
|
||||
nodes := &fakeNodes{&fake.FakeNodes{&fake.FakeCore{fc}}}
|
||||
nodes := &fakeNodes{&fake.FakeNodes{&fake.FakeCore{Fake: fc}}}
|
||||
createCalled, createOnce := calledOnce(true, nil, nil)
|
||||
fc.AddReactor("create", "nodes", createOnce)
|
||||
|
||||
|
@ -84,7 +84,7 @@ func TestRegister_withUnknownNode(t *testing.T) {
|
|||
|
||||
func TestRegister_withKnownNode(t *testing.T) {
|
||||
fc := &core.Fake{}
|
||||
nodes := &fakeNodes{&fake.FakeNodes{&fake.FakeCore{fc}}}
|
||||
nodes := &fakeNodes{&fake.FakeNodes{&fake.FakeCore{Fake: fc}}}
|
||||
updateCalled, updateOnce := calledOnce(true, nil, nil)
|
||||
fc.AddReactor("update", "nodes", updateOnce)
|
||||
|
||||
|
@ -122,9 +122,9 @@ func TestRegister_withSemiKnownNode(t *testing.T) {
|
|||
// CreateOrUpdate should proceed to attempt an update.
|
||||
|
||||
fc := &core.Fake{}
|
||||
nodes := &fakeNodes{&fake.FakeNodes{&fake.FakeCore{fc}}}
|
||||
nodes := &fakeNodes{&fake.FakeNodes{&fake.FakeCore{Fake: fc}}}
|
||||
|
||||
createCalled, createOnce := calledOnce(true, nil, errors.NewAlreadyExists(unversioned.GroupResource{"", ""}, "nodes"))
|
||||
createCalled, createOnce := calledOnce(true, nil, errors.NewAlreadyExists(unversioned.GroupResource{Group: "", Resource: ""}, "nodes"))
|
||||
fc.AddReactor("create", "nodes", createOnce)
|
||||
|
||||
updateCalled, updateOnce := calledOnce(true, nil, nil)
|
||||
|
|
|
@ -39,8 +39,8 @@ func Test_nodeWithUpdatedStatus(t *testing.T) {
|
|||
Status: s,
|
||||
Reason: r,
|
||||
Message: "some message we don't care about here",
|
||||
LastTransitionTime: unversioned.Time{now.Add(-time.Minute)},
|
||||
LastHeartbeatTime: unversioned.Time{now.Add(d)},
|
||||
LastTransitionTime: unversioned.Time{Time: now.Add(-time.Minute)},
|
||||
LastHeartbeatTime: unversioned.Time{Time: now.Add(d)},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -874,7 +874,7 @@ func (s *SchedulerServer) bootstrap(hks hyperkube.Interface, sc *schedcfg.Config
|
|||
broadcaster := record.NewBroadcaster()
|
||||
recorder := broadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
|
||||
broadcaster.StartLogging(log.Infof)
|
||||
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{eventsClient.Events("")})
|
||||
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: eventsClient.Events("")})
|
||||
|
||||
lw := cache.NewListWatchFromClient(s.client.CoreClient, "pods", api.NamespaceAll, fields.Everything())
|
||||
|
||||
|
|
|
@ -342,13 +342,13 @@ func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolum
|
|||
// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format.
|
||||
func ParseRFC3339(s string, nowFn func() unversioned.Time) (unversioned.Time, error) {
|
||||
if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil {
|
||||
return unversioned.Time{t}, nil
|
||||
return unversioned.Time{Time: t}, nil
|
||||
}
|
||||
t, err := time.Parse(time.RFC3339, s)
|
||||
if err != nil {
|
||||
return unversioned.Time{}, err
|
||||
}
|
||||
return unversioned.Time{t}, nil
|
||||
return unversioned.Time{Time: t}, nil
|
||||
}
|
||||
|
||||
// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
|
||||
|
|
|
@ -563,7 +563,7 @@ func TestValidateVolumes(t *testing.T) {
|
|||
{Name: "glusterfs", VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "host1", Path: "path", ReadOnly: false}}},
|
||||
{Name: "flocker", VolumeSource: api.VolumeSource{Flocker: &api.FlockerVolumeSource{DatasetName: "datasetName"}}},
|
||||
{Name: "rbd", VolumeSource: api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "bar", FSType: "ext4"}}},
|
||||
{Name: "cinder", VolumeSource: api.VolumeSource{Cinder: &api.CinderVolumeSource{"29ea5088-4f60-4757-962e-dba678767887", "ext4", false}}},
|
||||
{Name: "cinder", VolumeSource: api.VolumeSource{Cinder: &api.CinderVolumeSource{VolumeID: "29ea5088-4f60-4757-962e-dba678767887", FSType: "ext4", ReadOnly: false}}},
|
||||
{Name: "cephfs", VolumeSource: api.VolumeSource{CephFS: &api.CephFSVolumeSource{Monitors: []string{"foo"}}}},
|
||||
{Name: "downwardapi", VolumeSource: api.VolumeSource{DownwardAPI: &api.DownwardAPIVolumeSource{Items: []api.DownwardAPIVolumeFile{
|
||||
{Path: "labels", FieldRef: api.ObjectFieldSelector{
|
||||
|
@ -591,9 +591,9 @@ func TestValidateVolumes(t *testing.T) {
|
|||
APIVersion: "v1",
|
||||
FieldPath: "metadata.labels"}},
|
||||
}}}},
|
||||
{Name: "fc", VolumeSource: api.VolumeSource{FC: &api.FCVolumeSource{[]string{"some_wwn"}, &lun, "ext4", false}}},
|
||||
{Name: "fc", VolumeSource: api.VolumeSource{FC: &api.FCVolumeSource{TargetWWNs: []string{"some_wwn"}, Lun: &lun, FSType: "ext4", ReadOnly: false}}},
|
||||
{Name: "flexvolume", VolumeSource: api.VolumeSource{FlexVolume: &api.FlexVolumeSource{Driver: "kubernetes.io/blue", FSType: "ext4"}}},
|
||||
{Name: "azure", VolumeSource: api.VolumeSource{AzureFile: &api.AzureFileVolumeSource{"key", "share", false}}},
|
||||
{Name: "azure", VolumeSource: api.VolumeSource{AzureFile: &api.AzureFileVolumeSource{SecretName: "key", ShareName: "share", ReadOnly: false}}},
|
||||
}
|
||||
names, errs := validateVolumes(successCase, field.NewPath("field"))
|
||||
if len(errs) != 0 {
|
||||
|
@ -639,11 +639,11 @@ func TestValidateVolumes(t *testing.T) {
|
|||
APIVersion: "v1",
|
||||
FieldPath: "metadata.labels"}}},
|
||||
}}
|
||||
zeroWWN := api.VolumeSource{FC: &api.FCVolumeSource{[]string{}, &lun, "ext4", false}}
|
||||
emptyLun := api.VolumeSource{FC: &api.FCVolumeSource{[]string{"wwn"}, nil, "ext4", false}}
|
||||
zeroWWN := api.VolumeSource{FC: &api.FCVolumeSource{TargetWWNs: []string{}, Lun: &lun, FSType: "ext4", ReadOnly: false}}
|
||||
emptyLun := api.VolumeSource{FC: &api.FCVolumeSource{TargetWWNs: []string{"wwn"}, Lun: nil, FSType: "ext4", ReadOnly: false}}
|
||||
slashInName := api.VolumeSource{Flocker: &api.FlockerVolumeSource{DatasetName: "foo/bar"}}
|
||||
emptyAzureSecret := api.VolumeSource{AzureFile: &api.AzureFileVolumeSource{"", "share", false}}
|
||||
emptyAzureShare := api.VolumeSource{AzureFile: &api.AzureFileVolumeSource{"name", "", false}}
|
||||
emptyAzureSecret := api.VolumeSource{AzureFile: &api.AzureFileVolumeSource{SecretName: "", ShareName: "share", ReadOnly: false}}
|
||||
emptyAzureShare := api.VolumeSource{AzureFile: &api.AzureFileVolumeSource{SecretName: "name", ShareName: "", ReadOnly: false}}
|
||||
errorCases := map[string]struct {
|
||||
V []api.Volume
|
||||
T field.ErrorType
|
||||
|
|
|
@ -31,13 +31,13 @@ func TestAllPreferredGroupVersions(t *testing.T) {
|
|||
{
|
||||
groupMetas: []apimachinery.GroupMeta{
|
||||
{
|
||||
GroupVersion: unversioned.GroupVersion{"group1", "v1"},
|
||||
GroupVersion: unversioned.GroupVersion{Group: "group1", Version: "v1"},
|
||||
},
|
||||
{
|
||||
GroupVersion: unversioned.GroupVersion{"group2", "v2"},
|
||||
GroupVersion: unversioned.GroupVersion{Group: "group2", Version: "v2"},
|
||||
},
|
||||
{
|
||||
GroupVersion: unversioned.GroupVersion{"", "v1"},
|
||||
GroupVersion: unversioned.GroupVersion{Group: "", Version: "v1"},
|
||||
},
|
||||
},
|
||||
expect: "group1/v1,group2/v2,v1",
|
||||
|
@ -45,7 +45,7 @@ func TestAllPreferredGroupVersions(t *testing.T) {
|
|||
{
|
||||
groupMetas: []apimachinery.GroupMeta{
|
||||
{
|
||||
GroupVersion: unversioned.GroupVersion{"", "v1"},
|
||||
GroupVersion: unversioned.GroupVersion{Group: "", Version: "v1"},
|
||||
},
|
||||
},
|
||||
expect: "v1",
|
||||
|
|
|
@ -46,11 +46,11 @@ func addDefaultingFuncs(scheme *runtime.Scheme) {
|
|||
obj.ResourceContainer = "/kube-proxy"
|
||||
}
|
||||
if obj.IPTablesSyncPeriod.Duration == 0 {
|
||||
obj.IPTablesSyncPeriod = unversioned.Duration{30 * time.Second}
|
||||
obj.IPTablesSyncPeriod = unversioned.Duration{Duration: 30 * time.Second}
|
||||
}
|
||||
zero := unversioned.Duration{}
|
||||
if obj.UDPIdleTimeout == zero {
|
||||
obj.UDPIdleTimeout = unversioned.Duration{250 * time.Millisecond}
|
||||
obj.UDPIdleTimeout = unversioned.Duration{Duration: 250 * time.Millisecond}
|
||||
}
|
||||
if obj.ConntrackMax == 0 {
|
||||
obj.ConntrackMax = 256 * 1024 // 4x default (64k)
|
||||
|
@ -86,13 +86,13 @@ func addDefaultingFuncs(scheme *runtime.Scheme) {
|
|||
func(obj *LeaderElectionConfiguration) {
|
||||
zero := unversioned.Duration{}
|
||||
if obj.LeaseDuration == zero {
|
||||
obj.LeaseDuration = unversioned.Duration{15 * time.Second}
|
||||
obj.LeaseDuration = unversioned.Duration{Duration: 15 * time.Second}
|
||||
}
|
||||
if obj.RenewDeadline == zero {
|
||||
obj.RenewDeadline = unversioned.Duration{10 * time.Second}
|
||||
obj.RenewDeadline = unversioned.Duration{Duration: 10 * time.Second}
|
||||
}
|
||||
if obj.RetryPeriod == zero {
|
||||
obj.RetryPeriod = unversioned.Duration{2 * time.Second}
|
||||
obj.RetryPeriod = unversioned.Duration{Duration: 2 * time.Second}
|
||||
}
|
||||
},
|
||||
)
|
||||
|
|
|
@ -335,9 +335,9 @@ func (l *LeaderElector) maybeReportTransition() {
|
|||
func DefaultLeaderElectionConfiguration() componentconfig.LeaderElectionConfiguration {
|
||||
return componentconfig.LeaderElectionConfiguration{
|
||||
LeaderElect: false,
|
||||
LeaseDuration: unversioned.Duration{DefaultLeaseDuration},
|
||||
RenewDeadline: unversioned.Duration{DefaultRenewDeadline},
|
||||
RetryPeriod: unversioned.Duration{DefaultRetryPeriod},
|
||||
LeaseDuration: unversioned.Duration{Duration: DefaultLeaseDuration},
|
||||
RenewDeadline: unversioned.Duration{Duration: DefaultRenewDeadline},
|
||||
RetryPeriod: unversioned.Duration{Duration: DefaultRetryPeriod},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -294,7 +294,7 @@ func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp unvers
|
|||
}
|
||||
|
||||
func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, eventtype, reason, message string) *api.Event {
|
||||
t := unversioned.Time{recorder.clock.Now()}
|
||||
t := unversioned.Time{Time: recorder.clock.Now()}
|
||||
namespace := ref.Namespace
|
||||
if namespace == "" {
|
||||
namespace = api.NamespaceDefault
|
||||
|
|
|
@ -211,7 +211,7 @@ func (d *DiscoveryClient) SwaggerSchema(version unversioned.GroupVersion) (*swag
|
|||
func setDiscoveryDefaults(config *restclient.Config) error {
|
||||
config.APIPath = ""
|
||||
config.GroupVersion = nil
|
||||
config.Codec = runtime.NoopEncoder{api.Codecs.UniversalDecoder()}
|
||||
config.Codec = runtime.NoopEncoder{Decoder: api.Codecs.UniversalDecoder()}
|
||||
if len(config.UserAgent) == 0 {
|
||||
config.UserAgent = restclient.DefaultKubernetesUserAgent()
|
||||
}
|
||||
|
|
|
@ -103,7 +103,7 @@ func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod contro
|
|||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
// TODO: remove the wrapper when every clients have moved to use the clientset.
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")})
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
|
||||
|
||||
dsc := &DaemonSetsController{
|
||||
kubeClient: kubeClient,
|
||||
|
|
|
@ -95,7 +95,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
|
|||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
// TODO: remove the wrapper when every clients have moved to use the clientset.
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{client.Core().Events("")})
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: client.Core().Events("")})
|
||||
|
||||
dc := &DeploymentController{
|
||||
client: client,
|
||||
|
|
|
@ -81,7 +81,7 @@ func TestGC(t *testing.T) {
|
|||
for _, pod := range test.pods {
|
||||
creationTime = creationTime.Add(1 * time.Hour)
|
||||
gcc.podStore.Store.Add(&api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{creationTime}},
|
||||
ObjectMeta: api.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}},
|
||||
Status: api.PodStatus{Phase: pod.phase},
|
||||
})
|
||||
}
|
||||
|
|
|
@ -74,7 +74,7 @@ func NewJobController(kubeClient clientset.Interface, resyncPeriod controller.Re
|
|||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
// TODO: remove the wrapper when every clients have moved to use the clientset.
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")})
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
|
||||
|
||||
jm := &JobController{
|
||||
kubeClient: kubeClient,
|
||||
|
|
|
@ -141,7 +141,7 @@ func NewNodeController(
|
|||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
if kubeClient != nil {
|
||||
glog.Infof("Sending events to api server.")
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")})
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
|
||||
} else {
|
||||
glog.Infof("No api server defined - no events will be sent to API server.")
|
||||
}
|
||||
|
|
|
@ -105,7 +105,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
|
|||
if latestTimestamp.Before(timestamp) {
|
||||
latestTimestamp = timestamp
|
||||
}
|
||||
heapsterMetricPoint := heapster.MetricPoint{timestamp, reportedMetricPoint.level, nil}
|
||||
heapsterMetricPoint := heapster.MetricPoint{Timestamp: timestamp, Value: reportedMetricPoint.level, FloatValue: nil}
|
||||
heapsterMetricPoints = append(heapsterMetricPoints, heapsterMetricPoint)
|
||||
}
|
||||
metric := heapster.MetricResult{
|
||||
|
|
|
@ -98,7 +98,7 @@ type ReplicaSetController struct {
|
|||
func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")})
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
|
||||
|
||||
rsc := &ReplicaSetController{
|
||||
kubeClient: kubeClient,
|
||||
|
|
|
@ -961,7 +961,7 @@ func TestDeletionTimestamp(t *testing.T) {
|
|||
t.Errorf("Couldn't get key for object %+v: %v", rs, err)
|
||||
}
|
||||
pod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0]
|
||||
pod.DeletionTimestamp = &unversioned.Time{time.Now()}
|
||||
pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
|
||||
manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)})
|
||||
|
||||
// A pod added with a deletion timestamp should decrement deletions, not creations.
|
||||
|
@ -1005,7 +1005,7 @@ func TestDeletionTimestamp(t *testing.T) {
|
|||
},
|
||||
}
|
||||
manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(secondPod)})
|
||||
oldPod.DeletionTimestamp = &unversioned.Time{time.Now()}
|
||||
oldPod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
|
||||
manager.updatePod(&oldPod, &pod)
|
||||
|
||||
podExp, exists, err = manager.expectations.GetExpectations(rsKey)
|
||||
|
|
|
@ -97,7 +97,7 @@ type ReplicationManager struct {
|
|||
func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")})
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
|
||||
|
||||
rm := &ReplicationManager{
|
||||
kubeClient: kubeClient,
|
||||
|
|
|
@ -943,7 +943,7 @@ func TestDeletionTimestamp(t *testing.T) {
|
|||
t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
|
||||
}
|
||||
pod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0]
|
||||
pod.DeletionTimestamp = &unversioned.Time{time.Now()}
|
||||
pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
|
||||
manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})
|
||||
|
||||
// A pod added with a deletion timestamp should decrement deletions, not creations.
|
||||
|
@ -987,7 +987,7 @@ func TestDeletionTimestamp(t *testing.T) {
|
|||
},
|
||||
}
|
||||
manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(secondPod)})
|
||||
oldPod.DeletionTimestamp = &unversioned.Time{time.Now()}
|
||||
oldPod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
|
||||
manager.updatePod(&oldPod, &pod)
|
||||
|
||||
podExp, exists, err = manager.expectations.GetExpectations(rcKey)
|
||||
|
|
|
@ -89,7 +89,7 @@ type ServiceController struct {
|
|||
// (like load balancers) in sync with the registry.
|
||||
func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) *ServiceController {
|
||||
broadcaster := record.NewBroadcaster()
|
||||
broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{kubeClient.Core().Events("")})
|
||||
broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
|
||||
recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})
|
||||
|
||||
return &ServiceController{
|
||||
|
|
|
@ -507,8 +507,8 @@ func New(c *Config) (*GenericAPIServer, error) {
|
|||
|
||||
func (s *GenericAPIServer) NewRequestInfoResolver() *apiserver.RequestInfoResolver {
|
||||
return &apiserver.RequestInfoResolver{
|
||||
sets.NewString(strings.Trim(s.APIPrefix, "/"), strings.Trim(s.APIGroupPrefix, "/")), // all possible API prefixes
|
||||
sets.NewString(strings.Trim(s.APIPrefix, "/")), // APIPrefixes that won't have groups (legacy)
|
||||
APIPrefixes: sets.NewString(strings.Trim(s.APIPrefix, "/"), strings.Trim(s.APIGroupPrefix, "/")), // all possible API prefixes
|
||||
GrouplessAPIPrefixes: sets.NewString(strings.Trim(s.APIPrefix, "/")), // APIPrefixes that won't have groups (legacy)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -104,7 +104,7 @@ func (HorizontalPodAutoscalerV1Beta1) Generate(genericParams map[string]interfac
|
|||
scaler.Spec.MinReplicas = &min
|
||||
}
|
||||
if cpu >= 0 {
|
||||
scaler.Spec.CPUUtilization = &extensions.CPUTargetUtilization{cpu}
|
||||
scaler.Spec.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: cpu}
|
||||
}
|
||||
return &scaler, nil
|
||||
}
|
||||
|
|
|
@ -566,7 +566,7 @@ func newAllPhasePodList() *api.PodList {
|
|||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "test1",
|
||||
CreationTimestamp: unversioned.Time{time.Now().AddDate(-10, 0, 0)},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: make([]api.Container, 2),
|
||||
|
@ -583,7 +583,7 @@ func newAllPhasePodList() *api.PodList {
|
|||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "test2",
|
||||
CreationTimestamp: unversioned.Time{time.Now().AddDate(-10, 0, 0)},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: make([]api.Container, 2),
|
||||
|
@ -600,7 +600,7 @@ func newAllPhasePodList() *api.PodList {
|
|||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "test3",
|
||||
CreationTimestamp: unversioned.Time{time.Now().AddDate(-10, 0, 0)},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: make([]api.Container, 2),
|
||||
|
@ -617,7 +617,7 @@ func newAllPhasePodList() *api.PodList {
|
|||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "test4",
|
||||
CreationTimestamp: unversioned.Time{time.Now().AddDate(-10, 0, 0)},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: make([]api.Container, 2),
|
||||
|
@ -634,7 +634,7 @@ func newAllPhasePodList() *api.PodList {
|
|||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "test5",
|
||||
CreationTimestamp: unversioned.Time{time.Now().AddDate(-10, 0, 0)},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: make([]api.Container, 2),
|
||||
|
|
|
@ -110,7 +110,7 @@ func (o *ConvertOptions) Complete(f *cmdutil.Factory, out io.Writer, cmd *cobra.
|
|||
clientMapper := resource.ClientMapperFunc(f.ClientForMapping)
|
||||
if o.local {
|
||||
fmt.Fprintln(out, "running in local mode...")
|
||||
o.builder = resource.NewBuilder(mapper, typer, resource.DisabledClientForMapping{clientMapper}, f.Decoder(true))
|
||||
o.builder = resource.NewBuilder(mapper, typer, resource.DisabledClientForMapping{ClientMapper: clientMapper}, f.Decoder(true))
|
||||
} else {
|
||||
o.builder = resource.NewBuilder(mapper, typer, clientMapper, f.Decoder(true))
|
||||
schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"), cmdutil.GetFlagString(cmd, "schema-cache-dir"))
|
||||
|
|
|
@ -50,7 +50,7 @@ func TestMain(m *testing.M) {
|
|||
node = &api.Node{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "node",
|
||||
CreationTimestamp: unversioned.Time{time.Now()},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now()},
|
||||
},
|
||||
Spec: api.NodeSpec{
|
||||
ExternalID: "node",
|
||||
|
@ -209,7 +209,7 @@ func TestDrain(t *testing.T) {
|
|||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "rc",
|
||||
Namespace: "default",
|
||||
CreationTimestamp: unversioned.Time{time.Now()},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now()},
|
||||
Labels: labels,
|
||||
SelfLink: testapi.Default.SelfLink("replicationcontrollers", "rc"),
|
||||
},
|
||||
|
@ -225,7 +225,7 @@ func TestDrain(t *testing.T) {
|
|||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "bar",
|
||||
Namespace: "default",
|
||||
CreationTimestamp: unversioned.Time{time.Now()},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now()},
|
||||
Labels: labels,
|
||||
Annotations: rc_anno,
|
||||
},
|
||||
|
@ -238,7 +238,7 @@ func TestDrain(t *testing.T) {
|
|||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "ds",
|
||||
Namespace: "default",
|
||||
CreationTimestamp: unversioned.Time{time.Now()},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now()},
|
||||
SelfLink: "/apis/extensions/v1beta1/namespaces/default/daemonsets/ds",
|
||||
},
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
|
@ -253,7 +253,7 @@ func TestDrain(t *testing.T) {
|
|||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "bar",
|
||||
Namespace: "default",
|
||||
CreationTimestamp: unversioned.Time{time.Now()},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now()},
|
||||
Labels: labels,
|
||||
Annotations: ds_anno,
|
||||
},
|
||||
|
@ -266,7 +266,7 @@ func TestDrain(t *testing.T) {
|
|||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "job",
|
||||
Namespace: "default",
|
||||
CreationTimestamp: unversioned.Time{time.Now()},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now()},
|
||||
SelfLink: "/apis/extensions/v1beta1/namespaces/default/jobs/job",
|
||||
},
|
||||
Spec: extensions.JobSpec{
|
||||
|
@ -278,7 +278,7 @@ func TestDrain(t *testing.T) {
|
|||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "bar",
|
||||
Namespace: "default",
|
||||
CreationTimestamp: unversioned.Time{time.Now()},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now()},
|
||||
Labels: labels,
|
||||
Annotations: map[string]string{controller.CreatedByAnnotation: refJson(t, &job)},
|
||||
},
|
||||
|
@ -288,7 +288,7 @@ func TestDrain(t *testing.T) {
|
|||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "bar",
|
||||
Namespace: "default",
|
||||
CreationTimestamp: unversioned.Time{time.Now()},
|
||||
CreationTimestamp: unversioned.Time{Time: time.Now()},
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
|
|
|
@ -101,7 +101,7 @@ func TestGarbageCollectZeroMaxContainers(t *testing.T) {
|
|||
})
|
||||
addPods(gc.podGetter, "foo")
|
||||
|
||||
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{time.Minute, 1, 0}))
|
||||
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0}))
|
||||
assert.Len(t, fakeDocker.Removed, 1)
|
||||
}
|
||||
|
||||
|
@ -116,7 +116,7 @@ func TestGarbageCollectNoMaxPerPodContainerLimit(t *testing.T) {
|
|||
})
|
||||
addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4")
|
||||
|
||||
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{time.Minute, -1, 4}))
|
||||
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4}))
|
||||
assert.Len(t, fakeDocker.Removed, 1)
|
||||
}
|
||||
|
||||
|
@ -131,7 +131,6 @@ func TestGarbageCollectNoMaxLimit(t *testing.T) {
|
|||
})
|
||||
addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4")
|
||||
|
||||
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{time.Minute, 1, -1}))
|
||||
assert.Len(t, fakeDocker.Removed, 0)
|
||||
}
|
||||
|
||||
|
@ -246,7 +245,7 @@ func TestGarbageCollect(t *testing.T) {
|
|||
gc, fakeDocker := newTestContainerGC(t)
|
||||
fakeDocker.SetFakeContainers(test.containers)
|
||||
addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4", "foo5", "foo6", "foo7")
|
||||
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{time.Hour, 2, 6}))
|
||||
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Hour, MaxPerPodContainer: 2, MaxContainers: 6}))
|
||||
verifyStringArrayEqualsAnyOrder(t, fakeDocker.Removed, test.expectedRemoved)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -51,7 +51,7 @@ func TestToRuntimeContainer(t *testing.T) {
|
|||
Status: "Up 5 hours",
|
||||
}
|
||||
expected := &kubecontainer.Container{
|
||||
ID: kubecontainer.ContainerID{"docker", "ab2cdf"},
|
||||
ID: kubecontainer.ContainerID{Type: "docker", ID: "ab2cdf"},
|
||||
Name: "bar",
|
||||
Image: "bar_image",
|
||||
Hash: 0x5678,
|
||||
|
|
|
@ -394,7 +394,7 @@ func apiContainerToContainer(c docker.APIContainers) kubecontainer.Container {
|
|||
return kubecontainer.Container{}
|
||||
}
|
||||
return kubecontainer.Container{
|
||||
ID: kubecontainer.ContainerID{"docker", c.ID},
|
||||
ID: kubecontainer.ContainerID{Type: "docker", ID: c.ID},
|
||||
Name: dockerName.ContainerName,
|
||||
Hash: hash,
|
||||
}
|
||||
|
@ -408,7 +408,7 @@ func dockerContainersToPod(containers []*docker.APIContainers) kubecontainer.Pod
|
|||
continue
|
||||
}
|
||||
pod.Containers = append(pod.Containers, &kubecontainer.Container{
|
||||
ID: kubecontainer.ContainerID{"docker", c.ID},
|
||||
ID: kubecontainer.ContainerID{Type: "docker", ID: c.ID},
|
||||
Name: dockerName.ContainerName,
|
||||
Hash: hash,
|
||||
Image: c.Image,
|
||||
|
@ -1064,8 +1064,8 @@ func TestSyncPodBackoff(t *testing.T) {
|
|||
|
||||
startCalls := []string{"create", "start", "inspect_container"}
|
||||
backOffCalls := []string{}
|
||||
startResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", nil, ""}
|
||||
backoffResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", kubecontainer.ErrCrashLoopBackOff, ""}
|
||||
startResult := &kubecontainer.SyncResult{Action: kubecontainer.StartContainer, Target: "bad", Error: nil, Message: ""}
|
||||
backoffResult := &kubecontainer.SyncResult{Action: kubecontainer.StartContainer, Target: "bad", Error: kubecontainer.ErrCrashLoopBackOff, Message: ""}
|
||||
tests := []struct {
|
||||
tick int
|
||||
backoff int
|
||||
|
|
|
@ -75,7 +75,7 @@ func makeImage(id int, size int64) container.Image {
|
|||
// Make a container with the specified ID. It will use the image with the same ID.
|
||||
func makeContainer(id int) *container.Container {
|
||||
return &container.Container{
|
||||
ID: container.ContainerID{"test", fmt.Sprintf("container-%d", id)},
|
||||
ID: container.ContainerID{Type: "test", ID: fmt.Sprintf("container-%d", id)},
|
||||
Image: imageName(id),
|
||||
}
|
||||
}
|
||||
|
@ -323,7 +323,7 @@ func TestFreeSpaceImagesAlsoDoesLookupByRepoTags(t *testing.T) {
|
|||
{
|
||||
Containers: []*container.Container{
|
||||
{
|
||||
ID: container.ContainerID{"test", "c5678"},
|
||||
ID: container.ContainerID{Type: "test", ID: "c5678"},
|
||||
Image: "salad",
|
||||
},
|
||||
},
|
||||
|
|
|
@ -259,7 +259,7 @@ func NewMainKubelet(
|
|||
cache.NewReflector(listWatch, &api.Node{}, nodeStore, 0).Run()
|
||||
}
|
||||
nodeLister := &cache.StoreToNodeLister{Store: nodeStore}
|
||||
nodeInfo := &predicates.CachedNodeInfo{nodeLister}
|
||||
nodeInfo := &predicates.CachedNodeInfo{StoreToNodeLister: nodeLister}
|
||||
|
||||
// TODO: get the real node object of ourself,
|
||||
// and use the real node name and UID.
|
||||
|
@ -2111,7 +2111,7 @@ func (kl *Kubelet) deletePod(pod *api.Pod) error {
|
|||
if runningPod.IsEmpty() {
|
||||
return fmt.Errorf("pod not found")
|
||||
}
|
||||
podPair := kubecontainer.PodPair{pod, &runningPod}
|
||||
podPair := kubecontainer.PodPair{APIPod: pod, RunningPod: &runningPod}
|
||||
|
||||
kl.podKillingCh <- &podPair
|
||||
// TODO: delete the mirror pod here?
|
||||
|
@ -2156,7 +2156,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
|
|||
}
|
||||
for _, pod := range runningPods {
|
||||
if _, found := desiredPods[pod.ID]; !found {
|
||||
kl.podKillingCh <- &kubecontainer.PodPair{nil, pod}
|
||||
kl.podKillingCh <- &kubecontainer.PodPair{APIPod: nil, RunningPod: pod}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -789,7 +789,7 @@ func TestGetContainerInfo(t *testing.T) {
|
|||
Containers: []*kubecontainer.Container{
|
||||
{
|
||||
Name: "foo",
|
||||
ID: kubecontainer.ContainerID{"test", containerID},
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -871,7 +871,7 @@ func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) {
|
|||
Namespace: "ns",
|
||||
Containers: []*kubecontainer.Container{
|
||||
{Name: "foo",
|
||||
ID: kubecontainer.ContainerID{"test", containerID},
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -995,7 +995,7 @@ func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) {
|
|||
Namespace: "ns",
|
||||
Containers: []*kubecontainer.Container{
|
||||
{Name: "bar",
|
||||
ID: kubecontainer.ContainerID{"test", "fakeID"},
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: "fakeID"},
|
||||
},
|
||||
}},
|
||||
}
|
||||
|
@ -1078,7 +1078,7 @@ func TestRunInContainer(t *testing.T) {
|
|||
fakeCommandRunner := fakeContainerCommandRunner{}
|
||||
kubelet.runner = &fakeCommandRunner
|
||||
|
||||
containerID := kubecontainer.ContainerID{"test", "abc1234"}
|
||||
containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
|
||||
fakeRuntime.PodList = []*kubecontainer.Pod{
|
||||
{
|
||||
ID: "12345678",
|
||||
|
@ -2152,7 +2152,7 @@ func TestExecInContainerNoSuchContainer(t *testing.T) {
|
|||
Namespace: podNamespace,
|
||||
Containers: []*kubecontainer.Container{
|
||||
{Name: "bar",
|
||||
ID: kubecontainer.ContainerID{"test", "barID"}},
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: "barID"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -2215,7 +2215,7 @@ func TestExecInContainer(t *testing.T) {
|
|||
Namespace: podNamespace,
|
||||
Containers: []*kubecontainer.Container{
|
||||
{Name: containerID,
|
||||
ID: kubecontainer.ContainerID{"test", containerID},
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -2300,7 +2300,7 @@ func TestPortForward(t *testing.T) {
|
|||
Containers: []*kubecontainer.Container{
|
||||
{
|
||||
Name: "foo",
|
||||
ID: kubecontainer.ContainerID{"test", "containerFoo"},
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: "containerFoo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -3523,7 +3523,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
|
|||
Containers: []*kubecontainer.Container{
|
||||
{
|
||||
Name: "foo",
|
||||
ID: kubecontainer.ContainerID{"test", containerID},
|
||||
ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
|
@ -95,7 +95,7 @@ func TestRunHandlerExec(t *testing.T) {
|
|||
fakeCommandRunner := fakeContainerCommandRunner{}
|
||||
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil)
|
||||
|
||||
containerID := kubecontainer.ContainerID{"test", "abc1234"}
|
||||
containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
|
||||
containerName := "containerFoo"
|
||||
|
||||
container := api.Container{
|
||||
|
@ -137,7 +137,7 @@ func TestRunHandlerHttp(t *testing.T) {
|
|||
fakeHttp := fakeHTTP{}
|
||||
handlerRunner := NewHandlerRunner(&fakeHttp, &fakeContainerCommandRunner{}, nil)
|
||||
|
||||
containerID := kubecontainer.ContainerID{"test", "abc1234"}
|
||||
containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
|
||||
containerName := "containerFoo"
|
||||
|
||||
container := api.Container{
|
||||
|
@ -168,7 +168,7 @@ func TestRunHandlerHttp(t *testing.T) {
|
|||
|
||||
func TestRunHandlerNil(t *testing.T) {
|
||||
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, nil)
|
||||
containerID := kubecontainer.ContainerID{"test", "abc1234"}
|
||||
containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
|
||||
podName := "podFoo"
|
||||
podNamespace := "nsFoo"
|
||||
containerName := "containerFoo"
|
||||
|
|
|
@ -37,14 +37,14 @@ const (
|
|||
testPodUID = "pOd_UiD"
|
||||
)
|
||||
|
||||
var testContainerID = kubecontainer.ContainerID{"test", "cOnTaInEr_Id"}
|
||||
var testContainerID = kubecontainer.ContainerID{Type: "test", ID: "cOnTaInEr_Id"}
|
||||
|
||||
func getTestRunningStatus() api.PodStatus {
|
||||
containerStatus := api.ContainerStatus{
|
||||
Name: testContainerName,
|
||||
ContainerID: testContainerID.String(),
|
||||
}
|
||||
containerStatus.State.Running = &api.ContainerStateRunning{unversioned.Now()}
|
||||
containerStatus.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.Now()}
|
||||
podStatus := api.PodStatus{
|
||||
Phase: api.PodRunning,
|
||||
ContainerStatuses: []api.ContainerStatus{containerStatus},
|
||||
|
|
|
@ -198,7 +198,7 @@ func TestProbe(t *testing.T) {
|
|||
refManager: kubecontainer.NewRefManager(),
|
||||
recorder: &record.FakeRecorder{},
|
||||
}
|
||||
containerID := kubecontainer.ContainerID{"test", "foobar"}
|
||||
containerID := kubecontainer.ContainerID{Type: "test", ID: "foobar"}
|
||||
|
||||
execProbe := &api.Probe{
|
||||
Handler: api.Handler{
|
||||
|
|
|
@ -29,8 +29,8 @@ import (
|
|||
func TestCacheOperations(t *testing.T) {
|
||||
m := NewManager()
|
||||
|
||||
unsetID := kubecontainer.ContainerID{"test", "unset"}
|
||||
setID := kubecontainer.ContainerID{"test", "set"}
|
||||
unsetID := kubecontainer.ContainerID{Type: "test", ID: "unset"}
|
||||
setID := kubecontainer.ContainerID{Type: "test", ID: "set"}
|
||||
|
||||
_, found := m.Get(unsetID)
|
||||
assert.False(t, found, "unset result found")
|
||||
|
@ -49,8 +49,8 @@ func TestUpdates(t *testing.T) {
|
|||
m := NewManager()
|
||||
|
||||
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "test-pod"}}
|
||||
fooID := kubecontainer.ContainerID{"test", "foo"}
|
||||
barID := kubecontainer.ContainerID{"test", "bar"}
|
||||
fooID := kubecontainer.ContainerID{Type: "test", ID: "foo"}
|
||||
barID := kubecontainer.ContainerID{Type: "test", ID: "bar"}
|
||||
|
||||
expectUpdate := func(expected Update, msg string) {
|
||||
select {
|
||||
|
|
|
@ -56,7 +56,7 @@ func (f *fakeRktInterface) GetInfo(ctx context.Context, in *rktapi.GetInfoReques
|
|||
defer f.Unlock()
|
||||
|
||||
f.called = append(f.called, "GetInfo")
|
||||
return &rktapi.GetInfoResponse{&f.info}, f.err
|
||||
return &rktapi.GetInfoResponse{Info: &f.info}, f.err
|
||||
}
|
||||
|
||||
func (f *fakeRktInterface) ListPods(ctx context.Context, in *rktapi.ListPodsRequest, opts ...grpc.CallOption) (*rktapi.ListPodsResponse, error) {
|
||||
|
@ -65,7 +65,7 @@ func (f *fakeRktInterface) ListPods(ctx context.Context, in *rktapi.ListPodsRequ
|
|||
|
||||
f.called = append(f.called, "ListPods")
|
||||
f.podFilters = in.Filters
|
||||
return &rktapi.ListPodsResponse{f.pods}, f.err
|
||||
return &rktapi.ListPodsResponse{Pods: f.pods}, f.err
|
||||
}
|
||||
|
||||
func (f *fakeRktInterface) InspectPod(ctx context.Context, in *rktapi.InspectPodRequest, opts ...grpc.CallOption) (*rktapi.InspectPodResponse, error) {
|
||||
|
@ -75,10 +75,10 @@ func (f *fakeRktInterface) InspectPod(ctx context.Context, in *rktapi.InspectPod
|
|||
f.called = append(f.called, "InspectPod")
|
||||
for _, pod := range f.pods {
|
||||
if pod.Id == in.Id {
|
||||
return &rktapi.InspectPodResponse{pod}, f.err
|
||||
return &rktapi.InspectPodResponse{Pod: pod}, f.err
|
||||
}
|
||||
}
|
||||
return &rktapi.InspectPodResponse{nil}, f.err
|
||||
return &rktapi.InspectPodResponse{Pod: nil}, f.err
|
||||
}
|
||||
|
||||
func (f *fakeRktInterface) ListImages(ctx context.Context, in *rktapi.ListImagesRequest, opts ...grpc.CallOption) (*rktapi.ListImagesResponse, error) {
|
||||
|
@ -86,7 +86,7 @@ func (f *fakeRktInterface) ListImages(ctx context.Context, in *rktapi.ListImages
|
|||
defer f.Unlock()
|
||||
|
||||
f.called = append(f.called, "ListImages")
|
||||
return &rktapi.ListImagesResponse{f.images}, f.err
|
||||
return &rktapi.ListImagesResponse{Images: f.images}, f.err
|
||||
}
|
||||
|
||||
func (f *fakeRktInterface) InspectImage(ctx context.Context, in *rktapi.InspectImageRequest, opts ...grpc.CallOption) (*rktapi.InspectImageResponse, error) {
|
||||
|
|
|
@ -536,8 +536,8 @@ func TestStaticPodStatus(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSetContainerReadiness(t *testing.T) {
|
||||
cID1 := kubecontainer.ContainerID{"test", "1"}
|
||||
cID2 := kubecontainer.ContainerID{"test", "2"}
|
||||
cID1 := kubecontainer.ContainerID{Type: "test", ID: "1"}
|
||||
cID2 := kubecontainer.ContainerID{Type: "test", ID: "2"}
|
||||
containerStatuses := []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
|
@ -618,7 +618,7 @@ func TestSetContainerReadiness(t *testing.T) {
|
|||
verifyReadiness("all ready", &status, true, true, true)
|
||||
|
||||
t.Log("Setting non-existant container readiness should fail.")
|
||||
m.SetContainerReadiness(pod.UID, kubecontainer.ContainerID{"test", "foo"}, true)
|
||||
m.SetContainerReadiness(pod.UID, kubecontainer.ContainerID{Type: "test", ID: "foo"}, true)
|
||||
verifyUpdates(t, m, 0)
|
||||
status = expectPodStatus(t, m, pod)
|
||||
verifyReadiness("ignore non-existant", &status, true, true, true)
|
||||
|
|
|
@ -631,7 +631,7 @@ func (m *Master) InstallThirdPartyResource(rsrc *extensions.ThirdPartyResource)
|
|||
|
||||
func (m *Master) thirdpartyapi(group, kind, version string) *apiserver.APIGroupVersion {
|
||||
resourceStorage := thirdpartyresourcedataetcd.NewREST(
|
||||
generic.RESTOptions{m.thirdPartyStorage, generic.UndecoratedStorage, m.deleteCollectionWorkers}, group, kind)
|
||||
generic.RESTOptions{Storage: m.thirdPartyStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: m.deleteCollectionWorkers}, group, kind)
|
||||
|
||||
apiRoot := makeThirdPartyPath("")
|
||||
|
||||
|
@@ -692,7 +692,7 @@ func (m *Master) getExtensionResources(c *Config) map[string]rest.Storage {
 	if isEnabled("horizontalpodautoscalers") {
 		m.constructHPAResources(c, storage)
 		controllerStorage := expcontrolleretcd.NewStorage(
-			generic.RESTOptions{c.StorageDestinations.Get("", "replicationControllers"), m.StorageDecorator(), m.deleteCollectionWorkers})
+			generic.RESTOptions{Storage: c.StorageDestinations.Get("", "replicationControllers"), Decorator: m.StorageDecorator(), DeleteCollectionWorkers: m.deleteCollectionWorkers})
 		storage["replicationcontrollers"] = controllerStorage.ReplicationController
 		storage["replicationcontrollers/scale"] = controllerStorage.Scale
 	}

@@ -30,7 +30,7 @@ import (
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	return NewREST(restOptions), server
 }

@@ -38,7 +38,7 @@ const (
 func newStorage(t *testing.T) (ControllerStorage, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	storage := NewStorage(restOptions)
 	return storage, server
 }

@@ -32,7 +32,7 @@ import (
 func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName)
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	daemonSetStorage, statusStorage := NewREST(restOptions)
 	return daemonSetStorage, statusStorage, server
 }

@@ -39,7 +39,7 @@ const defaultReplicas = 100
 func newStorage(t *testing.T) (*DeploymentStorage, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName)
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	deploymentStorage := NewStorage(restOptions)
 	return &deploymentStorage, server
 }

@@ -30,7 +30,7 @@ import (
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	return NewREST(restOptions), server
 }

@@ -30,7 +30,7 @@ var testTTL uint64 = 60
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	return NewREST(restOptions, testTTL), server
 }

@@ -31,7 +31,7 @@ import (
 func newStorage(t *testing.T) (*ScaleREST, *etcdtesting.EtcdTestServer, storage.Interface) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	return NewStorage(restOptions).Scale, server, etcdStorage
 }

@@ -305,7 +305,7 @@ func (e *Etcd) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool
 		// TODO: The Invalid error should has a field for Resource.
 		// After that field is added, we should fill the Resource and
 		// leave the Kind field empty. See the discussion in #18526.
-		qualifiedKind := unversioned.GroupKind{e.QualifiedResource.Group, e.QualifiedResource.Resource}
+		qualifiedKind := unversioned.GroupKind{Group: e.QualifiedResource.Group, Kind: e.QualifiedResource.Resource}
 		fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newVersion, "must be specified for an update")}
 		return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList)
 	}
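The GroupKind hunk above also shows why the keyed form carries real information here: both fields are plain strings, and the value stored in Kind is actually a resource name (per the TODO in the hunk), which only the explicit key makes visible. A small sketch with a hypothetical two-string struct mirroring the shape of unversioned.GroupKind:

package main

import "fmt"

// groupKind is a hypothetical stand-in mirroring the shape of unversioned.GroupKind:
// two string fields of identical type.
type groupKind struct {
	Group string
	Kind  string
}

func main() {
	// With two same-typed fields, only the keys guard against silently swapping
	// the arguments; a positional literal would compile either way around.
	gk := groupKind{Group: "extensions", Kind: "replicasets"}
	fmt.Printf("%s in group %s\n", gk.Kind, gk.Group)
}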
@@ -33,7 +33,7 @@ import (
 func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName)
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	horizontalPodAutoscalerStorage, statusStorage := NewREST(restOptions)
 	return horizontalPodAutoscalerStorage, statusStorage, server
 }

@@ -32,7 +32,7 @@ import (
 func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName)
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	ingressStorage, statusStorage := NewREST(restOptions)
 	return ingressStorage, statusStorage, server
 }

@@ -34,7 +34,7 @@ import (
 func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName)
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	jobStorage, statusStorage := NewREST(restOptions)
 	return jobStorage, statusStorage, server
 }

@@ -31,7 +31,7 @@ import (
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	return NewREST(restOptions), server
 }

@@ -31,7 +31,7 @@ import (
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	namespaceStorage, _, _ := NewREST(restOptions)
 	return namespaceStorage, server
 }
@@ -39,7 +39,7 @@ func (fakeConnectionInfoGetter) GetConnectionInfo(ctx api.Context, nodeName stri
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	storage := NewStorage(restOptions, fakeConnectionInfoGetter{}, nil)
 	return storage.Node, server
 }

@@ -33,7 +33,7 @@ import (
 func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	persistentVolumeStorage, statusStorage := NewREST(restOptions)
 	return persistentVolumeStorage, statusStorage, server
 }

@@ -33,7 +33,7 @@ import (
 func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	persistentVolumeClaimStorage, statusStorage := NewREST(restOptions)
 	return persistentVolumeClaimStorage, statusStorage, server
 }

@@ -39,7 +39,7 @@ import (
 func newStorage(t *testing.T) (*REST, *BindingREST, *StatusREST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 3}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 3}
 	storage := NewStorage(restOptions, nil, nil)
 	return storage.Pod, storage.Binding, storage.Status, server
 }

@@ -145,7 +145,7 @@ func (f FailDeletionStorage) Delete(ctx context.Context, key string, out runtime
 func newFailDeleteStorage(t *testing.T, called *bool) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
 	failDeleteStorage := FailDeletionStorage{etcdStorage, called}
-	restOptions := generic.RESTOptions{failDeleteStorage, generic.UndecoratedStorage, 3}
+	restOptions := generic.RESTOptions{Storage: failDeleteStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 3}
 	storage := NewStorage(restOptions, nil, nil)
 	return storage.Pod, server
 }

@@ -33,7 +33,7 @@ import (
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "extensions")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	return NewREST(restOptions), server
 }

@@ -30,7 +30,7 @@ import (
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	return NewREST(restOptions), server
 }
@@ -37,7 +37,7 @@ const defaultReplicas = 100
 func newStorage(t *testing.T) (*ReplicaSetStorage, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "extensions")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	replicaSetStorage := NewStorage(restOptions)
 	return &replicaSetStorage, server
 }

@@ -32,7 +32,7 @@ import (
 func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	resourceQuotaStorage, statusStorage := NewREST(restOptions)
 	return resourceQuotaStorage, statusStorage, server
 }

@@ -30,7 +30,7 @@ import (
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	return NewREST(restOptions), server
 }

@@ -31,7 +31,7 @@ import (
 func newStorage(t *testing.T) (*REST, *StatusREST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	serviceStorage, statusStorage := NewREST(restOptions)
 	return serviceStorage, statusStorage, server
 }

@@ -30,7 +30,7 @@ import (
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	return NewREST(restOptions), server
 }

@@ -33,7 +33,7 @@ import (
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName)
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	return NewREST(restOptions), server
 }

@@ -68,7 +68,7 @@ func TestCodec(t *testing.T) {
 		obj: &Foo{
 			ObjectMeta: api.ObjectMeta{
 				Name: "bar",
-				CreationTimestamp: unversioned.Time{time.Unix(100, 0)},
+				CreationTimestamp: unversioned.Time{Time: time.Unix(100, 0)},
 			},
 			TypeMeta: unversioned.TypeMeta{Kind: "Foo"},
 		},
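The TestCodec hunk above touches a slightly different case: unversioned.Time is a thin wrapper around an embedded time.Time, so the keyed literal uses the embedded type's name as the field key ({Time: ...}), just as the Duration defaults elsewhere in this commit use {Duration: ...}. A hypothetical sketch of the same shape:

package main

import (
	"fmt"
	"time"
)

// metaTime is a hypothetical stand-in mirroring the shape of unversioned.Time:
// a struct that embeds time.Time.
type metaTime struct {
	time.Time
}

func main() {
	// For an embedded field, the literal key is the embedded type's name.
	created := metaTime{Time: time.Unix(100, 0)}
	fmt.Println(created.UTC()) // methods of time.Time are promoted through the embedding
}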
@@ -33,7 +33,7 @@ import (
 func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) {
 	etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName)
-	restOptions := generic.RESTOptions{etcdStorage, generic.UndecoratedStorage, 1}
+	restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1}
 	return NewREST(restOptions, "foo", "bar"), server
 }

@@ -127,7 +127,7 @@ func (f CodecFactory) UniversalDeserializer() runtime.Decoder {
 //
 // TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form
 func (f CodecFactory) UniversalDecoder(versions ...unversioned.GroupVersion) runtime.Decoder {
-	return f.CodecForVersions(runtime.NoopEncoder{f.universal}, nil, versions)
+	return f.CodecForVersions(runtime.NoopEncoder{Decoder: f.universal}, nil, versions)
 }

 // CodecFor creates a codec with the provided serializer. If an object is decoded and its group is not in the list,

@@ -213,7 +213,7 @@ func TestFakeDBus(t *testing.T) {
 	if method == "org.freedesktop.DBus.GetNameOwner" {
 		checkName := args[0].(string)
 		if checkName != ownedName {
-			return nil, godbus.Error{"org.freedesktop.DBus.Error.NameHasNoOwner", nil}
+			return nil, godbus.Error{Name: "org.freedesktop.DBus.Error.NameHasNoOwner", Body: nil}
 		} else {
 			return []interface{}{uniqueName}, nil
 		}

@@ -109,7 +109,7 @@ func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec,
 		},
 		fsType: fsType,
 		readOnly: readOnly,
-		diskMounter: &mount.SafeFormatAndMount{plugin.host.GetMounter(), exec.New()}}, nil
+		diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
 }

 func (plugin *awsElasticBlockStorePlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
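The volume-plugin hunks here and below all key the same two fields of mount.SafeFormatAndMount: an embedded mounter interface (keyed as Interface) and a command Runner. A self-contained sketch of that shape, with hypothetical local types standing in for the real mounter and exec interfaces:

package main

import "fmt"

// Mounter and Exec are hypothetical stand-ins for the interfaces used by the real type.
type Mounter interface {
	Mount(source, target string) error
}

type Exec interface {
	Run(cmd string) error
}

type noopMounter struct{}

func (noopMounter) Mount(source, target string) error { return nil }

type noopExec struct{}

func (noopExec) Run(cmd string) error { return nil }

// safeMounter mirrors the shape of mount.SafeFormatAndMount: an embedded mounter
// interface plus a runner field.
type safeMounter struct {
	Mounter // embedded interface: the literal key is the interface's type name
	Runner  Exec
}

func main() {
	m := safeMounter{Mounter: noopMounter{}, Runner: noopExec{}}
	fmt.Println(m.Mount("/dev/sdb", "/mnt/data")) // Mount is promoted from the embedded interface
}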
@@ -101,7 +101,7 @@ func (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.U
 		},
 		fsType: fsType,
 		readOnly: readOnly,
-		blockDeviceMounter: &mount.SafeFormatAndMount{mounter, exec.New()}}, nil
+		blockDeviceMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}}, nil
 }

 func (plugin *cinderPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {

@@ -199,7 +199,7 @@ func TestCanSupport(t *testing.T) {
 	if plugin.Name() != configMapPluginName {
 		t.Errorf("Wrong name: %s", plugin.Name())
 	}
-	if !plugin.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{""}}}}}) {
+	if !plugin.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: ""}}}}}) {
 		t.Errorf("Expected true")
 	}
 	if plugin.CanSupport(&volume.Spec{}) {

@@ -105,7 +105,7 @@ func (plugin *fcPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID,
 			plugin: plugin},
 		fsType: fc.FSType,
 		readOnly: readOnly,
-		mounter: &mount.SafeFormatAndMount{mounter, exec.New()},
+		mounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
 	}, nil
 }

@@ -143,7 +143,7 @@ func (plugin *flexVolumePlugin) newBuilderInternal(spec *volume.Spec, pod *api.P
 		options: source.Options,
 		runner: runner,
 		manager: manager,
-		blockDeviceMounter: &mount.SafeFormatAndMount{mounter, runner},
+		blockDeviceMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: runner},
 	}, nil
 }

@@ -109,7 +109,7 @@ func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, pod
 		},
 		fsType: fsType,
 		readOnly: readOnly,
-		diskMounter: &mount.SafeFormatAndMount{mounter, exec.New()}}, nil
+		diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}}, nil
 }

 func (plugin *gcePersistentDiskPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {

@@ -105,7 +105,7 @@ func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UI
 			plugin: plugin},
 		fsType: iscsi.FSType,
 		readOnly: readOnly,
-		mounter: &mount.SafeFormatAndMount{mounter, exec.New()},
+		mounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
 	}, nil
 }
@@ -127,7 +127,7 @@ func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID,
 			Pool: pool,
 			ReadOnly: readOnly,
 			manager: manager,
-			mounter: &mount.SafeFormatAndMount{mounter, exec.New()},
+			mounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
 			plugin: plugin,
 		},
 		Mon: source.CephMonitors,

@@ -150,7 +150,7 @@ func (plugin *rbdPlugin) newCleanerInternal(volName string, podUID types.UID, ma
 			podUID: podUID,
 			volName: volName,
 			manager: manager,
-			mounter: &mount.SafeFormatAndMount{mounter, exec.New()},
+			mounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
 			plugin: plugin,
 		},
 		Mon: make([]string, 0),

@@ -73,7 +73,7 @@ func machine1PrioritizerExtender(pod *api.Pod, nodes *api.NodeList) (*schedulera
 		if node.Name == "machine1" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{node.Name, score})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
 	}
 	return &result, nil
 }

@@ -85,7 +85,7 @@ func machine2PrioritizerExtender(pod *api.Pod, nodes *api.NodeList) (*schedulera
 		if node.Name == "machine2" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{node.Name, score})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
 	}
 	return &result, nil
 }

@@ -102,7 +102,7 @@ func machine2Prioritizer(_ *api.Pod, nodeNameToInfo map[string]*schedulercache.N
 		if node.Name == "machine2" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{node.Name, score})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
 	}
 	return result, nil
 }
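The scheduler-extender hunks above build score lists in a loop; with keyed HostPriority literals the Host/Score pairing stays correct even if the struct later grows. A minimal sketch with a hypothetical stand-in type:

package main

import "fmt"

// hostPriority is a hypothetical stand-in mirroring the shape of schedulerapi.HostPriority.
type hostPriority struct {
	Host  string
	Score int
}

func main() {
	nodes := []string{"machine1", "machine2", "machine3"}
	var result []hostPriority
	for _, name := range nodes {
		score := 1
		if name == "machine2" {
			score = 10
		}
		// Keyed literal: adding a field to hostPriority later cannot silently
		// shift which value lands in Host and which in Score.
		result = append(result, hostPriority{Host: name, Score: score})
	}
	fmt.Println(result)
}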
@@ -227,7 +227,7 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String,
 		ReplicaSetLister: f.ReplicaSetLister,
 		// All fit predicates only need to consider schedulable nodes.
 		NodeLister: f.NodeLister.NodeCondition(getNodeConditionPredicate()),
-		NodeInfo: &predicates.CachedNodeInfo{f.NodeLister},
+		NodeInfo: &predicates.CachedNodeInfo{StoreToNodeLister: f.NodeLister},
 		PVInfo: f.PVLister,
 		PVCInfo: f.PVCLister,
 	}
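The CachedNodeInfo hunk above is the embedded-struct variant of the same rule: when the field is an embedded type, the key in the literal is that type's name (here StoreToNodeLister). A hypothetical sketch of the shape:

package main

import "fmt"

// storeToNodeLister and cachedNodeInfo are hypothetical stand-ins mirroring the
// shapes of cache.StoreToNodeLister and predicates.CachedNodeInfo, which embeds it.
type storeToNodeLister struct {
	names []string
}

type cachedNodeInfo struct {
	storeToNodeLister // embedded: the literal key is the embedded type's name
}

func main() {
	lister := storeToNodeLister{names: []string{"node-a", "node-b"}}
	info := cachedNodeInfo{storeToNodeLister: lister}
	fmt.Println(len(info.names)) // fields are promoted through the embedding
}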
@@ -82,7 +82,7 @@ func (ci *ConformanceImage) List() ([]string, error) {
 }

 func (ci *ConformanceImage) Remove() error {
-	ci.Runtime.GarbageCollect(kubecontainer.ContainerGCPolicy{time.Second * 30, 1, 0})
+	ci.Runtime.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Second * 30, MaxPerPodContainer: 1, MaxContainers: 0})

 	var err error
 	for start := time.Now(); time.Since(start) < time.Minute*2; time.Sleep(time.Second * 30) {
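The last hunk is a good example of the readability argument as much as the vet one: ContainerGCPolicy takes a duration and two bare ints, and a positional literal gives no hint which number is which. A hypothetical sketch of the same shape:

package main

import (
	"fmt"
	"time"
)

// gcPolicy is a hypothetical stand-in mirroring the shape of
// kubecontainer.ContainerGCPolicy: one duration and two integer limits.
type gcPolicy struct {
	MinAge             time.Duration
	MaxPerPodContainer int
	MaxContainers      int
}

func main() {
	// The keyed form reads as configuration; a positional {30 * time.Second, 1, 0}
	// would leave the reader guessing which limit the 1 and the 0 refer to.
	policy := gcPolicy{MinAge: 30 * time.Second, MaxPerPodContainer: 1, MaxContainers: 0}
	fmt.Printf("%+v\n", policy)
}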