Merge pull request #36651 from mwielgus/evict-from-b1

Automatic merge from submit-queue

Switch pod eviction client from v1alpha1 to v1beta1

Generated client 1.5 has a function to evict a pod. The function uses the v1alpha1 Eviction object instead of the v1beta1 one. This PR changes the API version that is being used.
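For orientation, here is a minimal sketch of what a caller of the generated client looks like after this change; the Evict method signature and the Eviction field layout are assumptions based on the pod client files touched below, not code from this PR:

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
	corev1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
)

// evictPod posts an eviction for the named pod via the generated pod client.
// After this PR the Eviction object comes from policy/v1beta1 rather than
// policy/v1alpha1.
func evictPod(pods corev1.PodInterface, namespace, name string) error {
	eviction := &policy.Eviction{
		// ObjectMeta identifies the pod to evict (field names assumed).
		ObjectMeta: v1.ObjectMeta{Name: name, Namespace: namespace},
	}
	return pods.Evict(eviction)
}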

cc: @davidopp @caesarxuchao
Kubernetes Submit Queue 2016-11-11 12:22:34 -08:00 committed by GitHub
commit 2822e4fd78
40 changed files with 65139 additions and 50767 deletions


@ -44,7 +44,7 @@ go_library(
"//pkg/api/unversioned:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/policy/v1alpha1:go_default_library",
"//pkg/apis/policy/v1beta1:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/runtime:go_default_library",


@ -42,7 +42,7 @@ go_library(
"//pkg/api:go_default_library",
"//pkg/api/unversioned:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/policy/v1alpha1:go_default_library",
"//pkg/apis/policy/v1beta1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/testing/core:go_default_library",


@ -18,7 +18,7 @@ package fake
import (
"k8s.io/kubernetes/pkg/api/v1"
policy "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
)


@ -19,7 +19,7 @@ package v1
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
policy "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
"k8s.io/kubernetes/pkg/client/restclient"
)


@ -18,7 +18,7 @@ package fake
import (
"k8s.io/client-go/pkg/api/v1"
policy "k8s.io/client-go/pkg/apis/policy/v1alpha1"
policy "k8s.io/client-go/pkg/apis/policy/v1beta1"
"k8s.io/client-go/rest"
"k8s.io/client-go/testing"
)


@ -19,7 +19,7 @@ package v1
import (
"k8s.io/client-go/pkg/api"
"k8s.io/client-go/pkg/api/v1"
policy "k8s.io/client-go/pkg/apis/policy/v1alpha1"
policy "k8s.io/client-go/pkg/apis/policy/v1beta1"
"k8s.io/client-go/rest"
)

File diff suppressed because it is too large.


@ -289,6 +289,8 @@ type VolumeSource struct {
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty"`
// PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -349,6 +351,8 @@ type PersistentVolumeSource struct {
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty"`
// PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty"`
}
type PersistentVolumeClaimVolumeSource struct {
@ -936,6 +940,16 @@ type VsphereVirtualDiskVolumeSource struct {
FSType string `json:"fsType,omitempty"`
}
// Represents a Photon Controller persistent disk resource.
type PhotonPersistentDiskVolumeSource struct {
// ID that identifies Photon Controller persistent disk
PdID string `json:"pdID"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
FSType string `json:"fsType,omitempty"`
}
type AzureDataDiskCachingMode string
const (

File diff suppressed because it is too large.


@ -2033,6 +2033,9 @@ message PersistentVolumeSource {
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
optional AzureDiskVolumeSource azureDisk = 16;
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17;
}
// PersistentVolumeSpec is the specification of a persistent volume.
@ -2082,6 +2085,17 @@ message PersistentVolumeStatus {
optional string reason = 3;
}
// Represents a Photon Controller persistent disk resource.
message PhotonPersistentDiskVolumeSource {
// ID that identifies Photon Controller persistent disk
optional string pdID = 1;
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
optional string fsType = 2;
}
// Pod is a collection of containers that can run on a host. This resource is created
// by clients and scheduled onto hosts.
message Pod {
@ -3541,6 +3555,9 @@ message VolumeSource {
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
optional AzureDiskVolumeSource azureDisk = 22;
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23;
}
// Represents a vSphere volume resource.

File diff suppressed because it is too large.


@ -322,6 +322,8 @@ type VolumeSource struct {
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@ -405,6 +407,8 @@ type PersistentVolumeSource struct {
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
}
// +genclient=true
@ -1023,6 +1027,17 @@ type VsphereVirtualDiskVolumeSource struct {
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// Represents a Photon Controller persistent disk resource.
type PhotonPersistentDiskVolumeSource struct {
// ID that identifies Photon Controller persistent disk
PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
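As an aside (not part of this diff), the new field would be set on a pod volume roughly as in the sketch below; the disk ID is hypothetical and the surrounding v1.Volume and VolumeSource shapes are unchanged by this PR:

package example

import "k8s.io/kubernetes/pkg/api/v1"

// photonVolume builds a volume backed by a Photon Controller persistent disk,
// using the PhotonPersistentDiskVolumeSource type added above.
func photonVolume() v1.Volume {
	return v1.Volume{
		Name: "photon-disk",
		VolumeSource: v1.VolumeSource{
			PhotonPersistentDisk: &v1.PhotonPersistentDiskVolumeSource{
				PdID:   "pd-1234", // hypothetical Photon Controller disk ID
				FSType: "ext4",    // implicitly inferred to be ext4 when unspecified
			},
		},
	}
}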
type AzureDataDiskCachingMode string
const (


@ -1098,6 +1098,7 @@ var map_PersistentVolumeSource = map[string]string{
"vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
"quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
"azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
"photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
}
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
@ -1127,6 +1128,16 @@ func (PersistentVolumeStatus) SwaggerDoc() map[string]string {
return map_PersistentVolumeStatus
}
var map_PhotonPersistentDiskVolumeSource = map[string]string{
"": "Represents a Photon Controller persistent disk resource.",
"pdID": "ID that identifies Photon Controller persistent disk",
"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
}
func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string {
return map_PhotonPersistentDiskVolumeSource
}
var map_Pod = map[string]string{
"": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.",
"metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
@ -1787,18 +1798,19 @@ var map_VolumeSource = map[string]string{
"iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md",
"glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md",
"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims",
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
"flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.",
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
"cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
"flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
"downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
"fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
"azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
"configMap": "ConfigMap represents a configMap that should populate this volume",
"vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
"quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
"azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
"flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.",
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
"cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
"flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
"downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
"fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
"azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
"configMap": "ConfigMap represents a configMap that should populate this volume",
"vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
"quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
"azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
"photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
}
func (VolumeSource) SwaggerDoc() map[string]string {


@ -229,6 +229,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec,
Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus,
Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus,
Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource,
Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource,
Convert_v1_Pod_To_api_Pod,
Convert_api_Pod_To_v1_Pod,
Convert_v1_PodAffinity_To_api_PodAffinity,
@ -2649,6 +2651,7 @@ func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Per
out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
return nil
}
@ -2673,6 +2676,7 @@ func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api
out.AzureFile = (*AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile))
out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
return nil
}
@ -2732,6 +2736,26 @@ func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.Per
return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s)
}
func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error {
out.PdID = in.PdID
out.FSType = in.FSType
return nil
}
func Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error {
return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in, out, s)
}
func autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *PhotonPersistentDiskVolumeSource, s conversion.Scope) error {
out.PdID = in.PdID
out.FSType = in.FSType
return nil
}
func Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *PhotonPersistentDiskVolumeSource, s conversion.Scope) error {
return autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s)
}
func autoConvert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error {
if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
return err
@ -4266,6 +4290,7 @@ func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.
out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
return nil
}
@ -4296,6 +4321,7 @@ func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *
out.ConfigMap = (*ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap))
out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
return nil
}


@ -132,6 +132,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeSource, InType: reflect.TypeOf(&PersistentVolumeSource{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeSpec, InType: reflect.TypeOf(&PersistentVolumeSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeStatus, InType: reflect.TypeOf(&PersistentVolumeStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PhotonPersistentDiskVolumeSource, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Pod, InType: reflect.TypeOf(&Pod{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAffinity, InType: reflect.TypeOf(&PodAffinity{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAffinityTerm, InType: reflect.TypeOf(&PodAffinityTerm{})},
@ -2231,6 +2232,13 @@ func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conv
} else {
out.AzureDisk = nil
}
if in.PhotonPersistentDisk != nil {
in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
*out = new(PhotonPersistentDiskVolumeSource)
**out = **in
} else {
out.PhotonPersistentDisk = nil
}
return nil
}
}
@ -2283,6 +2291,16 @@ func DeepCopy_v1_PersistentVolumeStatus(in interface{}, out interface{}, c *conv
}
}
func DeepCopy_v1_PhotonPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*PhotonPersistentDiskVolumeSource)
out := out.(*PhotonPersistentDiskVolumeSource)
out.PdID = in.PdID
out.FSType = in.FSType
return nil
}
}
func DeepCopy_v1_Pod(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*Pod)
@ -3706,6 +3724,13 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Clo
} else {
out.AzureDisk = nil
}
if in.PhotonPersistentDisk != nil {
in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
*out = new(PhotonPersistentDiskVolumeSource)
**out = **in
} else {
out.PhotonPersistentDisk = nil
}
return nil
}
}


@ -136,6 +136,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSource, InType: reflect.TypeOf(&PersistentVolumeSource{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSpec, InType: reflect.TypeOf(&PersistentVolumeSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeStatus, InType: reflect.TypeOf(&PersistentVolumeStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PhotonPersistentDiskVolumeSource, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Pod, InType: reflect.TypeOf(&Pod{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinity, InType: reflect.TypeOf(&PodAffinity{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinityTerm, InType: reflect.TypeOf(&PodAffinityTerm{})},
@ -2290,6 +2291,13 @@ func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *con
} else {
out.AzureDisk = nil
}
if in.PhotonPersistentDisk != nil {
in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
*out = new(PhotonPersistentDiskVolumeSource)
**out = **in
} else {
out.PhotonPersistentDisk = nil
}
return nil
}
}
@ -2342,6 +2350,16 @@ func DeepCopy_api_PersistentVolumeStatus(in interface{}, out interface{}, c *con
}
}
func DeepCopy_api_PhotonPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*PhotonPersistentDiskVolumeSource)
out := out.(*PhotonPersistentDiskVolumeSource)
out.PdID = in.PdID
out.FSType = in.FSType
return nil
}
}
func DeepCopy_api_Pod(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*Pod)
@ -3752,6 +3770,13 @@ func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cl
} else {
out.AzureDisk = nil
}
if in.PhotonPersistentDisk != nil {
in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
*out = new(PhotonPersistentDiskVolumeSource)
**out = **in
} else {
out.PhotonPersistentDisk = nil
}
return nil
}
}

File diff suppressed because it is too large.


@ -326,8 +326,6 @@ type KubeletConfiguration struct {
RktPath string `json:"rktPath,omitempty"`
// experimentalMounterPath is the path of mounter binary. Leave empty to use the default mount path
ExperimentalMounterPath string `json:"experimentalMounterPath,omitempty"`
// experimentalMounterRootfsPath is the absolute path to root filesystem for the mounter binary.
ExperimentalMounterRootfsPath string `json:"experimentalMounterRootfsPath,omitempty"`
// rktApiEndpoint is the endpoint of the rkt API service to communicate with.
// +optional
RktAPIEndpoint string `json:"rktAPIEndpoint,omitempty"`
@ -468,6 +466,10 @@ type KubeletConfiguration struct {
// TODO(#34726:1.8.0): Remove the opt-in for failing when swap is enabled.
// Tells the Kubelet to fail to start if swap is enabled on the node.
ExperimentalFailSwapOn bool `json:"experimentalFailSwapOn,omitempty"`
// This flag, if set, enables a check prior to mount operations to verify that the required components
// (binaries, etc.) to mount the volume are available on the underlying node. If the check is enabled
// and fails, the mount operation fails.
ExperimentalCheckNodeCapabilitiesBeforeMount bool `json:"ExperimentalCheckNodeCapabilitiesBeforeMount,omitempty"`
}
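A short sketch of opting into the new check, assuming this struct is the usual componentconfig v1alpha1 KubeletConfiguration (file paths are not visible in this extract):

package example

import "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"

// withMountCheck enables the pre-mount capability check added above. Note the
// json tag starts with a capital letter, so a serialized config would use the
// key "ExperimentalCheckNodeCapabilitiesBeforeMount".
func withMountCheck(cfg v1alpha1.KubeletConfiguration) v1alpha1.KubeletConfiguration {
	cfg.ExperimentalCheckNodeCapabilitiesBeforeMount = true
	return cfg
}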
type KubeletAuthorizationMode string


@ -340,7 +340,7 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
obj.SerializeImagePulls = boolVar(true)
}
if obj.SeccompProfileRoot == "" {
filepath.Join(defaultRootDir, "seccomp")
obj.SeccompProfileRoot = filepath.Join(defaultRootDir, "seccomp")
}
if obj.StreamingConnectionIdleTimeout == zeroDuration {
obj.StreamingConnectionIdleTimeout = unversioned.Duration{Duration: 4 * time.Hour}


@ -374,8 +374,6 @@ type KubeletConfiguration struct {
// experimentalMounterPath is the path to mounter binary. If not set, kubelet will attempt to use mount
// binary that is available via $PATH,
ExperimentalMounterPath string `json:"experimentalMounterPath,omitempty"`
// experimentalMounterRootfsPath is the absolute path to root filesystem for the mounter binary.
ExperimentalMounterRootfsPath string `json:"experimentalMounterRootfsPath,omitempty"`
// rktApiEndpoint is the endpoint of the rkt API service to communicate with.
RktAPIEndpoint string `json:"rktAPIEndpoint"`
// rktStage1Image is the image to use as stage1. Local paths and
@ -507,6 +505,10 @@ type KubeletConfiguration struct {
// TODO(#34726:1.8.0): Remove the opt-in for failing when swap is enabled.
// Tells the Kubelet to fail to start if swap is enabled on the node.
ExperimentalFailSwapOn bool `json:"experimentalFailSwapOn,omitempty"`
// This flag, if set, enables a check prior to mount operations to verify that the required components
// (binaries, etc.) to mount the volume are available on the underlying node. If the check is enabled
// and fails, the mount operation fails.
ExperimentalCheckNodeCapabilitiesBeforeMount bool `json:"ExperimentalCheckNodeCapabilitiesBeforeMount,omitempty"`
}
type KubeletAuthorizationMode string


@ -340,7 +340,6 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
out.RktPath = in.RktPath
out.ExperimentalMounterPath = in.ExperimentalMounterPath
out.ExperimentalMounterRootfsPath = in.ExperimentalMounterRootfsPath
out.RktAPIEndpoint = in.RktAPIEndpoint
out.RktStage1Image = in.RktStage1Image
if err := api.Convert_Pointer_string_To_string(&in.LockFilePath, &out.LockFilePath, s); err != nil {
@ -408,6 +407,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu
out.FeatureGates = in.FeatureGates
out.EnableCRI = in.EnableCRI
out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
return nil
}
@ -509,7 +509,6 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
out.RktPath = in.RktPath
out.ExperimentalMounterPath = in.ExperimentalMounterPath
out.ExperimentalMounterRootfsPath = in.ExperimentalMounterRootfsPath
out.RktAPIEndpoint = in.RktAPIEndpoint
out.RktStage1Image = in.RktStage1Image
if err := api.Convert_string_To_Pointer_string(&in.LockFilePath, &out.LockFilePath, s); err != nil {
@ -577,6 +576,7 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu
out.FeatureGates = in.FeatureGates
out.EnableCRI = in.EnableCRI
out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
return nil
}


@ -316,7 +316,6 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
out.RktPath = in.RktPath
out.ExperimentalMounterPath = in.ExperimentalMounterPath
out.ExperimentalMounterRootfsPath = in.ExperimentalMounterRootfsPath
out.RktAPIEndpoint = in.RktAPIEndpoint
out.RktStage1Image = in.RktStage1Image
if in.LockFilePath != nil {
@ -462,6 +461,7 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *
out.FeatureGates = in.FeatureGates
out.EnableCRI = in.EnableCRI
out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
return nil
}
}


@ -319,7 +319,6 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
out.RktPath = in.RktPath
out.ExperimentalMounterPath = in.ExperimentalMounterPath
out.ExperimentalMounterRootfsPath = in.ExperimentalMounterRootfsPath
out.RktAPIEndpoint = in.RktAPIEndpoint
out.RktStage1Image = in.RktStage1Image
out.LockFilePath = in.LockFilePath
@ -393,6 +392,7 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface
out.FeatureGates = in.FeatureGates
out.EnableCRI = in.EnableCRI
out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
return nil
}
}


@ -898,6 +898,7 @@ var (
VsphereVolume FSType = "vsphereVolume"
Quobyte FSType = "quobyte"
AzureDisk FSType = "azureDisk"
PhotonPersistentDisk FSType = "photonPersistentDisk"
All FSType = "*"
)

File diff suppressed because it is too large.


@ -40,17 +40,10 @@ type PodDisruptionBudgetSpec struct {
// PodDisruptionBudgetStatus represents information about the status of a
// PodDisruptionBudget. Status may trail the actual state of a system.
type PodDisruptionBudgetStatus struct {
// Number of pod disruptions that are currently allowed.
PodDisruptionsAllowed int32 `json:"disruptionsAllowed"`
// current number of healthy pods
CurrentHealthy int32 `json:"currentHealthy"`
// minimum desired number of healthy pods
DesiredHealthy int32 `json:"desiredHealthy"`
// total number of pods counted by this disruption budget
ExpectedPods int32 `json:"expectedPods"`
// Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other
// status information is valid only if observedGeneration equals the PDB's object generation.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// DisruptedPods contains information about pods whose eviction was
// processed by the API server eviction subresource handler but has not
@ -64,6 +57,18 @@ type PodDisruptionBudgetStatus struct {
// If everything goes smooth this map should be empty for the most of the time.
// Large number of entries in the map may indicate problems with pod deletions.
DisruptedPods map[string]unversioned.Time `json:"disruptedPods" protobuf:"bytes,5,rep,name=disruptedPods"`
// Number of pod disruptions that are currently allowed.
PodDisruptionsAllowed int32 `json:"disruptionsAllowed"`
// current number of healthy pods
CurrentHealthy int32 `json:"currentHealthy"`
// minimum desired number of healthy pods
DesiredHealthy int32 `json:"desiredHealthy"`
// total number of pods counted by this disruption budget
ExpectedPods int32 `json:"expectedPods"`
}
// +genclient=true


@ -253,19 +253,10 @@ func (m *PodDisruptionBudgetStatus) MarshalTo(data []byte) (int, error) {
_ = l
data[i] = 0x8
i++
i = encodeVarintGenerated(data, i, uint64(m.PodDisruptionsAllowed))
data[i] = 0x10
i++
i = encodeVarintGenerated(data, i, uint64(m.CurrentHealthy))
data[i] = 0x18
i++
i = encodeVarintGenerated(data, i, uint64(m.DesiredHealthy))
data[i] = 0x20
i++
i = encodeVarintGenerated(data, i, uint64(m.ExpectedPods))
i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration))
if len(m.DisruptedPods) > 0 {
for k := range m.DisruptedPods {
data[i] = 0x2a
data[i] = 0x12
i++
v := m.DisruptedPods[k]
msgSize := (&v).Size()
@ -285,6 +276,18 @@ func (m *PodDisruptionBudgetStatus) MarshalTo(data []byte) (int, error) {
i += n9
}
}
data[i] = 0x18
i++
i = encodeVarintGenerated(data, i, uint64(m.PodDisruptionsAllowed))
data[i] = 0x20
i++
i = encodeVarintGenerated(data, i, uint64(m.CurrentHealthy))
data[i] = 0x28
i++
i = encodeVarintGenerated(data, i, uint64(m.DesiredHealthy))
data[i] = 0x30
i++
i = encodeVarintGenerated(data, i, uint64(m.ExpectedPods))
return i, nil
}
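The literal bytes written above are protobuf field keys, computed as (field number << 3) | wire type; with the renumbered PodDisruptionBudgetStatus fields that yields 0x8 for observedGeneration (field 1, varint), 0x12 for each disruptedPods map entry (field 2, length-delimited), and 0x18, 0x20, 0x28, 0x30 for the four int32 counters (fields 3-6). A small illustrative sketch of that arithmetic:

package example

import "fmt"

// protoTag computes the key byte the generated MarshalTo writes before each
// field: (field_number << 3) | wire_type. Wire type 0 is varint; wire type 2
// is length-delimited (used for the disruptedPods map entries).
func protoTag(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	fmt.Printf("observedGeneration (1, varint): %#x\n", protoTag(1, 0)) // 0x8
	fmt.Printf("disruptedPods (2, bytes):       %#x\n", protoTag(2, 2)) // 0x12
	fmt.Printf("disruptionsAllowed (3, varint): %#x\n", protoTag(3, 0)) // 0x18
	fmt.Printf("expectedPods (6, varint):       %#x\n", protoTag(6, 0)) // 0x30
}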
@ -368,10 +371,7 @@ func (m *PodDisruptionBudgetSpec) Size() (n int) {
func (m *PodDisruptionBudgetStatus) Size() (n int) {
var l int
_ = l
n += 1 + sovGenerated(uint64(m.PodDisruptionsAllowed))
n += 1 + sovGenerated(uint64(m.CurrentHealthy))
n += 1 + sovGenerated(uint64(m.DesiredHealthy))
n += 1 + sovGenerated(uint64(m.ExpectedPods))
n += 1 + sovGenerated(uint64(m.ObservedGeneration))
if len(m.DisruptedPods) > 0 {
for k, v := range m.DisruptedPods {
_ = k
@ -381,6 +381,10 @@ func (m *PodDisruptionBudgetStatus) Size() (n int) {
n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
}
}
n += 1 + sovGenerated(uint64(m.PodDisruptionsAllowed))
n += 1 + sovGenerated(uint64(m.CurrentHealthy))
n += 1 + sovGenerated(uint64(m.DesiredHealthy))
n += 1 + sovGenerated(uint64(m.ExpectedPods))
return n
}
@ -457,11 +461,12 @@ func (this *PodDisruptionBudgetStatus) String() string {
}
mapStringForDisruptedPods += "}"
s := strings.Join([]string{`&PodDisruptionBudgetStatus{`,
`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
`DisruptedPods:` + mapStringForDisruptedPods + `,`,
`PodDisruptionsAllowed:` + fmt.Sprintf("%v", this.PodDisruptionsAllowed) + `,`,
`CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`,
`DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`,
`ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`,
`DisruptedPods:` + mapStringForDisruptedPods + `,`,
`}`,
}, "")
return s
@ -982,9 +987,9 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionsAllowed", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
}
m.PodDisruptionsAllowed = 0
m.ObservedGeneration = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@ -994,69 +999,12 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error {
}
b := data[iNdEx]
iNdEx++
m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift
m.ObservedGeneration |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType)
}
m.CurrentHealthy = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.CurrentHealthy |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType)
}
m.DesiredHealthy = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.DesiredHealthy |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType)
}
m.ExpectedPods = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.ExpectedPods |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType)
}
@ -1172,6 +1120,82 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error {
}
m.DisruptedPods[mapkey] = *mapvalue
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionsAllowed", wireType)
}
m.PodDisruptionsAllowed = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType)
}
m.CurrentHealthy = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.CurrentHealthy |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType)
}
m.DesiredHealthy = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.DesiredHealthy |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType)
}
m.ExpectedPods = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.ExpectedPods |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipGenerated(data[iNdEx:])
@ -1299,51 +1323,53 @@ var (
)
var fileDescriptorGenerated = []byte{
// 728 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x94, 0xcf, 0x6e, 0xd3, 0x4e,
0x10, 0xc7, 0xe3, 0x26, 0xe9, 0x2f, 0xbf, 0x6d, 0x52, 0x95, 0x85, 0x42, 0x88, 0x84, 0x8b, 0x72,
0x6a, 0x05, 0x5d, 0x2b, 0x15, 0x48, 0x85, 0x43, 0xa5, 0x9a, 0x54, 0xa2, 0x12, 0x55, 0x2b, 0x17,
0x09, 0x84, 0x04, 0x92, 0x63, 0x0f, 0xee, 0x12, 0xc7, 0xb6, 0x76, 0xd7, 0x81, 0xdc, 0x78, 0x04,
0x0e, 0x3c, 0x05, 0xaf, 0xc1, 0xa5, 0xe2, 0xd4, 0x23, 0x17, 0x2a, 0x9a, 0xbe, 0x08, 0xf2, 0x66,
0x93, 0xda, 0xf9, 0x53, 0x55, 0x2a, 0xdc, 0xbc, 0xbb, 0xf3, 0xf9, 0xce, 0x7c, 0x67, 0x67, 0x8d,
0x9e, 0xb4, 0x37, 0x39, 0xa1, 0xa1, 0xd1, 0x8e, 0x5b, 0xc0, 0x02, 0x10, 0xc0, 0x8d, 0xa8, 0xed,
0x19, 0x76, 0x44, 0xb9, 0x11, 0x85, 0x3e, 0x75, 0x7a, 0x46, 0xb7, 0xd1, 0x02, 0x61, 0x37, 0x0c,
0x0f, 0x02, 0x60, 0xb6, 0x00, 0x97, 0x44, 0x2c, 0x14, 0x21, 0x5e, 0x1b, 0xa0, 0xe4, 0x02, 0x25,
0x51, 0xdb, 0x23, 0x09, 0x4a, 0x06, 0x28, 0x51, 0x68, 0x6d, 0xdd, 0xa3, 0xe2, 0x28, 0x6e, 0x11,
0x27, 0xec, 0x18, 0x5e, 0xe8, 0x85, 0x86, 0x54, 0x68, 0xc5, 0xef, 0xe5, 0x4a, 0x2e, 0xe4, 0xd7,
0x40, 0xb9, 0xb6, 0x31, 0xb3, 0x28, 0x83, 0x01, 0x0f, 0x63, 0xe6, 0xc0, 0x78, 0x35, 0xb5, 0xc7,
0xb3, 0x99, 0x38, 0xe8, 0x02, 0xe3, 0x34, 0x0c, 0xc0, 0x9d, 0xc0, 0x1e, 0xce, 0xc6, 0xba, 0x13,
0x96, 0x6b, 0xeb, 0xd3, 0xa3, 0x59, 0x1c, 0x08, 0xda, 0x99, 0xac, 0xa9, 0x31, 0x3d, 0x3c, 0x16,
0xd4, 0x37, 0x68, 0x20, 0xb8, 0x60, 0xe3, 0x48, 0xfd, 0x87, 0x86, 0x4a, 0x3b, 0x5d, 0xea, 0x08,
0x1a, 0x06, 0xf8, 0x35, 0x2a, 0x75, 0x40, 0xd8, 0xae, 0x2d, 0xec, 0xaa, 0x76, 0x5f, 0x5b, 0x5d,
0xd8, 0x58, 0x25, 0x33, 0x9b, 0x4e, 0xba, 0x0d, 0xb2, 0xdf, 0xfa, 0x00, 0x8e, 0xd8, 0x03, 0x61,
0x9b, 0xf8, 0xf8, 0x74, 0x25, 0xd7, 0x3f, 0x5d, 0x41, 0x17, 0x7b, 0xd6, 0x48, 0x0d, 0xbb, 0xa8,
0xe2, 0x82, 0x0f, 0x02, 0xf6, 0xa3, 0x24, 0x13, 0xaf, 0xce, 0x49, 0xf9, 0x07, 0x97, 0xcb, 0x37,
0xd3, 0x88, 0x79, 0xa3, 0x7f, 0xba, 0x52, 0xc9, 0x6c, 0x59, 0x59, 0xd1, 0xfa, 0xf7, 0x39, 0x74,
0xf3, 0x20, 0x74, 0x9b, 0x94, 0xb3, 0x58, 0x6e, 0x99, 0xb1, 0xeb, 0x81, 0xf8, 0xa7, 0xbe, 0x0a,
0x3c, 0x02, 0x47, 0xd9, 0x31, 0xc9, 0x95, 0x47, 0x94, 0x4c, 0xa9, 0xf3, 0x30, 0x02, 0xc7, 0x2c,
0xab, 0x7c, 0x85, 0x64, 0x65, 0x49, 0x75, 0xec, 0xa3, 0x79, 0x2e, 0x6c, 0x11, 0xf3, 0x6a, 0x5e,
0xe6, 0x69, 0x5e, 0x33, 0x8f, 0xd4, 0x32, 0x17, 0x55, 0xa6, 0xf9, 0xc1, 0xda, 0x52, 0x39, 0xea,
0xbf, 0x34, 0x74, 0x67, 0x0a, 0xf5, 0x82, 0x72, 0x81, 0xdf, 0x4e, 0x74, 0xd2, 0xb8, 0xa4, 0x93,
0xa9, 0x87, 0x40, 0x12, 0x5c, 0x36, 0x74, 0x49, 0xa5, 0x2d, 0x0d, 0x77, 0x52, 0xed, 0x74, 0x50,
0x91, 0x0a, 0xe8, 0x24, 0xe3, 0x91, 0x5f, 0x5d, 0xd8, 0xd8, 0xba, 0x9e, 0x4f, 0xb3, 0xa2, 0x52,
0x15, 0x77, 0x13, 0x51, 0x6b, 0xa0, 0x5d, 0x3f, 0x9f, 0xee, 0x2f, 0xe9, 0x37, 0x3e, 0x42, 0xe5,
0x0e, 0x0d, 0xb6, 0xbb, 0x36, 0xf5, 0xed, 0x96, 0x0f, 0xca, 0x23, 0x99, 0x51, 0x47, 0xf2, 0xb0,
0xc8, 0xe0, 0x61, 0x91, 0xdd, 0x40, 0xec, 0xb3, 0x43, 0xc1, 0x68, 0xe0, 0x99, 0xb7, 0x54, 0xde,
0xf2, 0x5e, 0x4a, 0xcb, 0xca, 0x28, 0xe3, 0x77, 0xa8, 0xc4, 0xc1, 0x07, 0x47, 0x84, 0x4c, 0x4d,
0xcf, 0xa3, 0xab, 0x76, 0xd2, 0x6e, 0x81, 0x7f, 0xa8, 0x58, 0xb3, 0x9c, 0xb4, 0x72, 0xb8, 0xb2,
0x46, 0x9a, 0xf5, 0x6f, 0x05, 0x74, 0x77, 0xe6, 0xdd, 0xe3, 0x3d, 0x84, 0xdd, 0xd1, 0x09, 0xdf,
0xf6, 0xfd, 0xf0, 0x23, 0xb8, 0xd2, 0x6d, 0xd1, 0xbc, 0xa7, 0xaa, 0x5f, 0xce, 0xe0, 0xc3, 0x20,
0x6b, 0x0a, 0x88, 0xb7, 0xd0, 0xa2, 0x13, 0x33, 0x06, 0x81, 0x78, 0x0e, 0xb6, 0x2f, 0x8e, 0x7a,
0xd2, 0x52, 0xd1, 0xbc, 0xad, 0xa4, 0x16, 0x9f, 0x65, 0x4e, 0xad, 0xb1, 0xe8, 0x84, 0x77, 0x81,
0x53, 0x06, 0xee, 0x90, 0xcf, 0x67, 0xf9, 0x66, 0xe6, 0xd4, 0x1a, 0x8b, 0xc6, 0x9b, 0xa8, 0x0c,
0x9f, 0x22, 0x70, 0x04, 0xb8, 0x07, 0xa1, 0xcb, 0xab, 0x05, 0x49, 0x8f, 0xae, 0x61, 0x27, 0x75,
0x66, 0x65, 0x22, 0xf1, 0x57, 0x0d, 0x55, 0x94, 0x21, 0xc5, 0x16, 0xe5, 0xe8, 0xbd, 0xfa, 0x1b,
0x4f, 0x8c, 0x34, 0xd3, 0xca, 0x3b, 0x81, 0x60, 0x3d, 0x73, 0x59, 0x15, 0x55, 0xc9, 0x9c, 0x59,
0xd9, 0x22, 0x6a, 0x1d, 0x84, 0x27, 0x59, 0xbc, 0x84, 0xf2, 0x6d, 0xe8, 0xc9, 0x6b, 0xfa, 0xdf,
0x4a, 0x3e, 0xf1, 0x36, 0x2a, 0x76, 0x6d, 0x3f, 0x86, 0x2b, 0xfc, 0x4f, 0xd3, 0x23, 0xf4, 0x92,
0x76, 0xc0, 0x1a, 0x90, 0x4f, 0xe7, 0x36, 0x35, 0x73, 0xed, 0xf8, 0x4c, 0xcf, 0x9d, 0x9c, 0xe9,
0xb9, 0x9f, 0x67, 0x7a, 0xee, 0x73, 0x5f, 0xd7, 0x8e, 0xfb, 0xba, 0x76, 0xd2, 0xd7, 0xb5, 0xdf,
0x7d, 0x5d, 0xfb, 0x72, 0xae, 0xe7, 0xde, 0xfc, 0xa7, 0xcc, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff,
0xd0, 0x3c, 0x91, 0x46, 0xc1, 0x07, 0x00, 0x00,
// 758 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x94, 0xcf, 0x6f, 0xdb, 0x36,
0x14, 0xc7, 0xad, 0xd8, 0xce, 0x3c, 0xc6, 0x0e, 0x32, 0x6e, 0xd9, 0x3c, 0x03, 0x53, 0x06, 0x9f,
0x12, 0x6c, 0xa1, 0xe0, 0x60, 0x03, 0xb2, 0x1d, 0x02, 0x44, 0x73, 0xb0, 0x65, 0x58, 0xe0, 0x40,
0x19, 0xb0, 0x61, 0xc0, 0x0a, 0xe8, 0xc7, 0xab, 0xc2, 0x5a, 0x16, 0x05, 0x92, 0x52, 0xeb, 0x5b,
0xff, 0x84, 0x1e, 0xfa, 0x1f, 0xf5, 0x12, 0xf4, 0x94, 0x63, 0x2f, 0x0d, 0x1a, 0xe7, 0x7f, 0xe8,
0xb9, 0x90, 0x44, 0x3b, 0x96, 0x7f, 0x04, 0x01, 0xd2, 0xde, 0x44, 0xf2, 0x7d, 0xbe, 0xdf, 0xf7,
0x1e, 0x1f, 0x85, 0x7e, 0xe9, 0xef, 0x0b, 0x42, 0x99, 0xd1, 0x8f, 0x1d, 0xe0, 0x21, 0x48, 0x10,
0x46, 0xd4, 0xf7, 0x0d, 0x3b, 0xa2, 0xc2, 0x88, 0x58, 0x40, 0xdd, 0xa1, 0x91, 0x74, 0x1c, 0x90,
0x76, 0xc7, 0xf0, 0x21, 0x04, 0x6e, 0x4b, 0xf0, 0x48, 0xc4, 0x99, 0x64, 0x78, 0x27, 0x47, 0xc9,
0x2d, 0x4a, 0xa2, 0xbe, 0x4f, 0x52, 0x94, 0xe4, 0x28, 0x51, 0x68, 0x6b, 0xd7, 0xa7, 0xf2, 0x3c,
0x76, 0x88, 0xcb, 0x06, 0x86, 0xcf, 0x7c, 0x66, 0x64, 0x0a, 0x4e, 0xfc, 0x38, 0x5b, 0x65, 0x8b,
0xec, 0x2b, 0x57, 0x6e, 0xed, 0x2d, 0x4d, 0xca, 0xe0, 0x20, 0x58, 0xcc, 0x5d, 0x98, 0xcd, 0xa6,
0xf5, 0xf3, 0x72, 0x26, 0x0e, 0x13, 0xe0, 0x82, 0xb2, 0x10, 0xbc, 0x39, 0xec, 0xc7, 0xe5, 0x58,
0x32, 0x57, 0x72, 0x6b, 0x77, 0x71, 0x34, 0x8f, 0x43, 0x49, 0x07, 0xf3, 0x39, 0x75, 0x16, 0x87,
0xc7, 0x92, 0x06, 0x06, 0x0d, 0xa5, 0x90, 0x7c, 0x16, 0x69, 0xbf, 0xd6, 0x50, 0xed, 0x28, 0xa1,
0xae, 0xa4, 0x2c, 0xc4, 0xff, 0xa2, 0xda, 0x00, 0xa4, 0xed, 0xd9, 0xd2, 0x6e, 0x6a, 0xdf, 0x6b,
0xdb, 0x6b, 0x7b, 0xdb, 0x64, 0x69, 0xd3, 0x49, 0xd2, 0x21, 0x3d, 0xe7, 0x09, 0xb8, 0xf2, 0x04,
0xa4, 0x6d, 0xe2, 0x8b, 0xab, 0xad, 0xd2, 0xe8, 0x6a, 0x0b, 0xdd, 0xee, 0x59, 0x13, 0x35, 0xec,
0xa1, 0x86, 0x07, 0x01, 0x48, 0xe8, 0x45, 0xa9, 0x93, 0x68, 0xae, 0x64, 0xf2, 0x3f, 0xdc, 0x2d,
0xdf, 0x9d, 0x46, 0xcc, 0x2f, 0x46, 0x57, 0x5b, 0x8d, 0xc2, 0x96, 0x55, 0x14, 0x6d, 0xbf, 0x5a,
0x41, 0x5f, 0x9e, 0x32, 0xaf, 0x4b, 0x05, 0x8f, 0xb3, 0x2d, 0x33, 0xf6, 0x7c, 0x90, 0x9f, 0xb4,
0xae, 0x8a, 0x88, 0xc0, 0x55, 0xe5, 0x98, 0xe4, 0xde, 0x23, 0x4a, 0x16, 0xe4, 0x79, 0x16, 0x81,
0x6b, 0xd6, 0x95, 0x5f, 0x25, 0x5d, 0x59, 0x99, 0x3a, 0x0e, 0xd0, 0xaa, 0x90, 0xb6, 0x8c, 0x45,
0xb3, 0x9c, 0xf9, 0x74, 0x1f, 0xe8, 0x93, 0x69, 0x99, 0xeb, 0xca, 0x69, 0x35, 0x5f, 0x5b, 0xca,
0xa3, 0xfd, 0x56, 0x43, 0xdf, 0x2c, 0xa0, 0xfe, 0xa2, 0x42, 0xe2, 0xff, 0xe7, 0x3a, 0x69, 0xdc,
0xd1, 0xc9, 0xa9, 0x87, 0x40, 0x52, 0x3c, 0x6b, 0xe8, 0x86, 0xb2, 0xad, 0x8d, 0x77, 0xa6, 0xda,
0xe9, 0xa2, 0x2a, 0x95, 0x30, 0x48, 0xc7, 0xa3, 0xbc, 0xbd, 0xb6, 0x77, 0xf0, 0xb0, 0x3a, 0xcd,
0x86, 0xb2, 0xaa, 0x1e, 0xa7, 0xa2, 0x56, 0xae, 0xdd, 0xbe, 0x59, 0x5c, 0x5f, 0xda, 0x6f, 0x7c,
0x8e, 0xea, 0x03, 0x1a, 0x1e, 0x26, 0x36, 0x0d, 0x6c, 0x27, 0x00, 0x55, 0x23, 0x59, 0x92, 0x47,
0xfa, 0xb0, 0x48, 0xfe, 0xb0, 0xc8, 0x71, 0x28, 0x7b, 0xfc, 0x4c, 0x72, 0x1a, 0xfa, 0xe6, 0x57,
0xca, 0xb7, 0x7e, 0x32, 0xa5, 0x65, 0x15, 0x94, 0xf1, 0x23, 0x54, 0x13, 0x10, 0x80, 0x2b, 0x19,
0x57, 0xd3, 0xf3, 0xd3, 0x7d, 0x3b, 0x69, 0x3b, 0x10, 0x9c, 0x29, 0xd6, 0xac, 0xa7, 0xad, 0x1c,
0xaf, 0xac, 0x89, 0x66, 0xfb, 0x7d, 0x05, 0x7d, 0xbb, 0xf4, 0xee, 0xf1, 0x9f, 0x08, 0x33, 0x47,
0x00, 0x4f, 0xc0, 0xfb, 0x3d, 0xff, 0x23, 0x50, 0x16, 0x66, 0xd5, 0x96, 0xcd, 0x96, 0xca, 0x1e,
0xf7, 0xe6, 0x22, 0xac, 0x05, 0x14, 0x7e, 0xa9, 0xa1, 0x86, 0x97, 0xdb, 0x80, 0x77, 0xca, 0xbc,
0xf1, 0xed, 0xfd, 0xf3, 0x31, 0xa6, 0x94, 0x74, 0xa7, 0x95, 0x8f, 0x42, 0xc9, 0x87, 0xe6, 0xa6,
0x4a, 0xb0, 0x51, 0x38, 0xb3, 0x8a, 0x49, 0xe0, 0x13, 0x84, 0xbd, 0x89, 0xa4, 0x38, 0x0c, 0x02,
0xf6, 0x14, 0xbc, 0xec, 0x01, 0x55, 0xcd, 0xef, 0x94, 0xc2, 0x66, 0xc1, 0x77, 0x1c, 0x64, 0x2d,
0x00, 0xf1, 0x01, 0x5a, 0x77, 0x63, 0xce, 0x21, 0x94, 0x7f, 0x80, 0x1d, 0xc8, 0xf3, 0x61, 0xb3,
0x92, 0x49, 0x7d, 0xad, 0xa4, 0xd6, 0x7f, 0x2b, 0x9c, 0x5a, 0x33, 0xd1, 0x29, 0xef, 0x81, 0xa0,
0x1c, 0xbc, 0x31, 0x5f, 0x2d, 0xf2, 0xdd, 0xc2, 0xa9, 0x35, 0x13, 0x8d, 0xf7, 0x51, 0x1d, 0x9e,
0x45, 0xe0, 0x8e, 0x7b, 0xbc, 0x9a, 0xd1, 0x93, 0x49, 0x3b, 0x9a, 0x3a, 0xb3, 0x0a, 0x91, 0xad,
0x01, 0xc2, 0xf3, 0x4d, 0xc4, 0x1b, 0xa8, 0xdc, 0x87, 0x61, 0x76, 0xe5, 0x9f, 0x5b, 0xe9, 0x27,
0x3e, 0x44, 0xd5, 0xc4, 0x0e, 0x62, 0xb8, 0xc7, 0xbf, 0x79, 0x7a, 0x1c, 0xff, 0xa6, 0x03, 0xb0,
0x72, 0xf2, 0xd7, 0x95, 0x7d, 0xcd, 0xdc, 0xb9, 0xb8, 0xd6, 0x4b, 0x97, 0xd7, 0x7a, 0xe9, 0xcd,
0xb5, 0x5e, 0x7a, 0x3e, 0xd2, 0xb5, 0x8b, 0x91, 0xae, 0x5d, 0x8e, 0x74, 0xed, 0xdd, 0x48, 0xd7,
0x5e, 0xdc, 0xe8, 0xa5, 0xff, 0x3e, 0x53, 0xb7, 0xfe, 0x21, 0x00, 0x00, 0xff, 0xff, 0x59, 0x3e,
0xe1, 0xf8, 0x0d, 0x08, 0x00, 0x00,
}


@ -75,17 +75,10 @@ message PodDisruptionBudgetSpec {
// PodDisruptionBudgetStatus represents information about the status of a
// PodDisruptionBudget. Status may trail the actual state of a system.
message PodDisruptionBudgetStatus {
// Number of pod disruptions that are currently allowed.
optional int32 disruptionsAllowed = 1;
// current number of healthy pods
optional int32 currentHealthy = 2;
// minimum desired number of healthy pods
optional int32 desiredHealthy = 3;
// total number of pods counted by this disruption budget
optional int32 expectedPods = 4;
// Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other
// status information is valid only if observedGeneration equals the PDB's object generation.
// +optional
optional int64 observedGeneration = 1;
// DisruptedPods contains information about pods whose eviction was
// processed by the API server eviction subresource handler but has not
@ -98,6 +91,18 @@ message PodDisruptionBudgetStatus {
// the list automatically by PodDisruptionBudget controller after some time.
// If everything goes smooth this map should be empty for the most of the time.
// Large number of entries in the map may indicate problems with pod deletions.
map<string, k8s.io.kubernetes.pkg.api.unversioned.Time> disruptedPods = 5;
map<string, k8s.io.kubernetes.pkg.api.unversioned.Time> disruptedPods = 2;
// Number of pod disruptions that are currently allowed.
optional int32 disruptionsAllowed = 3;
// current number of healthy pods
optional int32 currentHealthy = 4;
// minimum desired number of healthy pods
optional int32 desiredHealthy = 5;
// total number of pods counted by this disruption budget
optional int32 expectedPods = 6;
}


@ -38,17 +38,10 @@ type PodDisruptionBudgetSpec struct {
// PodDisruptionBudgetStatus represents information about the status of a
// PodDisruptionBudget. Status may trail the actual state of a system.
type PodDisruptionBudgetStatus struct {
// Number of pod disruptions that are currently allowed.
PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,1,opt,name=disruptionsAllowed"`
// current number of healthy pods
CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,2,opt,name=currentHealthy"`
// minimum desired number of healthy pods
DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,3,opt,name=desiredHealthy"`
// total number of pods counted by this disruption budget
ExpectedPods int32 `json:"expectedPods" protobuf:"varint,4,opt,name=expectedPods"`
// Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other
// status information is valid only if observedGeneration equals the PDB's object generation.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
// DisruptedPods contains information about pods whose eviction was
// processed by the API server eviction subresource handler but has not
@ -61,7 +54,19 @@ type PodDisruptionBudgetStatus struct {
// the list automatically by PodDisruptionBudget controller after some time.
// If everything goes smooth this map should be empty for the most of the time.
// Large number of entries in the map may indicate problems with pod deletions.
DisruptedPods map[string]unversioned.Time `json:"disruptedPods" protobuf:"bytes,5,rep,name=disruptedPods"`
DisruptedPods map[string]unversioned.Time `json:"disruptedPods" protobuf:"bytes,2,rep,name=disruptedPods"`
// Number of pod disruptions that are currently allowed.
PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"`
// current number of healthy pods
CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"`
// minimum desired number of healthy pods
DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,5,opt,name=desiredHealthy"`
// total number of pods counted by this disruption budget
ExpectedPods int32 `json:"expectedPods" protobuf:"varint,6,opt,name=expectedPods"`
}
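For illustration only, a consumer reading this status (outside the API server's eviction subresource, which performs the actual check and records evicted pods in DisruptedPods) might look like the sketch below:

package example

import policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"

// canDisrupt reports whether the budget currently allows one more eviction.
// Reading the status like this is only an approximation; the eviction
// subresource is where the decision is actually enforced.
func canDisrupt(pdb *policy.PodDisruptionBudget) bool {
	return pdb.Status.PodDisruptionsAllowed > 0
}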
// +genclient=true


@ -67,11 +67,12 @@ func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string {
var map_PodDisruptionBudgetStatus = map[string]string{
"": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.",
"observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.",
"disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.",
"disruptionsAllowed": "Number of pod disruptions that are currently allowed.",
"currentHealthy": "current number of healthy pods",
"desiredHealthy": "minimum desired number of healthy pods",
"expectedPods": "total number of pods counted by this disruption budget",
"disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.",
}
func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string {


@ -154,11 +154,12 @@ func Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(i
}
func autoConvert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.DisruptedPods = *(*map[string]unversioned.Time)(unsafe.Pointer(&in.DisruptedPods))
out.PodDisruptionsAllowed = in.PodDisruptionsAllowed
out.CurrentHealthy = in.CurrentHealthy
out.DesiredHealthy = in.DesiredHealthy
out.ExpectedPods = in.ExpectedPods
out.DisruptedPods = *(*map[string]unversioned.Time)(unsafe.Pointer(&in.DisruptedPods))
return nil
}
@ -167,11 +168,12 @@ func Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStat
}
func autoConvert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.DisruptedPods = *(*map[string]unversioned.Time)(unsafe.Pointer(&in.DisruptedPods))
out.PodDisruptionsAllowed = in.PodDisruptionsAllowed
out.CurrentHealthy = in.CurrentHealthy
out.DesiredHealthy = in.DesiredHealthy
out.ExpectedPods = in.ExpectedPods
out.DisruptedPods = *(*map[string]unversioned.Time)(unsafe.Pointer(&in.DisruptedPods))
return nil
}


@ -126,10 +126,7 @@ func DeepCopy_v1beta1_PodDisruptionBudgetStatus(in interface{}, out interface{},
{
in := in.(*PodDisruptionBudgetStatus)
out := out.(*PodDisruptionBudgetStatus)
out.PodDisruptionsAllowed = in.PodDisruptionsAllowed
out.CurrentHealthy = in.CurrentHealthy
out.DesiredHealthy = in.DesiredHealthy
out.ExpectedPods = in.ExpectedPods
out.ObservedGeneration = in.ObservedGeneration
if in.DisruptedPods != nil {
in, out := &in.DisruptedPods, &out.DisruptedPods
*out = make(map[string]unversioned.Time)
@ -139,6 +136,10 @@ func DeepCopy_v1beta1_PodDisruptionBudgetStatus(in interface{}, out interface{},
} else {
out.DisruptedPods = nil
}
out.PodDisruptionsAllowed = in.PodDisruptionsAllowed
out.CurrentHealthy = in.CurrentHealthy
out.DesiredHealthy = in.DesiredHealthy
out.ExpectedPods = in.ExpectedPods
return nil
}
}


@ -126,10 +126,7 @@ func DeepCopy_policy_PodDisruptionBudgetStatus(in interface{}, out interface{},
{
in := in.(*PodDisruptionBudgetStatus)
out := out.(*PodDisruptionBudgetStatus)
out.PodDisruptionsAllowed = in.PodDisruptionsAllowed
out.CurrentHealthy = in.CurrentHealthy
out.DesiredHealthy = in.DesiredHealthy
out.ExpectedPods = in.ExpectedPods
out.ObservedGeneration = in.ObservedGeneration
if in.DisruptedPods != nil {
in, out := &in.DisruptedPods, &out.DisruptedPods
*out = make(map[string]unversioned.Time)
@ -139,6 +136,10 @@ func DeepCopy_policy_PodDisruptionBudgetStatus(in interface{}, out interface{},
} else {
out.DisruptedPods = nil
}
out.PodDisruptionsAllowed = in.PodDisruptionsAllowed
out.CurrentHealthy = in.CurrentHealthy
out.DesiredHealthy = in.DesiredHealthy
out.ExpectedPods = in.ExpectedPods
return nil
}
}


@ -69,6 +69,7 @@ func (i *DefaultInfo) GetExtra() map[string][]string {
// well-known user and group names
const (
SystemPrivilegedGroup = "system:masters"
NodesGroup = "system:nodes"
AllUnauthenticated = "system:unauthenticated"
AllAuthenticated = "system:authenticated"


@ -36,33 +36,42 @@ var ForeverTestTimeout = time.Second * 30
// NeverStop may be passed to Until to make it never stop.
var NeverStop <-chan struct{} = make(chan struct{})
// Forever is syntactic sugar on top of Until
// Forever calls f every period for ever.
//
// Forever is syntactic sugar on top of Until.
func Forever(f func(), period time.Duration) {
Until(f, period, NeverStop)
}
// Until loops until stop channel is closed, running f every period.
// Until is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = true (which means the timer for period
// starts after the f completes).
//
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
// with sliding = true (which means the timer for period starts after the f
// completes).
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, true, stopCh)
}
// NonSlidingUntil loops until stop channel is closed, running f every
// period. NonSlidingUntil is syntactic sugar on top of JitterUntil
// with zero jitter factor, with sliding = false (meaning the timer for
// period starts at the same time as the function starts).
// period.
//
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = false (meaning the timer for period starts at the same
// time as the function starts).
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, false, stopCh)
}
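A brief usage sketch of the helpers documented above, assuming this file is the k8s.io/kubernetes/pkg/util/wait package (the path is not visible in this extract):

package example

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/wait"
)

// runTicker prints "tick" once a second until the stop channel is closed.
// Until waits the period after f completes (sliding); NonSlidingUntil starts
// the period timer when f starts.
func runTicker() {
	stop := make(chan struct{})
	go wait.Until(func() { fmt.Println("tick") }, time.Second, stop)
	time.Sleep(3 * time.Second)
	close(stop) // closing the stop channel ends the loop
}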
// JitterUntil loops until stop channel is closed, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged.
// Catches any panics, and keeps going. f may not be invoked if
// stop channel is already closed. Pass NeverStop to Until if you
// don't want it stop.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Close stopCh to stop. f may not be invoked if stop channel is already
// closed. Pass NeverStop if you don't want it to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
for {
@ -104,9 +113,11 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b
}
}
// Jitter returns a time.Duration between duration and duration + maxFactor * duration,
// to allow clients to avoid converging on periodic behavior. If maxFactor is 0.0, a
// suggested default value will be chosen.
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
// This allows clients to avoid converging on periodic behavior. If maxFactor
// is 0.0, a suggested default value will be chosen.
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
if maxFactor <= 0.0 {
maxFactor = 1.0
@ -115,26 +126,31 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration {
return wait
}
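A small illustration of the bounds described for Jitter, under the same import assumption; the printed values are examples, not guaranteed outputs:

package main

import (
    "fmt"
    "time"

    "k8s.io/kubernetes/pkg/util/wait"
)

func main() {
    base := 10 * time.Second
    // Result lies in [10s, 15s) for maxFactor = 0.5.
    fmt.Println(wait.Jitter(base, 0.5))
    // maxFactor <= 0.0 falls back to the suggested default of 1.0,
    // i.e. a result in [10s, 20s).
    fmt.Println(wait.Jitter(base, 0))
}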
// ErrWaitTimeout is returned when the condition exited without success
// ErrWaitTimeout is returned when the condition exited without success.
var ErrWaitTimeout = errors.New("timed out waiting for the condition")
// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
// Backoff is parameters applied to a Backoff function.
// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
Duration time.Duration
Factor float64
Jitter float64
Steps int
Duration time.Duration // the base duration
Factor float64 // Duration is multiplied by factor each iteration
Jitter float64 // The amount of jitter applied each iteration
Steps int // Exit with error after this many steps
}
// ExponentialBackoff repeats a condition check up to steps times, increasing the wait
// by multipling the previous duration by factor. If jitter is greater than zero,
// a random amount of each duration is added (between duration and duration*(1+jitter)).
// If the condition never returns true, ErrWaitTimeout is returned. All other errors
// terminate immediately.
// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It checks the condition up to Steps times, increasing the wait by multiplying
// the previous duration by Factor.
//
// If Jitter is greater than zero, a random amount of each duration is added
// (between duration and duration*(1+jitter)).
//
// If the condition never returns true, ErrWaitTimeout is returned. All other
// errors terminate immediately.
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
duration := backoff.Duration
for i := 0; i < backoff.Steps; i++ {
@ -154,22 +170,33 @@ func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
}
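A hedged sketch of how the Backoff fields drive ExponentialBackoff, using only the fields and signature shown in this hunk; checkReady is a hypothetical placeholder for whatever the caller is waiting on:

package main

import (
    "fmt"
    "time"

    "k8s.io/kubernetes/pkg/util/wait"
)

// checkReady is a hypothetical stand-in for the caller's readiness test.
func checkReady() bool { return false }

func main() {
    backoff := wait.Backoff{
        Duration: 100 * time.Millisecond, // base wait
        Factor:   2.0,                    // 100ms, 200ms, 400ms, ...
        Jitter:   0.1,                    // up to 10% extra per step
        Steps:    5,                      // give up after 5 checks
    }
    err := wait.ExponentialBackoff(backoff, func() (bool, error) {
        return checkReady(), nil // returning an error here aborts immediately
    })
    if err == wait.ErrWaitTimeout {
        fmt.Println("condition never became true")
    }
}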
// Poll tries a condition func until it returns true, an error, or the timeout
// is reached. condition will always be invoked at least once but some intervals
// may be missed if the condition takes too long or the time window is too short.
// is reached.
//
// Poll always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
// Poll always waits the interval before the first check of the condition.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
return pollInternal(poller(interval, timeout), condition)
}
func pollInternal(wait WaitFunc, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return WaitFor(wait, condition, done)
return WaitFor(wait, condition, NeverStop)
}
// PollImmediate is identical to Poll, except that it performs the first check
// immediately, not waiting interval beforehand.
// PollImmediate tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
// will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
return pollImmediateInternal(poller(interval, timeout), condition)
}
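The only difference between Poll and PollImmediate is whether the first check happens before or after the initial interval; a minimal sketch, with endpointUp as a hypothetical condition:

package main

import (
    "time"

    "k8s.io/kubernetes/pkg/util/wait"
)

// endpointUp is a hypothetical readiness probe matching ConditionFunc.
func endpointUp() (bool, error) { return true, nil }

func main() {
    // Waits 2s, then checks, repeating until true, an error, or the 1m timeout.
    _ = wait.Poll(2*time.Second, time.Minute, endpointUp)

    // Checks immediately, then falls back to the same interval/timeout loop.
    _ = wait.PollImmediate(2*time.Second, time.Minute, endpointUp)
}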
@ -185,16 +212,24 @@ func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
return pollInternal(wait, condition)
}
// PollInfinite polls forever.
// PollInfinite tries a condition func until it returns true or an error.
//
// PollInfinite always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return PollUntil(interval, condition, done)
}
// PollImmediateInfinite is identical to PollInfinite, except that it
// performs the first check immediately, not waiting interval
// beforehand.
// PollImmediateInfinite tries a condition func until it returns true or an error.
//
// PollImmediateInfinite runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
done, err := condition()
if err != nil {
@ -206,7 +241,11 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro
return PollInfinite(interval, condition)
}
// PollUntil is like Poll, but it takes a stop change instead of total duration
// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
//
// PollUntil always waits the interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
return WaitFor(poller(interval, 0), condition, stopCh)
}
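PollUntil trades the timeout for a caller-owned stop channel; a sketch of that shape, again assuming the wait import path used above:

package main

import (
    "time"

    "k8s.io/kubernetes/pkg/util/wait"
)

func main() {
    stopCh := make(chan struct{})
    go func() {
        time.Sleep(30 * time.Second)
        close(stopCh) // give up after 30s instead of passing a timeout
    }()
    _ = wait.PollUntil(time.Second, func() (bool, error) {
        return false, nil // keep polling until stopCh closes
    }, stopCh)
}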
@ -215,11 +254,16 @@ func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan st
// should be executed and is closed when the last test should be invoked.
type WaitFunc func(done <-chan struct{}) <-chan struct{}
// WaitFor gets a channel from wait(), and then invokes fn once for every value
// placed on the channel and once more when the channel is closed. If fn
// returns an error the loop ends and that error is returned, and if fn returns
// true the loop ends and nil is returned. ErrWaitTimeout will be returned if
// the channel is closed without fn ever returning true.
// WaitFor continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()', and then invokes 'fn' once for every value
// placed on the channel and once more when the channel is closed.
//
// If 'fn' returns an error the loop ends and that error is returned, and if
// 'fn' returns true the loop ends and nil is returned.
//
// ErrWaitTimeout will be returned if the channel is closed without fn ever
// returning true.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
c := wait(done)
for {
@ -238,11 +282,14 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
return ErrWaitTimeout
}
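Callers that need full control over pacing can hand a WaitFunc to WaitFor directly; a sketch built only from the types shown in this hunk:

package main

import (
    "time"

    "k8s.io/kubernetes/pkg/util/wait"
)

func main() {
    // A WaitFunc that ticks three times, one second apart, then closes its channel.
    ticks := wait.WaitFunc(func(done <-chan struct{}) <-chan struct{} {
        ch := make(chan struct{})
        go func() {
            defer close(ch)
            for i := 0; i < 3; i++ {
                select {
                case ch <- struct{}{}:
                case <-done:
                    return
                }
                time.Sleep(time.Second)
            }
        }()
        return ch
    })
    // Returns ErrWaitTimeout if the condition never returns true before the
    // tick channel is closed.
    _ = wait.WaitFor(ticks, func() (bool, error) { return false, nil }, wait.NeverStop)
}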
// poller returns a WaitFunc that will send to the channel every
// interval until timeout has elapsed and then close the channel.
// Over very short intervals you may receive no ticks before
// the channel is closed. If timeout is 0, the channel
// will never be closed.
// poller returns a WaitFunc that will send to the channel every interval until
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
// closed. A timeout of 0 is interpreted as infinity.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
func poller(interval, timeout time.Duration) WaitFunc {
return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
ch := make(chan struct{})

View File

@ -29,7 +29,7 @@ import (
type Config struct {
// The queue for your objects; either a FIFO or
// a DeltaFIFO. Your Process() function should accept
// the output of this Oueue's Pop() method.
// the output of this Queue's Pop() method.
Queue
// Something that can list and watch your objects.
@ -121,6 +121,11 @@ func (c *Controller) Requeue(obj interface{}) error {
// TODO: Consider doing the processing in parallel. This will require a little thought
// to make sure that we don't end up processing the same object multiple times
// concurrently.
//
// TODO: Plumb through the stopCh here (and down to the queue) so that this can
// actually exit when the controller is stopped. Or just give up on this stuff
// ever being stoppable. Converting this whole package to use Context would
// also be helpful.
func (c *Controller) processLoop() {
for {
obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
@ -134,7 +139,7 @@ func (c *Controller) processLoop() {
}
// ResourceEventHandler can handle notifications for events that happen to a
// resource. The events are informational only, so you can't return an
// resource. The events are informational only, so you can't return an
// error.
// * OnAdd is called when an object is added.
// * OnUpdate is called when an object is modified. Note that oldObj is the
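In practice these notifications are usually wired up through the adapter struct in this package; a hedged sketch that assumes cache.ResourceEventHandlerFuncs (not shown in this hunk) keeps its usual AddFunc/UpdateFunc/DeleteFunc fields:

package main

import (
    "log"

    "k8s.io/kubernetes/pkg/client/cache"
)

func main() {
    handler := cache.ResourceEventHandlerFuncs{
        AddFunc:    func(obj interface{}) { log.Printf("added: %v", obj) },
        UpdateFunc: func(oldObj, newObj interface{}) { log.Printf("updated: %v -> %v", oldObj, newObj) },
        DeleteFunc: func(obj interface{}) { log.Printf("deleted: %v", obj) },
    }
    // The adapter satisfies ResourceEventHandler; as noted above, the
    // callbacks are informational only and cannot return an error.
    var _ cache.ResourceEventHandler = handler
}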

View File

@ -283,6 +283,9 @@ func TestHammerController(t *testing.T) {
time.Sleep(100 * time.Millisecond)
close(stop)
// TODO: Verify that no goroutines were leaked here and that everything shut
// down cleanly.
outputSetLock.Lock()
t.Logf("got: %#v", outputSet)
}

View File

@ -45,7 +45,7 @@ import (
// Reflector watches a specified resource and causes all changes to be reflected in the given store.
type Reflector struct {
// name identifies this reflector. By default it will be a file:line if possible.
// name identifies this reflector. By default it will be a file:line if possible.
name string
// The type of object we expect to place in the store.
@ -74,12 +74,6 @@ var (
// However, it can be modified to avoid periodic resync to break the
// TCP connection.
minWatchTimeout = 5 * time.Minute
// If we are within 'forceResyncThreshold' from the next planned resync
// and are just before issuing Watch(), resync will be forced now.
forceResyncThreshold = 3 * time.Second
// We try to set timeouts for Watch() so that we will finish about
// than 'timeoutThreshold' from next planned periodic resync.
timeoutThreshold = 1 * time.Second
)
// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
@ -114,7 +108,7 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
return r
}
// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
// internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common
// call chains to NewReflector, so they'd be low entropy names for reflectors
var internalPackages = []string{"kubernetes/pkg/client/cache/", "/runtime/asm_"}
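A rough sketch of constructing and running a named reflector, assuming restClient is a configured REST client that satisfies cache.Getter and that Run starts the list/watch loop and returns immediately:

package main

import (
    "time"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/cache"
    "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/pkg/fields"
)

// newPodReflector is a hedged sketch: restClient is assumed to be a configured
// core/v1 REST client (anything satisfying cache.Getter works).
func newPodReflector(restClient *restclient.RESTClient) *cache.Reflector {
    lw := cache.NewListWatchFromClient(restClient, "pods", v1.NamespaceAll, fields.Everything())
    store := cache.NewStore(cache.MetaNamespaceKeyFunc)
    r := cache.NewNamedReflector("pod-reflector", lw, &v1.Pod{}, store, 30*time.Second)
    r.Run() // assumed to start the watch loop and return immediately
    return r
}

func main() {}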

6
vendor/BUILD vendored
View File

@ -8951,7 +8951,7 @@ go_library(
"//vendor:k8s.io/client-go/pkg/api/unversioned",
"//vendor:k8s.io/client-go/pkg/api/v1",
"//vendor:k8s.io/client-go/pkg/apimachinery/registered",
"//vendor:k8s.io/client-go/pkg/apis/policy/v1alpha1",
"//vendor:k8s.io/client-go/pkg/apis/policy/v1beta1",
"//vendor:k8s.io/client-go/pkg/fields",
"//vendor:k8s.io/client-go/pkg/runtime",
"//vendor:k8s.io/client-go/pkg/runtime/serializer",
@ -8994,7 +8994,7 @@ go_library(
"//vendor:k8s.io/client-go/pkg/api",
"//vendor:k8s.io/client-go/pkg/api/unversioned",
"//vendor:k8s.io/client-go/pkg/api/v1",
"//vendor:k8s.io/client-go/pkg/apis/policy/v1alpha1",
"//vendor:k8s.io/client-go/pkg/apis/policy/v1beta1",
"//vendor:k8s.io/client-go/pkg/fields",
"//vendor:k8s.io/client-go/pkg/labels",
"//vendor:k8s.io/client-go/pkg/runtime",
@ -9870,11 +9870,13 @@ go_library(
"k8s.io/client-go/pkg/apis/componentconfig/doc.go",
"k8s.io/client-go/pkg/apis/componentconfig/helpers.go",
"k8s.io/client-go/pkg/apis/componentconfig/register.go",
"k8s.io/client-go/pkg/apis/componentconfig/types.generated.go",
"k8s.io/client-go/pkg/apis/componentconfig/types.go",
"k8s.io/client-go/pkg/apis/componentconfig/zz_generated.deepcopy.go",
],
tags = ["automanaged"],
deps = [
"//vendor:github.com/ugorji/go/codec",
"//vendor:k8s.io/client-go/pkg/api/unversioned",
"//vendor:k8s.io/client-go/pkg/conversion",
"//vendor:k8s.io/client-go/pkg/runtime",