diff --git a/cmd/kubeadm/app/phases/validate/BUILD b/cmd/kubeadm/app/phases/validate/BUILD index 6ae20b2732..7310aaaa07 100644 --- a/cmd/kubeadm/app/phases/validate/BUILD +++ b/cmd/kubeadm/app/phases/validate/BUILD @@ -21,7 +21,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/pkg/api:go_default_library", - "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library", + "//vendor/k8s.io/client-go/pkg/api/v1/node:go_default_library", "//vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1:go_default_library", ], ) diff --git a/cmd/kubeadm/app/phases/validate/validate.go b/cmd/kubeadm/app/phases/validate/validate.go index eb67a5d979..aa737a92aa 100644 --- a/cmd/kubeadm/app/phases/validate/validate.go +++ b/cmd/kubeadm/app/phases/validate/validate.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/api/v1" + nodeutil "k8s.io/client-go/pkg/api/v1/node" extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -51,7 +51,7 @@ func Validate(kubeconfigPath string) error { return false, nil } n := &nodeList.Items[0] - if !v1.IsNodeReady(n) { + if !nodeutil.IsNodeReady(n) { fmt.Println("[validate] First node has registered, but is not ready yet") return false, nil } diff --git a/hack/.linted_packages b/hack/.linted_packages index bd85c8e10f..1ab1f0a561 100644 --- a/hack/.linted_packages +++ b/hack/.linted_packages @@ -61,6 +61,7 @@ pkg/api/pod pkg/api/resource pkg/api/service pkg/api/v1 +pkg/api/v1/node pkg/api/v1/service pkg/apis/abac/v0 pkg/apis/abac/v1beta1 diff --git a/pkg/api/resource/BUILD b/pkg/api/resource/BUILD index e70212ecf2..1625024d41 100644 --- a/pkg/api/resource/BUILD +++ b/pkg/api/resource/BUILD @@ -10,7 +10,7 @@ load( 
go_library( name = "go_default_library", - srcs = ["resource_helpers.go"], + srcs = ["helpers.go"], tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", @@ -33,7 +33,7 @@ filegroup( go_test( name = "go_default_test", - srcs = ["resource_helpers_test.go"], + srcs = ["helpers_test.go"], library = ":go_default_library", tags = ["automanaged"], deps = [ diff --git a/pkg/api/resource/resource_helpers.go b/pkg/api/resource/helpers.go similarity index 100% rename from pkg/api/resource/resource_helpers.go rename to pkg/api/resource/helpers.go diff --git a/pkg/api/resource/resource_helpers_test.go b/pkg/api/resource/helpers_test.go similarity index 100% rename from pkg/api/resource/resource_helpers_test.go rename to pkg/api/resource/helpers_test.go diff --git a/pkg/api/v1/BUILD b/pkg/api/v1/BUILD index ff47f1717d..a63a36f7c1 100644 --- a/pkg/api/v1/BUILD +++ b/pkg/api/v1/BUILD @@ -18,9 +18,9 @@ go_library( "generate.go", "generated.pb.go", "meta.go", - "ref.go", + "objectreference.go", "register.go", - "resource_helpers.go", + "resource.go", "taint.go", "toleration.go", "types.generated.go", @@ -39,7 +39,6 @@ go_library( "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/github.com/ugorji/go/codec:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", @@ -79,17 +78,11 @@ go_test( go_test( name = "go_default_test", srcs = [ - "resource_helpers_test.go", "taint_test.go", "toleration_test.go", ], library = ":go_default_library", tags = ["automanaged"], - deps = [ - "//vendor/github.com/stretchr/testify/assert:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - ], ) filegroup( @@ -105,7 +98,10 @@ filegroup( ":package-srcs", "//pkg/api/v1/endpoints:all-srcs", "//pkg/api/v1/helper:all-srcs", + "//pkg/api/v1/node:all-srcs", "//pkg/api/v1/pod:all-srcs", + "//pkg/api/v1/ref:all-srcs", + "//pkg/api/v1/resource:all-srcs", "//pkg/api/v1/service:all-srcs", "//pkg/api/v1/validation:all-srcs", ], diff --git a/pkg/api/v1/node/BUILD b/pkg/api/v1/node/BUILD new file mode 100644 index 0000000000..44f2c53f51 --- /dev/null +++ b/pkg/api/v1/node/BUILD @@ -0,0 +1,28 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["util.go"], + tags = ["automanaged"], + deps = ["//pkg/api/v1:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/api/v1/node/util.go b/pkg/api/v1/node/util.go new file mode 100644 index 0000000000..90d4dbb346 --- /dev/null +++ b/pkg/api/v1/node/util.go @@ -0,0 +1,47 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// TODO: merge with pkg/util/node + +package node + +import ( + "k8s.io/kubernetes/pkg/api/v1" +) + +// GetNodeCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// IsNodeReady returns true if a node is ready; false otherwise. +func IsNodeReady(node *v1.Node) bool { + for _, c := range node.Status.Conditions { + if c.Type == v1.NodeReady { + return c.Status == v1.ConditionTrue + } + } + return false +} diff --git a/pkg/api/v1/objectreference.go b/pkg/api/v1/objectreference.go new file mode 100644 index 0000000000..ee5335ee87 --- /dev/null +++ b/pkg/api/v1/objectreference.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// intend only to get a reference to that object. This simplifies the event recording interface. 
+func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/pkg/api/v1/pod/BUILD b/pkg/api/v1/pod/BUILD index f6661cfc31..f9ae2cbfb1 100644 --- a/pkg/api/v1/pod/BUILD +++ b/pkg/api/v1/pod/BUILD @@ -14,6 +14,7 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", ], ) @@ -25,6 +26,7 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/pkg/api/v1/pod/util.go b/pkg/api/v1/pod/util.go index e07c01fca4..bb24a3c762 100644 --- a/pkg/api/v1/pod/util.go +++ b/pkg/api/v1/pod/util.go @@ -19,7 +19,9 @@ package pod import ( "encoding/json" "fmt" + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/pkg/api/v1" ) @@ -188,3 +190,105 @@ func visitContainerSecretNames(container *v1.Container, visitor func(string) boo } return true } + +// GetContainerStatus extracts the status of container "name" from "statuses". +// It also returns if "name" exists. 
+func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i], true + } + } + return v1.ContainerStatus{}, false +} + +// GetExistingContainerStatus extracts the status of container "name" from "statuses", +// and returns empty status if "name" does not exist. +func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i] + } + } + return v1.ContainerStatus{} +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *v1.Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodReadyConditionTrue returns true if the pod's Ready condition is true; false otherwise. +func IsPodReadyConditionTrue(status v1.PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == v1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present.
+func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { + _, condition := GetPodCondition(&status, v1.PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added. +func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { + condition.LastTransitionTime = metav1.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } else { + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. 
+ return !isEqual + } +} diff --git a/pkg/api/v1/pod/util_test.go b/pkg/api/v1/pod/util_test.go index 4e04f83451..63f507894e 100644 --- a/pkg/api/v1/pod/util_test.go +++ b/pkg/api/v1/pod/util_test.go @@ -20,7 +20,9 @@ import ( "reflect" "strings" "testing" + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" @@ -342,3 +344,58 @@ func collectSecretPaths(t *testing.T, path *field.Path, name string, tp reflect. return secretPaths } + +func newPod(now metav1.Time, ready bool, beforeSec int) *v1.Pod { + conditionStatus := v1.ConditionFalse + if ready { + conditionStatus = v1.ConditionTrue + } + return &v1.Pod{ + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + LastTransitionTime: metav1.NewTime(now.Time.Add(-1 * time.Duration(beforeSec) * time.Second)), + Status: conditionStatus, + }, + }, + }, + } +} + +func TestIsPodAvailable(t *testing.T) { + now := metav1.Now() + tests := []struct { + pod *v1.Pod + minReadySeconds int32 + expected bool + }{ + { + pod: newPod(now, false, 0), + minReadySeconds: 0, + expected: false, + }, + { + pod: newPod(now, true, 0), + minReadySeconds: 1, + expected: false, + }, + { + pod: newPod(now, true, 0), + minReadySeconds: 0, + expected: true, + }, + { + pod: newPod(now, true, 51), + minReadySeconds: 50, + expected: true, + }, + } + + for i, test := range tests { + isAvailable := IsPodAvailable(test.pod, test.minReadySeconds, now) + if isAvailable != test.expected { + t.Errorf("[tc #%d] expected available pod: %t, got: %t", i, test.expected, isAvailable) + } + } +} diff --git a/pkg/api/v1/ref/BUILD b/pkg/api/v1/ref/BUILD new file mode 100644 index 0000000000..9ebaa431ae --- /dev/null +++ b/pkg/api/v1/ref/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + 
name = "go_default_library", + srcs = ["ref.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/api/v1/ref.go b/pkg/api/v1/ref/ref.go similarity index 80% rename from pkg/api/v1/ref.go rename to pkg/api/v1/ref/ref.go index 5d33719fef..5f6f692f67 100644 --- a/pkg/api/v1/ref.go +++ b/pkg/api/v1/ref/ref.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package ref import ( "errors" @@ -22,10 +22,9 @@ import ( "net/url" "strings" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api/v1" ) var ( @@ -38,11 +37,11 @@ var ( // object, or an error if the object doesn't follow the conventions // that would allow this. // TODO: should take a meta.Interface see http://issue.k8s.io/7127 -func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) { +func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) { if obj == nil { return nil, ErrNilObject } - if ref, ok := obj.(*ObjectReference); ok { + if ref, ok := obj.(*v1.ObjectReference); ok { // Don't make a reference to a reference. 
return ref, nil } @@ -94,14 +93,14 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, // only has list metadata if objectMeta == nil { - return &ObjectReference{ + return &v1.ObjectReference{ Kind: kind, APIVersion: version, ResourceVersion: listMeta.GetResourceVersion(), }, nil } - return &ObjectReference{ + return &v1.ObjectReference{ Kind: kind, APIVersion: version, Name: objectMeta.GetName(), @@ -112,7 +111,7 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, } // GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. -func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) { +func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*v1.ObjectReference, error) { ref, err := GetReference(scheme, obj) if err != nil { return nil, err @@ -120,14 +119,3 @@ func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath s ref.FieldPath = fieldPath return ref, nil } - -// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that -// intend only to get a reference to that object. This simplifies the event recording interface. -func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} -func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/pkg/api/v1/resource.go b/pkg/api/v1/resource.go new file mode 100644 index 0000000000..2dca986679 --- /dev/null +++ b/pkg/api/v1/resource.go @@ -0,0 +1,56 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +// Returns string version of ResourceName. +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. +func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} diff --git a/pkg/api/v1/resource/BUILD b/pkg/api/v1/resource/BUILD new file mode 100644 index 0000000000..c1ac5dbffc --- /dev/null +++ b/pkg/api/v1/resource/BUILD @@ -0,0 +1,45 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["helpers_test.go"], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/api/v1:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + 
], +) + +go_library( + name = "go_default_library", + srcs = ["helpers.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/api/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/api/v1/resource/helpers.go b/pkg/api/v1/resource/helpers.go new file mode 100644 index 0000000000..08f1d46c25 --- /dev/null +++ b/pkg/api/v1/resource/helpers.go @@ -0,0 +1,200 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "math" + "strconv" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" +) + +// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all +// containers of the pod. 
+func PodRequestsAndLimits(pod *v1.Pod) (reqs map[v1.ResourceName]resource.Quantity, limits map[v1.ResourceName]resource.Quantity, err error) { + reqs, limits = map[v1.ResourceName]resource.Quantity{}, map[v1.ResourceName]resource.Quantity{} + for _, container := range pod.Spec.Containers { + for name, quantity := range container.Resources.Requests { + if value, ok := reqs[name]; !ok { + reqs[name] = *quantity.Copy() + } else { + value.Add(quantity) + reqs[name] = value + } + } + for name, quantity := range container.Resources.Limits { + if value, ok := limits[name]; !ok { + limits[name] = *quantity.Copy() + } else { + value.Add(quantity) + limits[name] = value + } + } + } + // init containers define the minimum of any resource + for _, container := range pod.Spec.InitContainers { + for name, quantity := range container.Resources.Requests { + value, ok := reqs[name] + if !ok { + reqs[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + reqs[name] = *quantity.Copy() + } + } + for name, quantity := range container.Resources.Limits { + value, ok := limits[name] + if !ok { + limits[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + limits[name] = *quantity.Copy() + } + } + } + return +} + +// finds and returns the request for a specific resource. 
+func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 { + if resource == v1.ResourcePods { + return 1 + } + totalResources := int64(0) + for _, container := range pod.Spec.Containers { + if rQuantity, ok := container.Resources.Requests[resource]; ok { + if resource == v1.ResourceCPU { + totalResources += rQuantity.MilliValue() + } else { + totalResources += rQuantity.Value() + } + } + } + // take max_resource(sum_pod, any_init_container) + for _, container := range pod.Spec.InitContainers { + if rQuantity, ok := container.Resources.Requests[resource]; ok { + if resource == v1.ResourceCPU && rQuantity.MilliValue() > totalResources { + totalResources = rQuantity.MilliValue() + } else if rQuantity.Value() > totalResources { + totalResources = rQuantity.Value() + } + } + } + return totalResources +} + +// ExtractResourceValueByContainerName extracts the value of a resource +// by providing container name +func ExtractResourceValueByContainerName(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string) (string, error) { + container, err := findContainerInPod(pod, containerName) + if err != nil { + return "", err + } + return ExtractContainerResourceValue(fs, container) +} + +// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource +// by providing container name and node allocatable +func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string, nodeAllocatable v1.ResourceList) (string, error) { + realContainer, err := findContainerInPod(pod, containerName) + if err != nil { + return "", err + } + + containerCopy, err := api.Scheme.DeepCopy(realContainer) + if err != nil { + return "", fmt.Errorf("failed to perform a deep copy of container object: %v", err) + } + + container, ok := containerCopy.(*v1.Container) + if !ok { + return "", fmt.Errorf("unexpected type returned from deep copy of container object") + } + + MergeContainerResourceLimits(container, 
nodeAllocatable) + + return ExtractContainerResourceValue(fs, container) +} + +// ExtractContainerResourceValue extracts the value of a resource +// in an already known container +func ExtractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.Container) (string, error) { + divisor := resource.Quantity{} + if divisor.Cmp(fs.Divisor) == 0 { + divisor = resource.MustParse("1") + } else { + divisor = fs.Divisor + } + + switch fs.Resource { + case "limits.cpu": + return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) + case "limits.memory": + return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) + case "requests.cpu": + return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) + case "requests.memory": + return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) + } + + return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) +} + +// convertResourceCPUToString converts cpu value to the format of divisor and returns +// ceiling of the value. +func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { + c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) + return strconv.FormatInt(c, 10), nil +} + +// convertResourceMemoryToString converts memory value to the format of divisor and returns +// ceiling of the value. 
+func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} + +// findContainerInPod finds a container by its name in the provided pod +func findContainerInPod(pod *v1.Pod, containerName string) (*v1.Container, error) { + for _, container := range pod.Spec.Containers { + if container.Name == containerName { + return &container, nil + } + } + return nil, fmt.Errorf("container %s not found", containerName) +} + +// MergeContainerResourceLimits checks if a limit is applied for +// the container, and if not, it sets the limit to the passed resource list. +func MergeContainerResourceLimits(container *v1.Container, + allocatable v1.ResourceList) { + if container.Resources.Limits == nil { + container.Resources.Limits = make(v1.ResourceList) + } + for _, resource := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} { + if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() { + if cap, exists := allocatable[resource]; exists { + container.Resources.Limits[resource] = *cap.Copy() + } + } + } +} diff --git a/pkg/api/v1/resource_helpers_test.go b/pkg/api/v1/resource/helpers_test.go similarity index 64% rename from pkg/api/v1/resource_helpers_test.go rename to pkg/api/v1/resource/helpers_test.go index b22f91bae0..3dfbd0ac8e 100644 --- a/pkg/api/v1/resource_helpers_test.go +++ b/pkg/api/v1/resource/helpers_test.go @@ -14,23 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1 +package resource import ( "testing" - "time" "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/api/v1" ) func TestResourceHelpers(t *testing.T) { cpuLimit := resource.MustParse("10") memoryLimit := resource.MustParse("10G") - resourceSpec := ResourceRequirements{ - Limits: ResourceList{ + resourceSpec := v1.ResourceRequirements{ + Limits: v1.ResourceList{ "cpu": cpuLimit, "memory": memoryLimit, "kube.io/storage": memoryLimit, @@ -42,8 +41,8 @@ func TestResourceHelpers(t *testing.T) { if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 { t.Errorf("expected memorylimit %v, got %v", memoryLimit, res) } - resourceSpec = ResourceRequirements{ - Limits: ResourceList{ + resourceSpec = v1.ResourceRequirements{ + Limits: v1.ResourceList{ "memory": memoryLimit, "kube.io/storage": memoryLimit, }, @@ -57,7 +56,7 @@ func TestResourceHelpers(t *testing.T) { } func TestDefaultResourceHelpers(t *testing.T) { - resourceList := ResourceList{} + resourceList := v1.ResourceList{} if resourceList.Cpu().Format != resource.DecimalSI { t.Errorf("expected %v, actual %v", resource.DecimalSI, resourceList.Cpu().Format) } @@ -66,71 +65,16 @@ func TestDefaultResourceHelpers(t *testing.T) { } } -func newPod(now metav1.Time, ready bool, beforeSec int) *Pod { - conditionStatus := ConditionFalse - if ready { - conditionStatus = ConditionTrue - } - return &Pod{ - Status: PodStatus{ - Conditions: []PodCondition{ - { - Type: PodReady, - LastTransitionTime: metav1.NewTime(now.Time.Add(-1 * time.Duration(beforeSec) * time.Second)), - Status: conditionStatus, - }, - }, - }, - } -} - -func TestIsPodAvailable(t *testing.T) { - now := metav1.Now() - tests := []struct { - pod *Pod - minReadySeconds int32 - expected bool - }{ - { - pod: newPod(now, false, 0), - minReadySeconds: 0, - expected: false, - }, - { - pod: newPod(now, true, 0), - minReadySeconds: 1, - expected: false, - }, 
- { - pod: newPod(now, true, 0), - minReadySeconds: 0, - expected: true, - }, - { - pod: newPod(now, true, 51), - minReadySeconds: 50, - expected: true, - }, - } - - for i, test := range tests { - isAvailable := IsPodAvailable(test.pod, test.minReadySeconds, now) - if isAvailable != test.expected { - t.Errorf("[tc #%d] expected available pod: %t, got: %t", i, test.expected, isAvailable) - } - } -} - func TestExtractResourceValue(t *testing.T) { cases := []struct { - fs *ResourceFieldSelector - pod *Pod + fs *v1.ResourceFieldSelector + pod *v1.Pod cName string expectedValue string expectedError error }{ { - fs: &ResourceFieldSelector{ + fs: &v1.ResourceFieldSelector{ Resource: "limits.cpu", }, cName: "foo", @@ -138,7 +82,7 @@ func TestExtractResourceValue(t *testing.T) { expectedValue: "9", }, { - fs: &ResourceFieldSelector{ + fs: &v1.ResourceFieldSelector{ Resource: "requests.cpu", }, cName: "foo", @@ -146,7 +90,7 @@ func TestExtractResourceValue(t *testing.T) { expectedValue: "0", }, { - fs: &ResourceFieldSelector{ + fs: &v1.ResourceFieldSelector{ Resource: "requests.cpu", }, cName: "foo", @@ -154,7 +98,7 @@ func TestExtractResourceValue(t *testing.T) { expectedValue: "8", }, { - fs: &ResourceFieldSelector{ + fs: &v1.ResourceFieldSelector{ Resource: "requests.cpu", }, cName: "foo", @@ -162,7 +106,7 @@ func TestExtractResourceValue(t *testing.T) { expectedValue: "1", }, { - fs: &ResourceFieldSelector{ + fs: &v1.ResourceFieldSelector{ Resource: "requests.cpu", Divisor: resource.MustParse("100m"), }, @@ -171,7 +115,7 @@ func TestExtractResourceValue(t *testing.T) { expectedValue: "12", }, { - fs: &ResourceFieldSelector{ + fs: &v1.ResourceFieldSelector{ Resource: "requests.memory", }, cName: "foo", @@ -179,7 +123,7 @@ func TestExtractResourceValue(t *testing.T) { expectedValue: "104857600", }, { - fs: &ResourceFieldSelector{ + fs: &v1.ResourceFieldSelector{ Resource: "requests.memory", Divisor: resource.MustParse("1Mi"), }, @@ -188,7 +132,7 @@ func 
TestExtractResourceValue(t *testing.T) { expectedValue: "100", }, { - fs: &ResourceFieldSelector{ + fs: &v1.ResourceFieldSelector{ Resource: "limits.memory", }, cName: "foo", @@ -208,26 +152,26 @@ func TestExtractResourceValue(t *testing.T) { } } -func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *Pod { - resources := ResourceRequirements{ - Limits: make(ResourceList), - Requests: make(ResourceList), +func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *v1.Pod { + resources := v1.ResourceRequirements{ + Limits: make(v1.ResourceList), + Requests: make(v1.ResourceList), } if cpuLimit != "" { - resources.Limits[ResourceCPU] = resource.MustParse(cpuLimit) + resources.Limits[v1.ResourceCPU] = resource.MustParse(cpuLimit) } if memoryLimit != "" { - resources.Limits[ResourceMemory] = resource.MustParse(memoryLimit) + resources.Limits[v1.ResourceMemory] = resource.MustParse(memoryLimit) } if cpuRequest != "" { - resources.Requests[ResourceCPU] = resource.MustParse(cpuRequest) + resources.Requests[v1.ResourceCPU] = resource.MustParse(cpuRequest) } if memoryRequest != "" { - resources.Requests[ResourceMemory] = resource.MustParse(memoryRequest) + resources.Requests[v1.ResourceMemory] = resource.MustParse(memoryRequest) } - return &Pod{ - Spec: PodSpec{ - Containers: []Container{ + return &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: cname, Resources: resources, diff --git a/pkg/api/v1/resource_helpers.go b/pkg/api/v1/resource_helpers.go deleted file mode 100644 index cc769d12eb..0000000000 --- a/pkg/api/v1/resource_helpers.go +++ /dev/null @@ -1,358 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - "math" - "strconv" - "time" - - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" -) - -// Returns string version of ResourceName. -func (self ResourceName) String() string { - return string(self) -} - -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} -} - -// Returns the Memory limit if specified. -func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} -} - -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} -} - -func (self *ResourceList) NvidiaGPU() *resource.Quantity { - if val, ok := (*self)[ResourceNvidiaGPU]; ok { - return &val - } - return &resource.Quantity{} -} - -func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) { - for i := range statuses { - if statuses[i].Name == name { - return statuses[i], true - } - } - return ContainerStatus{}, false -} - -func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus { - for i := range statuses { - if statuses[i].Name == name { - return statuses[i] - } - } - return ContainerStatus{} -} - -// IsPodAvailable returns true if a pod is available; false otherwise. 
-// Precondition for an available pod is that it must be ready. On top -// of that, there are two cases when a pod can be considered available: -// 1. minReadySeconds == 0, or -// 2. LastTransitionTime (is set) + minReadySeconds < current time -func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool { - if !IsPodReady(pod) { - return false - } - - c := GetPodReadyCondition(pod.Status) - minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second - if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { - return true - } - return false -} - -// IsPodReady returns true if a pod is ready; false otherwise. -func IsPodReady(pod *Pod) bool { - return IsPodReadyConditionTrue(pod.Status) -} - -// IsPodReady retruns true if a pod is ready; false otherwise. -func IsPodReadyConditionTrue(status PodStatus) bool { - condition := GetPodReadyCondition(status) - return condition != nil && condition.Status == ConditionTrue -} - -// Extracts the pod ready condition from the given status and returns that. -// Returns nil if the condition is not present. -func GetPodReadyCondition(status PodStatus) *PodCondition { - _, condition := GetPodCondition(&status, PodReady) - return condition -} - -// GetPodCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. -func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// GetNodeCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. 
-func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the -// status has changed. -// Returns true if pod condition has changed or has been added. -func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { - condition.LastTransitionTime = metav1.Now() - // Try to find this pod condition. - conditionIndex, oldCondition := GetPodCondition(status, condition.Type) - - if oldCondition == nil { - // We are adding new pod condition. - status.Conditions = append(status.Conditions, *condition) - return true - } else { - // We are updating an existing condition, so we need to check if it has changed. - if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } - - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && - condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) - - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. - return !isEqual - } -} - -// IsNodeReady returns true if a node is ready; false otherwise. -func IsNodeReady(node *Node) bool { - for _, c := range node.Status.Conditions { - if c.Type == NodeReady { - return c.Status == ConditionTrue - } - } - return false -} - -// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all -// containers of the pod. 
-func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) { - reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{} - for _, container := range pod.Spec.Containers { - for name, quantity := range container.Resources.Requests { - if value, ok := reqs[name]; !ok { - reqs[name] = *quantity.Copy() - } else { - value.Add(quantity) - reqs[name] = value - } - } - for name, quantity := range container.Resources.Limits { - if value, ok := limits[name]; !ok { - limits[name] = *quantity.Copy() - } else { - value.Add(quantity) - limits[name] = value - } - } - } - // init containers define the minimum of any resource - for _, container := range pod.Spec.InitContainers { - for name, quantity := range container.Resources.Requests { - value, ok := reqs[name] - if !ok { - reqs[name] = *quantity.Copy() - continue - } - if quantity.Cmp(value) > 0 { - reqs[name] = *quantity.Copy() - } - } - for name, quantity := range container.Resources.Limits { - value, ok := limits[name] - if !ok { - limits[name] = *quantity.Copy() - continue - } - if quantity.Cmp(value) > 0 { - limits[name] = *quantity.Copy() - } - } - } - return -} - -// finds and returns the request for a specific resource. 
-func GetResourceRequest(pod *Pod, resource ResourceName) int64 { - if resource == ResourcePods { - return 1 - } - totalResources := int64(0) - for _, container := range pod.Spec.Containers { - if rQuantity, ok := container.Resources.Requests[resource]; ok { - if resource == ResourceCPU { - totalResources += rQuantity.MilliValue() - } else { - totalResources += rQuantity.Value() - } - } - } - // take max_resource(sum_pod, any_init_container) - for _, container := range pod.Spec.InitContainers { - if rQuantity, ok := container.Resources.Requests[resource]; ok { - if resource == ResourceCPU && rQuantity.MilliValue() > totalResources { - totalResources = rQuantity.MilliValue() - } else if rQuantity.Value() > totalResources { - totalResources = rQuantity.Value() - } - } - } - return totalResources -} - -// ExtractResourceValueByContainerName extracts the value of a resource -// by providing container name -func ExtractResourceValueByContainerName(fs *ResourceFieldSelector, pod *Pod, containerName string) (string, error) { - container, err := findContainerInPod(pod, containerName) - if err != nil { - return "", err - } - return ExtractContainerResourceValue(fs, container) -} - -// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource -// by providing container name and node allocatable -func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *ResourceFieldSelector, pod *Pod, containerName string, nodeAllocatable ResourceList) (string, error) { - realContainer, err := findContainerInPod(pod, containerName) - if err != nil { - return "", err - } - - containerCopy, err := api.Scheme.DeepCopy(realContainer) - if err != nil { - return "", fmt.Errorf("failed to perform a deep copy of container object: %v", err) - } - - container, ok := containerCopy.(*Container) - if !ok { - return "", fmt.Errorf("unexpected type returned from deep copy of container object") - } - - MergeContainerResourceLimits(container, nodeAllocatable) - - return 
ExtractContainerResourceValue(fs, container) -} - -// ExtractContainerResourceValue extracts the value of a resource -// in an already known container -func ExtractContainerResourceValue(fs *ResourceFieldSelector, container *Container) (string, error) { - divisor := resource.Quantity{} - if divisor.Cmp(fs.Divisor) == 0 { - divisor = resource.MustParse("1") - } else { - divisor = fs.Divisor - } - - switch fs.Resource { - case "limits.cpu": - return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) - case "limits.memory": - return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) - case "requests.cpu": - return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) - case "requests.memory": - return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) - } - - return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) -} - -// convertResourceCPUToString converts cpu value to the format of divisor and returns -// ceiling of the value. -func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { - c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) - return strconv.FormatInt(c, 10), nil -} - -// convertResourceMemoryToString converts memory value to the format of divisor and returns -// ceiling of the value. 
-func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { - m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) - return strconv.FormatInt(m, 10), nil -} - -// findContainerInPod finds a container by its name in the provided pod -func findContainerInPod(pod *Pod, containerName string) (*Container, error) { - for _, container := range pod.Spec.Containers { - if container.Name == containerName { - return &container, nil - } - } - return nil, fmt.Errorf("container %s not found", containerName) -} - -// MergeContainerResourceLimits checks if a limit is applied for -// the container, and if not, it sets the limit to the passed resource list. -func MergeContainerResourceLimits(container *Container, - allocatable ResourceList) { - if container.Resources.Limits == nil { - container.Resources.Limits = make(ResourceList) - } - for _, resource := range []ResourceName{ResourceCPU, ResourceMemory} { - if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() { - if cap, exists := allocatable[resource]; exists { - container.Resources.Limits[resource] = *cap.Copy() - } - } - } -} diff --git a/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD b/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD index 65bee7f1d4..e1ad6592df 100644 --- a/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD +++ b/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD @@ -39,6 +39,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/ref:go_default_library", "//pkg/apis/policy/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset/scheme:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go b/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go index 
5182e0768f..2ae4daa112 100644 --- a/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go +++ b/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1/ref" ) // The EventExpansion interface allows manually adding extra methods to the EventInterface. @@ -99,7 +100,7 @@ func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) // object must match this event's client namespace unless the event client // was made with the "" namespace. func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { - ref, err := v1.GetReference(scheme, objOrRef) + ref, err := ref.GetReference(scheme, objOrRef) if err != nil { return nil, err } diff --git a/pkg/client/conditions/BUILD b/pkg/client/conditions/BUILD index 84cae78cd4..c6ff98a5bf 100644 --- a/pkg/client/conditions/BUILD +++ b/pkg/client/conditions/BUILD @@ -13,6 +13,7 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", diff --git a/pkg/client/conditions/conditions.go b/pkg/client/conditions/conditions.go index 21e1b190c6..6bf11b3a35 100644 --- a/pkg/client/conditions/conditions.go +++ b/pkg/client/conditions/conditions.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) // ErrPodCompleted is returned by PodRunning or PodContainerRunning to indicate that @@ -83,7 +84,7 @@ func PodRunningAndReady(event watch.Event) (bool, error) { case v1.PodFailed, v1.PodSucceeded: return false, 
ErrPodCompleted case v1.PodRunning: - return v1.IsPodReady(t), nil + return podutil.IsPodReady(t), nil } } return false, nil diff --git a/pkg/controller/BUILD b/pkg/controller/BUILD index cd5fa0c2ba..3944683f72 100644 --- a/pkg/controller/BUILD +++ b/pkg/controller/BUILD @@ -22,6 +22,8 @@ go_library( "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/api/v1/helper:go_default_library", + "//pkg/api/v1/pod:go_default_library", + "//pkg/api/v1/ref:go_default_library", "//pkg/api/validation:go_default_library", "//pkg/apis/authentication/v1:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", diff --git a/pkg/controller/cloud/BUILD b/pkg/controller/cloud/BUILD index 36858503e2..2c8b4962bf 100644 --- a/pkg/controller/cloud/BUILD +++ b/pkg/controller/cloud/BUILD @@ -15,6 +15,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/node:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library", "//pkg/cloudprovider:go_default_library", diff --git a/pkg/controller/cloud/nodecontroller.go b/pkg/controller/cloud/nodecontroller.go index 049f12cfc6..09ad6ccdf7 100644 --- a/pkg/controller/cloud/nodecontroller.go +++ b/pkg/controller/cloud/nodecontroller.go @@ -31,6 +31,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + nodeutil "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1" "k8s.io/kubernetes/pkg/cloudprovider" @@ -102,7 +103,7 @@ func (cnc *CloudNodeController) Run() { // Try to get the current node status // If node status is empty, then kubelet has not posted ready status yet. 
In this case, process next node for rep := 0; rep < nodeStatusUpdateRetry; rep++ { - _, currentReadyCondition = v1.GetNodeCondition(&node.Status, v1.NodeReady) + _, currentReadyCondition = nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) if currentReadyCondition != nil { break } diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index f7ea1cd06a..732d72cfec 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -42,6 +42,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/kubernetes/pkg/api/v1/ref" "k8s.io/kubernetes/pkg/api/validation" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -441,7 +443,7 @@ func getPodsAnnotationSet(template *v1.PodTemplateSpec, object runtime.Object) ( for k, v := range template.Annotations { desiredAnnotations[k] = v } - createdByRef, err := v1.GetReference(api.Scheme, object) + createdByRef, err := ref.GetReference(api.Scheme, object) if err != nil { return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err) } @@ -674,13 +676,13 @@ func (s ByLogging) Less(i, j int) bool { return m[s[i].Status.Phase] < m[s[j].Status.Phase] } // 3. ready < not ready - if v1.IsPodReady(s[i]) != v1.IsPodReady(s[j]) { - return v1.IsPodReady(s[i]) + if podutil.IsPodReady(s[i]) != podutil.IsPodReady(s[j]) { + return podutil.IsPodReady(s[i]) } // TODO: take availability into account when we push minReadySeconds information from deployment into pods, // see https://github.com/kubernetes/kubernetes/issues/22065 // 4. 
Been ready for more time < less time < empty time - if v1.IsPodReady(s[i]) && v1.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i])) } // 5. Pods with containers with higher restart counts < lower restart counts @@ -713,14 +715,14 @@ func (s ActivePods) Less(i, j int) bool { } // 3. Not ready < ready // If only one of the pods is not ready, the not ready one is smaller - if v1.IsPodReady(s[i]) != v1.IsPodReady(s[j]) { - return !v1.IsPodReady(s[i]) + if podutil.IsPodReady(s[i]) != podutil.IsPodReady(s[j]) { + return !podutil.IsPodReady(s[i]) } // TODO: take availability into account when we push minReadySeconds information from deployment into pods, // see https://github.com/kubernetes/kubernetes/issues/22065 // 4. Been ready for empty time < less time < more time // If both pods are ready, the latest ready one is smaller - if v1.IsPodReady(s[i]) && v1.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j])) } // 5. 
Pods with containers with higher restart counts < lower restart counts @@ -744,7 +746,7 @@ func afterOrZero(t1, t2 metav1.Time) bool { } func podReadyTime(pod *v1.Pod) metav1.Time { - if v1.IsPodReady(pod) { + if podutil.IsPodReady(pod) { for _, c := range pod.Status.Conditions { // we only care about pod ready conditions if c.Type == v1.PodReady && c.Status == v1.ConditionTrue { diff --git a/pkg/controller/cronjob/BUILD b/pkg/controller/cronjob/BUILD index c6a438e5ab..232d785e87 100644 --- a/pkg/controller/cronjob/BUILD +++ b/pkg/controller/cronjob/BUILD @@ -20,6 +20,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/ref:go_default_library", "//pkg/apis/batch/v1:go_default_library", "//pkg/apis/batch/v2alpha1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", diff --git a/pkg/controller/cronjob/cronjob_controller.go b/pkg/controller/cronjob/cronjob_controller.go index 6613814d55..1d7af75f7e 100644 --- a/pkg/controller/cronjob/cronjob_controller.go +++ b/pkg/controller/cronjob/cronjob_controller.go @@ -47,6 +47,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1/ref" batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -393,5 +394,5 @@ func deleteJob(sj *batchv2alpha1.CronJob, job *batchv1.Job, jc jobControlInterfa } func getRef(object runtime.Object) (*v1.ObjectReference, error) { - return v1.GetReference(api.Scheme, object) + return ref.GetReference(api.Scheme, object) } diff --git a/pkg/controller/cronjob/utils.go b/pkg/controller/cronjob/utils.go index 7762b26587..4d59ab5b3b 100644 --- a/pkg/controller/cronjob/utils.go +++ b/pkg/controller/cronjob/utils.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + 
"k8s.io/kubernetes/pkg/api/v1/ref" batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" ) @@ -216,7 +217,7 @@ func getTimeHash(scheduledTime time.Time) int64 { // makeCreatedByRefJson makes a json string with an object reference for use in "created-by" annotation value func makeCreatedByRefJson(object runtime.Object) (string, error) { - createdByRef, err := v1.GetReference(api.Scheme, object) + createdByRef, err := ref.GetReference(api.Scheme, object) if err != nil { return "", fmt.Errorf("unable to get controller reference: %v", err) } diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD index e5cdc8eb84..62f4569549 100644 --- a/pkg/controller/daemon/BUILD +++ b/pkg/controller/daemon/BUILD @@ -20,6 +20,7 @@ go_library( "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/api/v1/helper:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library", @@ -64,6 +65,7 @@ go_test( "//pkg/api:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", "//pkg/client/informers/informers_generated/externalversions:go_default_library", diff --git a/pkg/controller/daemon/daemoncontroller.go b/pkg/controller/daemon/daemoncontroller.go index 984ce8a8c8..f51681246b 100644 --- a/pkg/controller/daemon/daemoncontroller.go +++ b/pkg/controller/daemon/daemoncontroller.go @@ -38,6 +38,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" extensions 
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1" @@ -323,7 +324,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) { // Two different versions of the same pod will always have different RVs. return } - changedToReady := !v1.IsPodReady(oldPod) && v1.IsPodReady(curPod) + changedToReady := !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels) curControllerRef := controller.GetControllerOf(curPod) @@ -732,9 +733,9 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet) daemonPods, _ := nodeToDaemonPods[node.Name] sort.Sort(podByCreationTimestamp(daemonPods)) pod := daemonPods[0] - if v1.IsPodReady(pod) { + if podutil.IsPodReady(pod) { numberReady++ - if v1.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) { + if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) { numberAvailable++ } } diff --git a/pkg/controller/daemon/daemoncontroller_test.go b/pkg/controller/daemon/daemoncontroller_test.go index a8e6052fbf..9d1c94039e 100644 --- a/pkg/controller/daemon/daemoncontroller_test.go +++ b/pkg/controller/daemon/daemoncontroller_test.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions" @@ -333,7 +334,7 @@ func markPodsReady(store cache.Store) { for _, obj := range store.List() { pod := obj.(*v1.Pod) condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} - v1.UpdatePodCondition(&pod.Status, &condition) + 
podutil.UpdatePodCondition(&pod.Status, &condition) } } diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go index 56645b4eb1..c8322fae07 100644 --- a/pkg/controller/daemon/update.go +++ b/pkg/controller/daemon/update.go @@ -25,6 +25,7 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" intstrutil "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/controller/daemon/util" ) @@ -112,7 +113,7 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, } available := false for _, pod := range daemonPods { - if v1.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) { + if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) { available = true break } diff --git a/pkg/controller/daemon/util/BUILD b/pkg/controller/daemon/util/BUILD index 1ae12fc6d1..5e533f83ab 100644 --- a/pkg/controller/daemon/util/BUILD +++ b/pkg/controller/daemon/util/BUILD @@ -15,6 +15,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/util/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/daemon/util/daemonset_util.go b/pkg/controller/daemon/util/daemonset_util.go index 2b1d71021d..c935076af9 100644 --- a/pkg/controller/daemon/util/daemonset_util.go +++ b/pkg/controller/daemon/util/daemonset_util.go @@ -22,6 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" labelsutil "k8s.io/kubernetes/pkg/util/labels" ) @@ -52,7 +53,7 @@ func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) 
([]*v1.Pod, []* unavailablePods := []*v1.Pod{} availablePods := []*v1.Pod{} for _, pod := range pods { - if v1.IsPodAvailable(pod, minReadySeconds, metav1.Now()) { + if podutil.IsPodAvailable(pod, minReadySeconds, metav1.Now()) { availablePods = append(availablePods, pod) } else { unavailablePods = append(unavailablePods, pod) diff --git a/pkg/controller/disruption/BUILD b/pkg/controller/disruption/BUILD index 806d0be404..feaf20d004 100644 --- a/pkg/controller/disruption/BUILD +++ b/pkg/controller/disruption/BUILD @@ -15,6 +15,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/apis/policy/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/typed/policy/v1beta1:go_default_library", diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go index 3cebdf0223..0ce105cb59 100644 --- a/pkg/controller/disruption/disruption.go +++ b/pkg/controller/disruption/disruption.go @@ -34,6 +34,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" policyclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1" @@ -589,7 +590,7 @@ Pod: if disruptionTime, found := disruptedPods[pod.Name]; found && disruptionTime.Time.Add(DeletionTimeout).After(currentTime) { continue } - if v1.IsPodReady(pod) { + if podutil.IsPodReady(pod) { currentHealthy++ continue Pod } diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index 1f243565de..1570ffdfad 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -380,7 +380,7 @@ func (e 
*EndpointController) syncService(key string) error { epa.Hostname = hostname } - if tolerateUnreadyEndpoints || v1.IsPodReady(pod) { + if tolerateUnreadyEndpoints || podutil.IsPodReady(pod) { subsets = append(subsets, v1.EndpointSubset{ Addresses: []v1.EndpointAddress{epa}, Ports: []v1.EndpointPort{epp}, diff --git a/pkg/controller/node/BUILD b/pkg/controller/node/BUILD index 056b2dae22..e1515612ee 100644 --- a/pkg/controller/node/BUILD +++ b/pkg/controller/node/BUILD @@ -29,6 +29,7 @@ go_library( "//pkg/api/helper:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/api/v1/helper:go_default_library", + "//pkg/api/v1/node:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library", "//pkg/client/informers/informers_generated/externalversions/extensions/v1beta1:go_default_library", diff --git a/pkg/controller/node/nodecontroller.go b/pkg/controller/node/nodecontroller.go index 254085c872..f632e6ef6c 100644 --- a/pkg/controller/node/nodecontroller.go +++ b/pkg/controller/node/nodecontroller.go @@ -41,6 +41,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + nodeutil "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1" extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1" @@ -413,7 +414,7 @@ func (nc *NodeController) doTaintingPass() { zone := utilnode.GetZoneKey(node) EvictionsNumber.WithLabelValues(zone).Inc() } - _, condition := v1.GetNodeCondition(&node.Status, v1.NodeReady) + _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) // Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive. 
taintToAdd := v1.Taint{} oppositeTaint := v1.Taint{} @@ -874,7 +875,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1. var err error var gracePeriod time.Duration var observedReadyCondition v1.NodeCondition - _, currentReadyCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady) + _, currentReadyCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) if currentReadyCondition == nil { // If ready condition is nil, then kubelet (or nodecontroller) never posted node status. // A fake ready condition is created, where LastProbeTime and LastTransitionTime is set @@ -914,9 +915,9 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1. // if that's the case, but it does not seem necessary. var savedCondition *v1.NodeCondition if found { - _, savedCondition = v1.GetNodeCondition(&savedNodeStatus.status, v1.NodeReady) + _, savedCondition = nodeutil.GetNodeCondition(&savedNodeStatus.status, v1.NodeReady) } - _, observedCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady) + _, observedCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) if !found { glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name) savedNodeStatus = nodeStatusData{ @@ -993,7 +994,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1. 
remainingNodeConditionTypes := []v1.NodeConditionType{v1.NodeOutOfDisk, v1.NodeMemoryPressure, v1.NodeDiskPressure} nowTimestamp := nc.now() for _, nodeConditionType := range remainingNodeConditionTypes { - _, currentCondition := v1.GetNodeCondition(&node.Status, nodeConditionType) + _, currentCondition := nodeutil.GetNodeCondition(&node.Status, nodeConditionType) if currentCondition == nil { glog.V(2).Infof("Condition %v of node %v was never updated by kubelet", nodeConditionType, node.Name) node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{ @@ -1016,7 +1017,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1. } } - _, currentCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady) + _, currentCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) if !apiequality.Semantic.DeepEqual(currentCondition, &observedReadyCondition) { if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil { glog.Errorf("Error updating node %s: %v", node.Name, err) diff --git a/pkg/controller/node/testutil/BUILD b/pkg/controller/node/testutil/BUILD index 43454e5a4b..e29fd27be7 100644 --- a/pkg/controller/node/testutil/BUILD +++ b/pkg/controller/node/testutil/BUILD @@ -28,6 +28,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library", + "//vendor/k8s.io/client-go/pkg/api/v1/ref:go_default_library", "//vendor/k8s.io/client-go/util/clock:go_default_library", ], ) diff --git a/pkg/controller/node/testutil/test_utils.go b/pkg/controller/node/testutil/test_utils.go index da2fdbc360..7d22059ce9 100644 --- a/pkg/controller/node/testutil/test_utils.go +++ b/pkg/controller/node/testutil/test_utils.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/watch" clientv1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/api/v1/ref" "k8s.io/client-go/util/clock" 
"k8s.io/kubernetes/pkg/api" @@ -356,7 +357,7 @@ func (f *FakeRecorder) PastEventf(obj runtime.Object, timestamp metav1.Time, eve func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp metav1.Time, eventtype, reason, message string) { f.Lock() defer f.Unlock() - ref, err := clientv1.GetReference(api.Scheme, obj) + ref, err := ref.GetReference(api.Scheme, obj) if err != nil { glog.Errorf("Encoutered error while getting reference: %v", err) return diff --git a/pkg/controller/podautoscaler/BUILD b/pkg/controller/podautoscaler/BUILD index a4f3b9729b..4d6b4cded3 100644 --- a/pkg/controller/podautoscaler/BUILD +++ b/pkg/controller/podautoscaler/BUILD @@ -20,6 +20,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/apis/autoscaling/v1:go_default_library", "//pkg/apis/autoscaling/v2alpha1:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", diff --git a/pkg/controller/podautoscaler/replica_calculator.go b/pkg/controller/podautoscaler/replica_calculator.go index a2924ef3d5..2ab69ac7d4 100644 --- a/pkg/controller/podautoscaler/replica_calculator.go +++ b/pkg/controller/podautoscaler/replica_calculator.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1" v1coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1" metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" @@ -77,7 +78,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti requests[pod.Name] = podSum - if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) { + if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(&pod) { // save this pod name for later, but pretend it doesn't exist for now unreadyPods.Insert(pod.Name) 
delete(metrics, pod.Name) @@ -192,7 +193,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet missingPods := sets.NewString() for _, pod := range podList.Items { - if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) { + if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(&pod) { // save this pod name for later, but pretend it doesn't exist for now unreadyPods.Insert(pod.Name) delete(metrics, pod.Name) diff --git a/pkg/controller/replicaset/BUILD b/pkg/controller/replicaset/BUILD index ac54770fc2..8e901e87a1 100644 --- a/pkg/controller/replicaset/BUILD +++ b/pkg/controller/replicaset/BUILD @@ -19,6 +19,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library", diff --git a/pkg/controller/replicaset/replica_set.go b/pkg/controller/replicaset/replica_set.go index 7dc02c2371..d3125bace7 100644 --- a/pkg/controller/replicaset/replica_set.go +++ b/pkg/controller/replicaset/replica_set.go @@ -38,6 +38,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -317,7 +318,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) { // a Pod transitioned to Ready. // Note that this still suffers from #29229, we are just moving the problem one level // "closer" to kubelet (from the deployment to the replica set controller). 
- if !v1.IsPodReady(oldPod) && v1.IsPodReady(curPod) && rs.Spec.MinReadySeconds > 0 { + if !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) && rs.Spec.MinReadySeconds > 0 { glog.V(2).Infof("ReplicaSet %q will be enqueued after %ds for availability check", rs.Name, rs.Spec.MinReadySeconds) // Add a second to avoid milliseconds skew in AddAfter. // See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info. diff --git a/pkg/controller/replicaset/replica_set_utils.go b/pkg/controller/replicaset/replica_set_utils.go index ff94509604..733c989920 100644 --- a/pkg/controller/replicaset/replica_set_utils.go +++ b/pkg/controller/replicaset/replica_set_utils.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1" ) @@ -96,9 +97,9 @@ func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageRe if templateLabel.Matches(labels.Set(pod.Labels)) { fullyLabeledReplicasCount++ } - if v1.IsPodReady(pod) { + if podutil.IsPodReady(pod) { readyReplicasCount++ - if v1.IsPodAvailable(pod, rs.Spec.MinReadySeconds, metav1.Now()) { + if podutil.IsPodAvailable(pod, rs.Spec.MinReadySeconds, metav1.Now()) { availableReplicasCount++ } } diff --git a/pkg/controller/replication/BUILD b/pkg/controller/replication/BUILD index 6ef4f565a5..a595021d8c 100644 --- a/pkg/controller/replication/BUILD +++ b/pkg/controller/replication/BUILD @@ -19,6 +19,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", 
"//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library", diff --git a/pkg/controller/replication/replication_controller.go b/pkg/controller/replication/replication_controller.go index eebf799547..d16e930861 100644 --- a/pkg/controller/replication/replication_controller.go +++ b/pkg/controller/replication/replication_controller.go @@ -39,6 +39,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1" corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" @@ -312,7 +313,7 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) { // a Pod transitioned to Ready. // Note that this still suffers from #29229, we are just moving the problem one level // "closer" to kubelet (from the deployment to the ReplicationController controller). - if !v1.IsPodReady(oldPod) && v1.IsPodReady(curPod) && rc.Spec.MinReadySeconds > 0 { + if !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) && rc.Spec.MinReadySeconds > 0 { glog.V(2).Infof("ReplicationController %q will be enqueued after %ds for availability check", rc.Name, rc.Spec.MinReadySeconds) // Add a second to avoid milliseconds skew in AddAfter. // See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info. 
diff --git a/pkg/controller/replication/replication_controller_utils.go b/pkg/controller/replication/replication_controller_utils.go index 214da4ec5e..5cb3dc5d1d 100644 --- a/pkg/controller/replication/replication_controller_utils.go +++ b/pkg/controller/replication/replication_controller_utils.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1" ) @@ -106,9 +107,9 @@ func calculateStatus(rc *v1.ReplicationController, filteredPods []*v1.Pod, manag if templateLabel.Matches(labels.Set(pod.Labels)) { fullyLabeledReplicasCount++ } - if v1.IsPodReady(pod) { + if podutil.IsPodReady(pod) { readyReplicasCount++ - if v1.IsPodAvailable(pod, rc.Spec.MinReadySeconds, metav1.Now()) { + if podutil.IsPodAvailable(pod, rc.Spec.MinReadySeconds, metav1.Now()) { availableReplicasCount++ } } diff --git a/pkg/controller/route/BUILD b/pkg/controller/route/BUILD index 5c6e68487a..0526ebf08f 100644 --- a/pkg/controller/route/BUILD +++ b/pkg/controller/route/BUILD @@ -17,6 +17,7 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//pkg/api/v1/node:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library", "//pkg/client/listers/core/v1:go_default_library", @@ -42,6 +43,7 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//pkg/api/v1/node:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", "//pkg/client/informers/informers_generated/externalversions:go_default_library", "//pkg/cloudprovider:go_default_library", diff --git a/pkg/controller/route/routecontroller.go b/pkg/controller/route/routecontroller.go index 57fbdf570c..827deaa6e2 100644 --- 
a/pkg/controller/route/routecontroller.go +++ b/pkg/controller/route/routecontroller.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/api/v1" + v1node "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1" corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" @@ -161,7 +162,7 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R }(nodeName, nameHint, route) } else { // Update condition only if it doesn't reflect the current state. - _, condition := v1.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable) + _, condition := v1node.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable) if condition == nil || condition.Status != v1.ConditionFalse { rc.updateNetworkingCondition(types.NodeName(node.Name), true) } diff --git a/pkg/controller/route/routecontroller_test.go b/pkg/controller/route/routecontroller_test.go index ced204b892..5f41c0eac7 100644 --- a/pkg/controller/route/routecontroller_test.go +++ b/pkg/controller/route/routecontroller_test.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/types" core "k8s.io/client-go/testing" "k8s.io/kubernetes/pkg/api/v1" + nodeutil "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions" "k8s.io/kubernetes/pkg/cloudprovider" @@ -208,7 +209,7 @@ func TestReconcile(t *testing.T) { for _, action := range testCase.clientset.Actions() { if action.GetVerb() == "update" && action.GetResource().Resource == "nodes" { node := action.(core.UpdateAction).GetObject().(*v1.Node) - _, condition := v1.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable) + _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable) if condition 
== nil { t.Errorf("%d. Missing NodeNetworkUnavailable condition for Node %v", i, node.Name) } else { diff --git a/pkg/controller/statefulset/BUILD b/pkg/controller/statefulset/BUILD index 2e2d81b91e..2f24d2b92d 100644 --- a/pkg/controller/statefulset/BUILD +++ b/pkg/controller/statefulset/BUILD @@ -20,6 +20,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/apis/apps/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/informers/informers_generated/externalversions/apps/v1beta1:go_default_library", @@ -56,6 +57,7 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/apis/apps/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", "//pkg/client/informers/informers_generated/externalversions:go_default_library", diff --git a/pkg/controller/statefulset/stateful_set_control_test.go b/pkg/controller/statefulset/stateful_set_control_test.go index 2a74eeea89..87bcd78ba7 100644 --- a/pkg/controller/statefulset/stateful_set_control_test.go +++ b/pkg/controller/statefulset/stateful_set_control_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions" @@ -660,7 +661,7 @@ func (spc *fakeStatefulPodControl) setPodReady(set *apps.StatefulSet, ordinal in sort.Sort(ascendingOrdinal(pods)) pod := pods[ordinal] condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} - v1.UpdatePodCondition(&pod.Status, &condition) + podutil.UpdatePodCondition(&pod.Status, &condition) fakeResourceVersion(pod) 
spc.podsIndexer.Update(pod) return spc.podsLister.Pods(set.Namespace).List(selector) @@ -697,7 +698,7 @@ func (spc *fakeStatefulPodControl) addTerminatedPod(set *apps.StatefulSet, ordin pod.DeletionTimestamp = &deleted condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} fakeResourceVersion(pod) - v1.UpdatePodCondition(&pod.Status, &condition) + podutil.UpdatePodCondition(&pod.Status, &condition) spc.podsIndexer.Update(pod) selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) if err != nil { diff --git a/pkg/controller/statefulset/stateful_set_utils.go b/pkg/controller/statefulset/stateful_set_utils.go index a04cb25830..1f0c3cba43 100644 --- a/pkg/controller/statefulset/stateful_set_utils.go +++ b/pkg/controller/statefulset/stateful_set_utils.go @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1" "k8s.io/kubernetes/pkg/controller" @@ -190,7 +191,7 @@ func isRunningAndReady(pod *v1.Pod) bool { if pod.Status.Phase != v1.PodRunning { return false } - podReady := v1.IsPodReady(pod) + podReady := podutil.IsPodReady(pod) // User may have specified a pod readiness override through a debug annotation. 
initialized, ok := pod.Annotations[apps.StatefulSetInitAnnotation] if ok { diff --git a/pkg/controller/statefulset/stateful_set_utils_test.go b/pkg/controller/statefulset/stateful_set_utils_test.go index 2b28ec5832..956674c40b 100644 --- a/pkg/controller/statefulset/stateful_set_utils_test.go +++ b/pkg/controller/statefulset/stateful_set_utils_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1" "k8s.io/kubernetes/pkg/controller" ) @@ -207,7 +208,7 @@ func TestIsRunningAndReady(t *testing.T) { t.Error("isRunningAndReady does not respect Pod condition") } condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} - v1.UpdatePodCondition(&pod.Status, &condition) + podutil.UpdatePodCondition(&pod.Status, &condition) if !isRunningAndReady(pod) { t.Error("Pod should be running and ready") } diff --git a/pkg/controller/volume/persistentvolume/BUILD b/pkg/controller/volume/persistentvolume/BUILD index d3acdd2e13..a03482de4b 100644 --- a/pkg/controller/volume/persistentvolume/BUILD +++ b/pkg/controller/volume/persistentvolume/BUILD @@ -21,6 +21,7 @@ go_library( "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/api/v1/helper:go_default_library", + "//pkg/api/v1/ref:go_default_library", "//pkg/apis/storage/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library", @@ -67,6 +68,7 @@ go_test( "//pkg/api:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/ref:go_default_library", "//pkg/apis/storage/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", diff --git 
a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go index 2f2e51e5b3..019c44f7b2 100644 --- a/pkg/controller/volume/persistentvolume/framework_test.go +++ b/pkg/controller/volume/persistentvolume/framework_test.go @@ -739,7 +739,7 @@ func newClaim(name, claimUID, capacity, boundToVolume string, phase v1.Persisten Phase: phase, }, } - // Make sure v1.GetReference(claim) works + // Make sure ref.GetReference(claim) works claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", name) if len(annotations) > 0 { diff --git a/pkg/controller/volume/persistentvolume/index_test.go b/pkg/controller/volume/persistentvolume/index_test.go index 8c4b4ea4de..75c8109f45 100644 --- a/pkg/controller/volume/persistentvolume/index_test.go +++ b/pkg/controller/volume/persistentvolume/index_test.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1/ref" ) func makePVC(size string, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentVolumeClaim { @@ -616,7 +617,7 @@ func TestFindingPreboundVolumes(t *testing.T) { Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi")}}, }, } - claimRef, err := v1.GetReference(api.Scheme, claim) + claimRef, err := ref.GetReference(api.Scheme, claim) if err != nil { t.Errorf("error getting claimRef: %v", err) } diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index 425ee4d75c..16dbf6d0c6 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + "k8s.io/kubernetes/pkg/api/v1/ref" storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1" 
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset" corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" @@ -771,7 +772,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV volume.Spec.ClaimRef.Namespace != claim.Namespace || volume.Spec.ClaimRef.UID != claim.UID { - claimRef, err := v1.GetReference(api.Scheme, claim) + claimRef, err := ref.GetReference(api.Scheme, claim) if err != nil { return nil, fmt.Errorf("Unexpected error getting claim reference: %v", err) } @@ -1299,7 +1300,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa // Prepare a claimRef to the claim early (to fail before a volume is // provisioned) - claimRef, err := v1.GetReference(api.Scheme, claim) + claimRef, err := ref.GetReference(api.Scheme, claim) if err != nil { glog.V(3).Infof("unexpected error getting claim reference: %v", err) return diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index 7dbfbf187b..33e5aa70f8 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -54,6 +54,7 @@ go_library( "//pkg/api/helper:go_default_library", "//pkg/api/util:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/apps/v1beta1:go_default_library", "//pkg/apis/autoscaling:go_default_library", diff --git a/pkg/kubectl/rolling_updater.go b/pkg/kubectl/rolling_updater.go index 70e72da1c4..099ee105f9 100644 --- a/pkg/kubectl/rolling_updater.go +++ b/pkg/kubectl/rolling_updater.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/helper" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/client/retry" client "k8s.io/kubernetes/pkg/client/unversioned" @@ -430,7 +431,7 @@ func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController, minR 
if v1Pod.DeletionTimestamp != nil { continue } - if !v1.IsPodAvailable(v1Pod, minReadySeconds, r.nowFn()) { + if !podutil.IsPodAvailable(v1Pod, minReadySeconds, r.nowFn()) { continue } switch controller.Name { diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index de0c5f65f5..e44eee88d5 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -38,6 +38,8 @@ go_library( "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/api/v1/helper:go_default_library", + "//pkg/api/v1/pod:go_default_library", + "//pkg/api/v1/resource:go_default_library", "//pkg/api/v1/validation:go_default_library", "//pkg/apis/componentconfig:go_default_library", "//pkg/apis/componentconfig/v1alpha1:go_default_library", diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD index 108403c607..8d91a13f14 100644 --- a/pkg/kubelet/cm/BUILD +++ b/pkg/kubelet/cm/BUILD @@ -25,6 +25,7 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//pkg/api/v1/resource:go_default_library", "//pkg/apis/componentconfig:go_default_library", "//pkg/kubelet/cadvisor:go_default_library", "//pkg/kubelet/cm/util:go_default_library", diff --git a/pkg/kubelet/cm/qos_container_manager_linux.go b/pkg/kubelet/cm/qos_container_manager_linux.go index fa9951902a..8039aa12f9 100644 --- a/pkg/kubelet/cm/qos_container_manager_linux.go +++ b/pkg/kubelet/cm/qos_container_manager_linux.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1/resource" "k8s.io/kubernetes/pkg/kubelet/qos" ) @@ -147,7 +148,7 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass] // we only care about the burstable qos tier continue } - req, _, err := v1.PodRequestsAndLimits(pod) + req, _, err := resource.PodRequestsAndLimits(pod) if err != nil { return err } @@ -187,7 +188,7 @@ func (m *qosContainerManagerImpl) setMemoryReserve(configs map[v1.PodQOSClass]*C // limits are not set for Best Effort 
pods continue } - req, _, err := v1.PodRequestsAndLimits(pod) + req, _, err := resource.PodRequestsAndLimits(pod) if err != nil { glog.V(2).Infof("[Container Manager] Pod resource requests/limits could not be determined. Not setting QOS memory limts.") return diff --git a/pkg/kubelet/container/BUILD b/pkg/kubelet/container/BUILD index 44d4d1fb38..eb6d4d3a2a 100644 --- a/pkg/kubelet/container/BUILD +++ b/pkg/kubelet/container/BUILD @@ -28,6 +28,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/ref:go_default_library", "//pkg/client/unversioned/remotecommand:go_default_library", "//pkg/kubelet/api/v1alpha1/runtime:go_default_library", "//pkg/kubelet/events:go_default_library", diff --git a/pkg/kubelet/container/ref.go b/pkg/kubelet/container/ref.go index dcd303db3d..0251b81341 100644 --- a/pkg/kubelet/container/ref.go +++ b/pkg/kubelet/container/ref.go @@ -21,6 +21,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1/ref" ) var ImplicitContainerPrefix string = "implicitly required container " @@ -38,7 +39,7 @@ func GenerateContainerRef(pod *v1.Pod, container *v1.Container) (*v1.ObjectRefer // start (like the pod infra container). This is not a good way, ugh. 
fieldPath = ImplicitContainerPrefix + container.Name } - ref, err := v1.GetPartialReference(api.Scheme, pod, fieldPath) + ref, err := ref.GetPartialReference(api.Scheme, pod, fieldPath) if err != nil { return nil, err } diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 62632f715b..94dd8e18c3 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -43,6 +43,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/kubernetes/pkg/api/v1/resource" "k8s.io/kubernetes/pkg/api/v1/validation" "k8s.io/kubernetes/pkg/client/unversioned/remotecommand" "k8s.io/kubernetes/pkg/fieldpath" @@ -642,9 +644,9 @@ func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, container *v1.Container) (string, error) { containerName := fs.ContainerName if len(containerName) == 0 { - return v1.ExtractContainerResourceValue(fs, container) + return resource.ExtractContainerResourceValue(fs, container) } else { - return v1.ExtractResourceValueByContainerName(fs, pod, containerName) + return resource.ExtractResourceValueByContainerName(fs, pod, containerName) } } @@ -940,10 +942,10 @@ func hasHostPortConflicts(pods []*v1.Pod) bool { func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) { var cID string - cStatus, found := v1.GetContainerStatus(podStatus.ContainerStatuses, containerName) + cStatus, found := podutil.GetContainerStatus(podStatus.ContainerStatuses, containerName) // if not found, check the init containers if !found { - cStatus, found = v1.GetContainerStatus(podStatus.InitContainerStatuses, containerName) + cStatus, found = podutil.GetContainerStatus(podStatus.InitContainerStatuses, containerName) } if 
!found { return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName) @@ -1047,7 +1049,7 @@ func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { pendingInitialization := 0 failedInitialization := 0 for _, container := range spec.InitContainers { - containerStatus, ok := v1.GetContainerStatus(info, container.Name) + containerStatus, ok := podutil.GetContainerStatus(info, container.Name) if !ok { pendingInitialization++ continue @@ -1084,7 +1086,7 @@ func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { failed := 0 succeeded := 0 for _, container := range spec.Containers { - containerStatus, ok := v1.GetContainerStatus(info, container.Name) + containerStatus, ok := podutil.GetContainerStatus(info, container.Name) if !ok { unknown++ continue @@ -1179,10 +1181,10 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po // s (the PodStatus we are creating) will not have a PodScheduled condition yet, because converStatusToAPIStatus() // does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure // it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true. 
- if _, oldPodScheduled := v1.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil { + if _, oldPodScheduled := podutil.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil { s.Conditions = append(s.Conditions, *oldPodScheduled) } - v1.UpdatePodCondition(&pod.Status, &v1.PodCondition{ + podutil.UpdatePodCondition(&pod.Status, &v1.PodCondition{ Type: v1.PodScheduled, Status: v1.ConditionTrue, }) diff --git a/pkg/kubelet/kubelet_resources.go b/pkg/kubelet/kubelet_resources.go index 98c47f353a..37f15410e0 100644 --- a/pkg/kubelet/kubelet_resources.go +++ b/pkg/kubelet/kubelet_resources.go @@ -23,6 +23,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1/resource" ) // defaultPodLimitsForDownwardApi copies the input pod, and optional container, @@ -52,7 +53,7 @@ func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *v1.Pod, container *v1.Con return nil, nil, fmt.Errorf("unexpected type returned from deep copy of pod object") } for idx := range outputPod.Spec.Containers { - v1.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable) + resource.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable) } var outputContainer *v1.Container @@ -65,7 +66,7 @@ func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *v1.Pod, container *v1.Con if !ok { return nil, nil, fmt.Errorf("unexpected type returned from deep copy of container object") } - v1.MergeContainerResourceLimits(outputContainer, allocatable) + resource.MergeContainerResourceLimits(outputContainer, allocatable) } return outputPod, outputContainer, nil } diff --git a/pkg/kubelet/kuberuntime/BUILD b/pkg/kubelet/kuberuntime/BUILD index f02eba9ccd..1e0ebc03ae 100644 --- a/pkg/kubelet/kuberuntime/BUILD +++ b/pkg/kubelet/kuberuntime/BUILD @@ -29,6 +29,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/ref:go_default_library", 
"//pkg/credentialprovider:go_default_library", "//pkg/kubelet/api:go_default_library", "//pkg/kubelet/api/v1alpha1/runtime:go_default_library", diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index a09909e8c0..45208472b2 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -32,6 +32,7 @@ import ( "k8s.io/client-go/util/flowcontrol" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1/ref" "k8s.io/kubernetes/pkg/credentialprovider" internalapi "k8s.io/kubernetes/pkg/kubelet/api" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" @@ -548,7 +549,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat podContainerChanges := m.computePodContainerChanges(pod, podStatus) glog.V(3).Infof("computePodContainerChanges got %+v for pod %q", podContainerChanges, format.Pod(pod)) if podContainerChanges.CreateSandbox { - ref, err := v1.GetReference(api.Scheme, pod) + ref, err := ref.GetReference(api.Scheme, pod) if err != nil { glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) } diff --git a/pkg/kubelet/preemption/BUILD b/pkg/kubelet/preemption/BUILD index 5489a38710..30c1d012af 100644 --- a/pkg/kubelet/preemption/BUILD +++ b/pkg/kubelet/preemption/BUILD @@ -14,6 +14,7 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//pkg/api/v1/resource:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/events:go_default_library", "//pkg/kubelet/eviction:go_default_library", diff --git a/pkg/kubelet/preemption/preemption.go b/pkg/kubelet/preemption/preemption.go index f5ab045cc6..c84abfc1a0 100644 --- a/pkg/kubelet/preemption/preemption.go +++ b/pkg/kubelet/preemption/preemption.go @@ -24,6 +24,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" 
"k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1/resource" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/eviction" @@ -190,7 +191,7 @@ type admissionRequirementList []*admissionRequirement func (a admissionRequirementList) distance(pod *v1.Pod) float64 { dist := float64(0) for _, req := range a { - remainingRequest := float64(req.quantity - v1.GetResourceRequest(pod, req.resourceName)) + remainingRequest := float64(req.quantity - resource.GetResourceRequest(pod, req.resourceName)) if remainingRequest < 0 { remainingRequest = 0 } @@ -206,7 +207,7 @@ func (a admissionRequirementList) subtract(pods ...*v1.Pod) admissionRequirement for _, req := range a { newQuantity := req.quantity for _, pod := range pods { - newQuantity -= v1.GetResourceRequest(pod, req.resourceName) + newQuantity -= resource.GetResourceRequest(pod, req.resourceName) } if newQuantity > 0 { newList = append(newList, &admissionRequirement{ @@ -252,8 +253,8 @@ func smallerResourceRequest(pod1 *v1.Pod, pod2 *v1.Pod) bool { v1.ResourceCPU, } for _, res := range priorityList { - req1 := v1.GetResourceRequest(pod1, res) - req2 := v1.GetResourceRequest(pod2, res) + req1 := resource.GetResourceRequest(pod1, res) + req2 := resource.GetResourceRequest(pod2, res) if req1 < req2 { return true } else if req1 > req2 { diff --git a/pkg/kubelet/prober/BUILD b/pkg/kubelet/prober/BUILD index 26bdead46e..905ac32307 100644 --- a/pkg/kubelet/prober/BUILD +++ b/pkg/kubelet/prober/BUILD @@ -18,6 +18,7 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/events:go_default_library", "//pkg/kubelet/prober/results:go_default_library", diff --git a/pkg/kubelet/prober/worker.go b/pkg/kubelet/prober/worker.go index 6f55fb86dc..9aa4813926 100644 --- a/pkg/kubelet/prober/worker.go +++ b/pkg/kubelet/prober/worker.go @@ -23,6 +23,7 @@ 
import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober/results" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -155,7 +156,7 @@ func (w *worker) doProbe() (keepGoing bool) { return false } - c, ok := v1.GetContainerStatus(status.ContainerStatuses, w.container.Name) + c, ok := podutil.GetContainerStatus(status.ContainerStatuses, w.container.Name) if !ok || len(c.ContainerID) == 0 { // Either the container has not been created yet, or it was deleted. glog.V(3).Infof("Probe target container not found: %v - %v", diff --git a/pkg/kubelet/status/BUILD b/pkg/kubelet/status/BUILD index 29e5f2b132..20ef87777a 100644 --- a/pkg/kubelet/status/BUILD +++ b/pkg/kubelet/status/BUILD @@ -45,6 +45,7 @@ go_test( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", "//pkg/kubelet/container:go_default_library", diff --git a/pkg/kubelet/status/generate.go b/pkg/kubelet/status/generate.go index f7d94c72b7..d531a6f014 100644 --- a/pkg/kubelet/status/generate.go +++ b/pkg/kubelet/status/generate.go @@ -21,6 +21,7 @@ import ( "strings" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) const ( @@ -44,7 +45,7 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, containerStatuses []v1.Containe unknownContainers := []string{} unreadyContainers := []string{} for _, container := range spec.Containers { - if containerStatus, ok := v1.GetContainerStatus(containerStatuses, container.Name); ok { + if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok { if !containerStatus.Ready { unreadyContainers = append(unreadyContainers, container.Name) } @@ -99,7 
+100,7 @@ func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.Co unknownContainers := []string{} unreadyContainers := []string{} for _, container := range spec.InitContainers { - if containerStatus, ok := v1.GetContainerStatus(containerStatuses, container.Name); ok { + if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok { if !containerStatus.Ready { unreadyContainers = append(unreadyContainers, container.Name) } diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go index 2b44e13042..4bd1b5771c 100644 --- a/pkg/kubelet/status/status_manager.go +++ b/pkg/kubelet/status/status_manager.go @@ -288,10 +288,10 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp } // Set ReadyCondition.LastTransitionTime. - if _, readyCondition := v1.GetPodCondition(&status, v1.PodReady); readyCondition != nil { + if _, readyCondition := podutil.GetPodCondition(&status, v1.PodReady); readyCondition != nil { // Need to set LastTransitionTime. lastTransitionTime := metav1.Now() - _, oldReadyCondition := v1.GetPodCondition(&oldStatus, v1.PodReady) + _, oldReadyCondition := podutil.GetPodCondition(&oldStatus, v1.PodReady) if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status { lastTransitionTime = oldReadyCondition.LastTransitionTime } @@ -299,10 +299,10 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp } // Set InitializedCondition.LastTransitionTime. - if _, initCondition := v1.GetPodCondition(&status, v1.PodInitialized); initCondition != nil { + if _, initCondition := podutil.GetPodCondition(&status, v1.PodInitialized); initCondition != nil { // Need to set LastTransitionTime. 
lastTransitionTime := metav1.Now() - _, oldInitCondition := v1.GetPodCondition(&oldStatus, v1.PodInitialized) + _, oldInitCondition := podutil.GetPodCondition(&oldStatus, v1.PodInitialized) if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status { lastTransitionTime = oldInitCondition.LastTransitionTime } diff --git a/pkg/kubelet/status/status_manager_test.go b/pkg/kubelet/status/status_manager_test.go index c1de48bf93..ae0c38600f 100644 --- a/pkg/kubelet/status/status_manager_test.go +++ b/pkg/kubelet/status/status_manager_test.go @@ -33,6 +33,7 @@ import ( core "k8s.io/client-go/testing" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -184,7 +185,7 @@ func TestNewStatusSetsReadyTransitionTime(t *testing.T) { syncer.SetPodStatus(pod, podStatus) verifyUpdates(t, syncer, 1) status := expectPodStatus(t, syncer, pod) - readyCondition := v1.GetPodReadyCondition(status) + readyCondition := podutil.GetPodReadyCondition(status) if readyCondition.LastTransitionTime.IsZero() { t.Errorf("Unexpected: last transition time not set") } @@ -237,8 +238,8 @@ func TestChangedStatusUpdatesLastTransitionTime(t *testing.T) { verifyUpdates(t, syncer, 1) newStatus := expectPodStatus(t, syncer, pod) - oldReadyCondition := v1.GetPodReadyCondition(oldStatus) - newReadyCondition := v1.GetPodReadyCondition(newStatus) + oldReadyCondition := podutil.GetPodReadyCondition(oldStatus) + newReadyCondition := podutil.GetPodReadyCondition(newStatus) if newReadyCondition.LastTransitionTime.IsZero() { t.Errorf("Unexpected: last transition time not set") } @@ -276,8 +277,8 @@ func TestUnchangedStatusPreservesLastTransitionTime(t *testing.T) { verifyUpdates(t, syncer, 0) newStatus := expectPodStatus(t, syncer, pod) - oldReadyCondition := 
v1.GetPodReadyCondition(oldStatus) - newReadyCondition := v1.GetPodReadyCondition(newStatus) + oldReadyCondition := podutil.GetPodReadyCondition(oldStatus) + newReadyCondition := podutil.GetPodReadyCondition(newStatus) if newReadyCondition.LastTransitionTime.IsZero() { t.Errorf("Unexpected: last transition time not set") } diff --git a/pkg/volume/downwardapi/BUILD b/pkg/volume/downwardapi/BUILD index 6b404d556b..ba28cdaa23 100644 --- a/pkg/volume/downwardapi/BUILD +++ b/pkg/volume/downwardapi/BUILD @@ -14,6 +14,7 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//pkg/api/v1/resource:go_default_library", "//pkg/fieldpath:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/downwardapi/downwardapi.go b/pkg/volume/downwardapi/downwardapi.go index 3ba7d6ed42..a8c3d04290 100644 --- a/pkg/volume/downwardapi/downwardapi.go +++ b/pkg/volume/downwardapi/downwardapi.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1/resource" "k8s.io/kubernetes/pkg/fieldpath" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -244,7 +245,7 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu nodeAllocatable, err := host.GetNodeAllocatable() if err != nil { errlist = append(errlist, err) - } else if values, err := v1.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil { + } else if values, err := resource.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil { glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error()) errlist = append(errlist, err) } else { diff --git a/plugin/pkg/scheduler/factory/BUILD 
b/plugin/pkg/scheduler/factory/BUILD index 24cd0f4d7e..d7e3f5418e 100644 --- a/plugin/pkg/scheduler/factory/BUILD +++ b/plugin/pkg/scheduler/factory/BUILD @@ -17,6 +17,7 @@ go_library( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/informers/informers_generated/externalversions/apps/v1beta1:go_default_library", "//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library", diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index 91bfde1c2a..a9d98df6f5 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -32,6 +32,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" appsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1" coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1" @@ -595,7 +596,7 @@ type podConditionUpdater struct { func (p *podConditionUpdater) Update(pod *v1.Pod, condition *v1.PodCondition) error { glog.V(2).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status) - if v1.UpdatePodCondition(&pod.Status, condition) { + if podutil.UpdatePodCondition(&pod.Status, condition) { _, err := p.Client.Core().Pods(pod.Namespace).UpdateStatus(pod) return err } diff --git a/staging/copy.sh b/staging/copy.sh index 56f87107c4..49820ef98e 100755 --- a/staging/copy.sh +++ b/staging/copy.sh @@ -116,6 +116,8 @@ find "${MAIN_REPO}/pkg/version" -maxdepth 1 -type f | xargs -I{} cp {} "${CLIENT mkcp "pkg/client/clientset_generated/${CLIENTSET}" "pkg/client/clientset_generated" mkcp 
"pkg/client/informers/informers_generated/externalversions" "pkg/client/informers/informers_generated" mkcp "pkg/api/helper" "pkg/api" +mkcp "pkg/api/v1/resource" "pkg/api/v1" +mkcp "pkg/api/v1/node" "pkg/api/v1" pushd "${CLIENT_REPO_TEMP}" > /dev/null echo "generating vendor/" diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/BUILD b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/BUILD index 9c0d05aa68..021b07b8d2 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/BUILD +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/BUILD @@ -47,6 +47,7 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/pkg/api:go_default_library", "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library", + "//vendor/k8s.io/client-go/pkg/api/v1/ref:go_default_library", "//vendor/k8s.io/client-go/pkg/apis/policy/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", ], diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go index 9b4490ea97..0ad170b3d7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/api/v1/ref" ) // The EventExpansion interface allows manually adding extra methods to the EventInterface. @@ -99,7 +100,7 @@ func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) // object must match this event's client namespace unless the event client // was made with the "" namespace. 
func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { - ref, err := v1.GetReference(scheme, objOrRef) + ref, err := ref.GetReference(scheme, objOrRef) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/client-go/pkg/api/v1/BUILD b/staging/src/k8s.io/client-go/pkg/api/v1/BUILD index a7448a61c3..46e8c54bcd 100644 --- a/staging/src/k8s.io/client-go/pkg/api/v1/BUILD +++ b/staging/src/k8s.io/client-go/pkg/api/v1/BUILD @@ -17,9 +17,9 @@ go_library( "generate.go", "generated.pb.go", "meta.go", - "ref.go", + "objectreference.go", "register.go", - "resource_helpers.go", + "resource.go", "taint.go", "toleration.go", "types.generated.go", @@ -34,7 +34,6 @@ go_library( "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/github.com/ugorji/go/codec:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", diff --git a/staging/src/k8s.io/client-go/pkg/api/v1/node/BUILD b/staging/src/k8s.io/client-go/pkg/api/v1/node/BUILD new file mode 100644 index 0000000000..793209ff3f --- /dev/null +++ b/staging/src/k8s.io/client-go/pkg/api/v1/node/BUILD @@ -0,0 +1,15 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["util.go"], + tags = ["automanaged"], + deps = ["//vendor/k8s.io/client-go/pkg/api/v1:go_default_library"], +) diff --git a/staging/src/k8s.io/client-go/pkg/api/v1/node/util.go b/staging/src/k8s.io/client-go/pkg/api/v1/node/util.go new file mode 100644 index 0000000000..7fd9d25d6b --- /dev/null +++ b/staging/src/k8s.io/client-go/pkg/api/v1/node/util.go @@ -0,0 
+1,47 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// TODO: merge with pkg/util/node + +package node + +import ( + "k8s.io/client-go/pkg/api/v1" +) + +// GetNodeCondition extracts the provided condition from the given node status. +// It returns the index of the condition and a pointer to it, or -1 and nil if the condition is not present. +func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// IsNodeReady returns true if a node is ready; false otherwise. +func IsNodeReady(node *v1.Node) bool { + for _, c := range node.Status.Conditions { + if c.Type == v1.NodeReady { + return c.Status == v1.ConditionTrue + } + } + return false +} diff --git a/staging/src/k8s.io/client-go/pkg/api/v1/objectreference.go b/staging/src/k8s.io/client-go/pkg/api/v1/objectreference.go new file mode 100644 index 0000000000..ee5335ee87 --- /dev/null +++ b/staging/src/k8s.io/client-go/pkg/api/v1/objectreference.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SetGroupVersionKind sets the APIVersion and Kind fields of the reference from the +// given GroupVersionKind, letting ObjectReference satisfy the schema.ObjectKind interface. +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/staging/src/k8s.io/client-go/pkg/api/v1/ref/BUILD b/staging/src/k8s.io/client-go/pkg/api/v1/ref/BUILD new file mode 100644 index 0000000000..f9d8716b7e --- /dev/null +++ b/staging/src/k8s.io/client-go/pkg/api/v1/ref/BUILD @@ -0,0 +1,19 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["ref.go"], + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library", + ], +) diff --git a/staging/src/k8s.io/client-go/pkg/api/v1/ref.go b/staging/src/k8s.io/client-go/pkg/api/v1/ref/ref.go similarity index 80% rename from staging/src/k8s.io/client-go/pkg/api/v1/ref.go rename to staging/src/k8s.io/client-go/pkg/api/v1/ref/ref.go
index 5d33719fef..51f69555d7 100644 --- a/staging/src/k8s.io/client-go/pkg/api/v1/ref.go +++ b/staging/src/k8s.io/client-go/pkg/api/v1/ref/ref.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package ref import ( "errors" @@ -22,10 +22,9 @@ import ( "net/url" "strings" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api/v1" ) var ( @@ -38,11 +37,11 @@ var ( // object, or an error if the object doesn't follow the conventions // that would allow this. // TODO: should take a meta.Interface see http://issue.k8s.io/7127 -func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) { +func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) { if obj == nil { return nil, ErrNilObject } - if ref, ok := obj.(*ObjectReference); ok { + if ref, ok := obj.(*v1.ObjectReference); ok { // Don't make a reference to a reference. return ref, nil } @@ -94,14 +93,14 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, // only has list metadata if objectMeta == nil { - return &ObjectReference{ + return &v1.ObjectReference{ Kind: kind, APIVersion: version, ResourceVersion: listMeta.GetResourceVersion(), }, nil } - return &ObjectReference{ + return &v1.ObjectReference{ Kind: kind, APIVersion: version, Name: objectMeta.GetName(), @@ -112,7 +111,7 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, } // GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. 
-func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) { +func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*v1.ObjectReference, error) { ref, err := GetReference(scheme, obj) if err != nil { return nil, err @@ -120,14 +119,3 @@ func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath s ref.FieldPath = fieldPath return ref, nil } - -// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that -// intend only to get a reference to that object. This simplifies the event recording interface. -func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} -func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/staging/src/k8s.io/client-go/pkg/api/v1/resource.go b/staging/src/k8s.io/client-go/pkg/api/v1/resource.go new file mode 100644 index 0000000000..2dca986679 --- /dev/null +++ b/staging/src/k8s.io/client-go/pkg/api/v1/resource.go @@ -0,0 +1,56 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +// Returns string version of ResourceName. 
+func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. +func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} diff --git a/staging/src/k8s.io/client-go/pkg/api/v1/resource/BUILD b/staging/src/k8s.io/client-go/pkg/api/v1/resource/BUILD new file mode 100644 index 0000000000..cb244ebc1d --- /dev/null +++ b/staging/src/k8s.io/client-go/pkg/api/v1/resource/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["helpers_test.go"], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["helpers.go"], + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/client-go/pkg/api:go_default_library", + "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library", + ], +) diff --git a/staging/src/k8s.io/client-go/pkg/api/v1/resource/helpers.go 
b/staging/src/k8s.io/client-go/pkg/api/v1/resource/helpers.go new file mode 100644 index 0000000000..7772ace6c7 --- /dev/null +++ b/staging/src/k8s.io/client-go/pkg/api/v1/resource/helpers.go @@ -0,0 +1,200 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "math" + "strconv" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/api/v1" +) + +// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all +// containers of the pod. 
+func PodRequestsAndLimits(pod *v1.Pod) (reqs map[v1.ResourceName]resource.Quantity, limits map[v1.ResourceName]resource.Quantity, err error) { + reqs, limits = map[v1.ResourceName]resource.Quantity{}, map[v1.ResourceName]resource.Quantity{} + for _, container := range pod.Spec.Containers { + for name, quantity := range container.Resources.Requests { + if value, ok := reqs[name]; !ok { + reqs[name] = *quantity.Copy() + } else { + value.Add(quantity) + reqs[name] = value + } + } + for name, quantity := range container.Resources.Limits { + if value, ok := limits[name]; !ok { + limits[name] = *quantity.Copy() + } else { + value.Add(quantity) + limits[name] = value + } + } + } + // init containers define the minimum of any resource + for _, container := range pod.Spec.InitContainers { + for name, quantity := range container.Resources.Requests { + value, ok := reqs[name] + if !ok { + reqs[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + reqs[name] = *quantity.Copy() + } + } + for name, quantity := range container.Resources.Limits { + value, ok := limits[name] + if !ok { + limits[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + limits[name] = *quantity.Copy() + } + } + } + return +} + +// finds and returns the request for a specific resource. 
+func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 { + if resource == v1.ResourcePods { + return 1 + } + totalResources := int64(0) + for _, container := range pod.Spec.Containers { + if rQuantity, ok := container.Resources.Requests[resource]; ok { + if resource == v1.ResourceCPU { + totalResources += rQuantity.MilliValue() + } else { + totalResources += rQuantity.Value() + } + } + } + // take max_resource(sum_pod, any_init_container) + for _, container := range pod.Spec.InitContainers { + if rQuantity, ok := container.Resources.Requests[resource]; ok { + if resource == v1.ResourceCPU && rQuantity.MilliValue() > totalResources { + totalResources = rQuantity.MilliValue() + } else if rQuantity.Value() > totalResources { + totalResources = rQuantity.Value() + } + } + } + return totalResources +} + +// ExtractResourceValueByContainerName extracts the value of a resource +// by providing container name +func ExtractResourceValueByContainerName(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string) (string, error) { + container, err := findContainerInPod(pod, containerName) + if err != nil { + return "", err + } + return ExtractContainerResourceValue(fs, container) +} + +// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource +// by providing container name and node allocatable +func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string, nodeAllocatable v1.ResourceList) (string, error) { + realContainer, err := findContainerInPod(pod, containerName) + if err != nil { + return "", err + } + + containerCopy, err := api.Scheme.DeepCopy(realContainer) + if err != nil { + return "", fmt.Errorf("failed to perform a deep copy of container object: %v", err) + } + + container, ok := containerCopy.(*v1.Container) + if !ok { + return "", fmt.Errorf("unexpected type returned from deep copy of container object") + } + + MergeContainerResourceLimits(container, 
nodeAllocatable) + + return ExtractContainerResourceValue(fs, container) +} + +// ExtractContainerResourceValue extracts the value of a resource +// in an already known container +func ExtractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.Container) (string, error) { + divisor := resource.Quantity{} + if divisor.Cmp(fs.Divisor) == 0 { + divisor = resource.MustParse("1") + } else { + divisor = fs.Divisor + } + + switch fs.Resource { + case "limits.cpu": + return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) + case "limits.memory": + return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) + case "requests.cpu": + return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) + case "requests.memory": + return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) + } + + return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) +} + +// convertResourceCPUToString converts cpu value to the format of divisor and returns +// ceiling of the value. +func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { + c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) + return strconv.FormatInt(c, 10), nil +} + +// convertResourceMemoryToString converts memory value to the format of divisor and returns +// ceiling of the value. 
+func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} + +// findContainerInPod finds a container by its name in the provided pod +func findContainerInPod(pod *v1.Pod, containerName string) (*v1.Container, error) { + for _, container := range pod.Spec.Containers { + if container.Name == containerName { + return &container, nil + } + } + return nil, fmt.Errorf("container %s not found", containerName) +} + +// MergeContainerResourceLimits checks if a limit is applied for +// the container, and if not, it sets the limit to the passed resource list. +func MergeContainerResourceLimits(container *v1.Container, + allocatable v1.ResourceList) { + if container.Resources.Limits == nil { + container.Resources.Limits = make(v1.ResourceList) + } + for _, resource := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} { + if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() { + if cap, exists := allocatable[resource]; exists { + container.Resources.Limits[resource] = *cap.Copy() + } + } + } +} diff --git a/staging/src/k8s.io/client-go/pkg/api/v1/resource/helpers_test.go b/staging/src/k8s.io/client-go/pkg/api/v1/resource/helpers_test.go new file mode 100644 index 0000000000..4866bf3c58 --- /dev/null +++ b/staging/src/k8s.io/client-go/pkg/api/v1/resource/helpers_test.go @@ -0,0 +1,182 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/client-go/pkg/api/v1" +) + +func TestResourceHelpers(t *testing.T) { + cpuLimit := resource.MustParse("10") + memoryLimit := resource.MustParse("10G") + resourceSpec := v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "cpu": cpuLimit, + "memory": memoryLimit, + "kube.io/storage": memoryLimit, + }, + } + if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 { + t.Errorf("expected cpulimit %v, got %v", cpuLimit, res) + } + if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 { + t.Errorf("expected memorylimit %v, got %v", memoryLimit, res) + } + resourceSpec = v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "memory": memoryLimit, + "kube.io/storage": memoryLimit, + }, + } + if res := resourceSpec.Limits.Cpu(); res.Value() != 0 { + t.Errorf("expected cpulimit %v, got %v", 0, res) + } + if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 { + t.Errorf("expected memorylimit %v, got %v", memoryLimit, res) + } +} + +func TestDefaultResourceHelpers(t *testing.T) { + resourceList := v1.ResourceList{} + if resourceList.Cpu().Format != resource.DecimalSI { + t.Errorf("expected %v, actual %v", resource.DecimalSI, resourceList.Cpu().Format) + } + if resourceList.Memory().Format != resource.BinarySI { + t.Errorf("expected %v, actual %v", resource.BinarySI, resourceList.Memory().Format) + } +} + +func TestExtractResourceValue(t *testing.T) { + cases := []struct { + fs *v1.ResourceFieldSelector + pod *v1.Pod + cName string + expectedValue string + expectedError error + }{ + { + fs: &v1.ResourceFieldSelector{ + Resource: "limits.cpu", + }, + cName: "foo", + pod: getPod("foo", "", "9", "", ""), + expectedValue: "9", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.cpu", + }, + cName: 
"foo", + pod: getPod("foo", "", "", "", ""), + expectedValue: "0", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.cpu", + }, + cName: "foo", + pod: getPod("foo", "8", "", "", ""), + expectedValue: "8", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.cpu", + }, + cName: "foo", + pod: getPod("foo", "100m", "", "", ""), + expectedValue: "1", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.cpu", + Divisor: resource.MustParse("100m"), + }, + cName: "foo", + pod: getPod("foo", "1200m", "", "", ""), + expectedValue: "12", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.memory", + }, + cName: "foo", + pod: getPod("foo", "", "", "100Mi", ""), + expectedValue: "104857600", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "requests.memory", + Divisor: resource.MustParse("1Mi"), + }, + cName: "foo", + pod: getPod("foo", "", "", "100Mi", "1Gi"), + expectedValue: "100", + }, + { + fs: &v1.ResourceFieldSelector{ + Resource: "limits.memory", + }, + cName: "foo", + pod: getPod("foo", "", "", "10Mi", "100Mi"), + expectedValue: "104857600", + }, + } + as := assert.New(t) + for idx, tc := range cases { + actual, err := ExtractResourceValueByContainerName(tc.fs, tc.pod, tc.cName) + if tc.expectedError != nil { + as.Equal(tc.expectedError, err, "expected test case [%d] to fail with error %v; got %v", idx, tc.expectedError, err) + } else { + as.Nil(err, "expected test case [%d] to not return an error; got %v", idx, err) + as.Equal(tc.expectedValue, actual, "expected test case [%d] to return %q; got %q instead", idx, tc.expectedValue, actual) + } + } +} + +func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *v1.Pod { + resources := v1.ResourceRequirements{ + Limits: make(v1.ResourceList), + Requests: make(v1.ResourceList), + } + if cpuLimit != "" { + resources.Limits[v1.ResourceCPU] = resource.MustParse(cpuLimit) + } + if memoryLimit != "" { + resources.Limits[v1.ResourceMemory] = 
resource.MustParse(memoryLimit) + } + if cpuRequest != "" { + resources.Requests[v1.ResourceCPU] = resource.MustParse(cpuRequest) + } + if memoryRequest != "" { + resources.Requests[v1.ResourceMemory] = resource.MustParse(memoryRequest) + } + return &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: cname, + Resources: resources, + }, + }, + }, + } +} diff --git a/staging/src/k8s.io/client-go/pkg/api/v1/resource_helpers.go b/staging/src/k8s.io/client-go/pkg/api/v1/resource_helpers.go deleted file mode 100644 index b80efd18b0..0000000000 --- a/staging/src/k8s.io/client-go/pkg/api/v1/resource_helpers.go +++ /dev/null @@ -1,358 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - "math" - "strconv" - "time" - - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api" -) - -// Returns string version of ResourceName. -func (self ResourceName) String() string { - return string(self) -} - -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} -} - -// Returns the Memory limit if specified. 
-func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} -} - -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} -} - -func (self *ResourceList) NvidiaGPU() *resource.Quantity { - if val, ok := (*self)[ResourceNvidiaGPU]; ok { - return &val - } - return &resource.Quantity{} -} - -func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) { - for i := range statuses { - if statuses[i].Name == name { - return statuses[i], true - } - } - return ContainerStatus{}, false -} - -func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus { - for i := range statuses { - if statuses[i].Name == name { - return statuses[i] - } - } - return ContainerStatus{} -} - -// IsPodAvailable returns true if a pod is available; false otherwise. -// Precondition for an available pod is that it must be ready. On top -// of that, there are two cases when a pod can be considered available: -// 1. minReadySeconds == 0, or -// 2. LastTransitionTime (is set) + minReadySeconds < current time -func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool { - if !IsPodReady(pod) { - return false - } - - c := GetPodReadyCondition(pod.Status) - minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second - if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { - return true - } - return false -} - -// IsPodReady returns true if a pod is ready; false otherwise. -func IsPodReady(pod *Pod) bool { - return IsPodReadyConditionTrue(pod.Status) -} - -// IsPodReady retruns true if a pod is ready; false otherwise. 
-func IsPodReadyConditionTrue(status PodStatus) bool { - condition := GetPodReadyCondition(status) - return condition != nil && condition.Status == ConditionTrue -} - -// Extracts the pod ready condition from the given status and returns that. -// Returns nil if the condition is not present. -func GetPodReadyCondition(status PodStatus) *PodCondition { - _, condition := GetPodCondition(&status, PodReady) - return condition -} - -// GetPodCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. -func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// GetNodeCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. -func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the -// status has changed. -// Returns true if pod condition has changed or has been added. -func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { - condition.LastTransitionTime = metav1.Now() - // Try to find this pod condition. - conditionIndex, oldCondition := GetPodCondition(status, condition.Type) - - if oldCondition == nil { - // We are adding new pod condition. 
- status.Conditions = append(status.Conditions, *condition) - return true - } else { - // We are updating an existing condition, so we need to check if it has changed. - if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } - - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && - condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) - - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. - return !isEqual - } -} - -// IsNodeReady returns true if a node is ready; false otherwise. -func IsNodeReady(node *Node) bool { - for _, c := range node.Status.Conditions { - if c.Type == NodeReady { - return c.Status == ConditionTrue - } - } - return false -} - -// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all -// containers of the pod. 
-func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) { - reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{} - for _, container := range pod.Spec.Containers { - for name, quantity := range container.Resources.Requests { - if value, ok := reqs[name]; !ok { - reqs[name] = *quantity.Copy() - } else { - value.Add(quantity) - reqs[name] = value - } - } - for name, quantity := range container.Resources.Limits { - if value, ok := limits[name]; !ok { - limits[name] = *quantity.Copy() - } else { - value.Add(quantity) - limits[name] = value - } - } - } - // init containers define the minimum of any resource - for _, container := range pod.Spec.InitContainers { - for name, quantity := range container.Resources.Requests { - value, ok := reqs[name] - if !ok { - reqs[name] = *quantity.Copy() - continue - } - if quantity.Cmp(value) > 0 { - reqs[name] = *quantity.Copy() - } - } - for name, quantity := range container.Resources.Limits { - value, ok := limits[name] - if !ok { - limits[name] = *quantity.Copy() - continue - } - if quantity.Cmp(value) > 0 { - limits[name] = *quantity.Copy() - } - } - } - return -} - -// finds and returns the request for a specific resource. 
-func GetResourceRequest(pod *Pod, resource ResourceName) int64 { - if resource == ResourcePods { - return 1 - } - totalResources := int64(0) - for _, container := range pod.Spec.Containers { - if rQuantity, ok := container.Resources.Requests[resource]; ok { - if resource == ResourceCPU { - totalResources += rQuantity.MilliValue() - } else { - totalResources += rQuantity.Value() - } - } - } - // take max_resource(sum_pod, any_init_container) - for _, container := range pod.Spec.InitContainers { - if rQuantity, ok := container.Resources.Requests[resource]; ok { - if resource == ResourceCPU && rQuantity.MilliValue() > totalResources { - totalResources = rQuantity.MilliValue() - } else if rQuantity.Value() > totalResources { - totalResources = rQuantity.Value() - } - } - } - return totalResources -} - -// ExtractResourceValueByContainerName extracts the value of a resource -// by providing container name -func ExtractResourceValueByContainerName(fs *ResourceFieldSelector, pod *Pod, containerName string) (string, error) { - container, err := findContainerInPod(pod, containerName) - if err != nil { - return "", err - } - return ExtractContainerResourceValue(fs, container) -} - -// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource -// by providing container name and node allocatable -func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *ResourceFieldSelector, pod *Pod, containerName string, nodeAllocatable ResourceList) (string, error) { - realContainer, err := findContainerInPod(pod, containerName) - if err != nil { - return "", err - } - - containerCopy, err := api.Scheme.DeepCopy(realContainer) - if err != nil { - return "", fmt.Errorf("failed to perform a deep copy of container object: %v", err) - } - - container, ok := containerCopy.(*Container) - if !ok { - return "", fmt.Errorf("unexpected type returned from deep copy of container object") - } - - MergeContainerResourceLimits(container, nodeAllocatable) - - return 
ExtractContainerResourceValue(fs, container) -} - -// ExtractContainerResourceValue extracts the value of a resource -// in an already known container -func ExtractContainerResourceValue(fs *ResourceFieldSelector, container *Container) (string, error) { - divisor := resource.Quantity{} - if divisor.Cmp(fs.Divisor) == 0 { - divisor = resource.MustParse("1") - } else { - divisor = fs.Divisor - } - - switch fs.Resource { - case "limits.cpu": - return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) - case "limits.memory": - return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) - case "requests.cpu": - return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) - case "requests.memory": - return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) - } - - return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) -} - -// convertResourceCPUToString converts cpu value to the format of divisor and returns -// ceiling of the value. -func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { - c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) - return strconv.FormatInt(c, 10), nil -} - -// convertResourceMemoryToString converts memory value to the format of divisor and returns -// ceiling of the value. 
-func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { - m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) - return strconv.FormatInt(m, 10), nil -} - -// findContainerInPod finds a container by its name in the provided pod -func findContainerInPod(pod *Pod, containerName string) (*Container, error) { - for _, container := range pod.Spec.Containers { - if container.Name == containerName { - return &container, nil - } - } - return nil, fmt.Errorf("container %s not found", containerName) -} - -// MergeContainerResourceLimits checks if a limit is applied for -// the container, and if not, it sets the limit to the passed resource list. -func MergeContainerResourceLimits(container *Container, - allocatable ResourceList) { - if container.Resources.Limits == nil { - container.Resources.Limits = make(ResourceList) - } - for _, resource := range []ResourceName{ResourceCPU, ResourceMemory} { - if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() { - if cap, exists := allocatable[resource]; exists { - container.Resources.Limits[resource] = *cap.Copy() - } - } - } -} diff --git a/staging/src/k8s.io/client-go/tools/record/BUILD b/staging/src/k8s.io/client-go/tools/record/BUILD index 486710341e..0e39f0a925 100644 --- a/staging/src/k8s.io/client-go/tools/record/BUILD +++ b/staging/src/k8s.io/client-go/tools/record/BUILD @@ -25,6 +25,7 @@ go_test( "//vendor/k8s.io/client-go/pkg/api:go_default_library", "//vendor/k8s.io/client-go/pkg/api/install:go_default_library", "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library", + "//vendor/k8s.io/client-go/pkg/api/v1/ref:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/util/clock:go_default_library", ], @@ -50,6 +51,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", 
"//vendor/k8s.io/client-go/pkg/api/v1:go_default_library", + "//vendor/k8s.io/client-go/pkg/api/v1/ref:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/util/clock:go_default_library", ], diff --git a/staging/src/k8s.io/client-go/tools/record/event.go b/staging/src/k8s.io/client-go/tools/record/event.go index 26e036be36..999bd1cc48 100644 --- a/staging/src/k8s.io/client-go/tools/record/event.go +++ b/staging/src/k8s.io/client-go/tools/record/event.go @@ -27,6 +27,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/api/v1/ref" restclient "k8s.io/client-go/rest" "k8s.io/client-go/util/clock" @@ -254,7 +255,7 @@ type recorderImpl struct { } func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp metav1.Time, eventtype, reason, message string) { - ref, err := v1.GetReference(recorder.scheme, object) + ref, err := ref.GetReference(recorder.scheme, object) if err != nil { glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) return diff --git a/staging/src/k8s.io/client-go/tools/record/event_test.go b/staging/src/k8s.io/client-go/tools/record/event_test.go index 8c456e6b5c..c50bd91fb4 100644 --- a/staging/src/k8s.io/client-go/tools/record/event_test.go +++ b/staging/src/k8s.io/client-go/tools/record/event_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/client-go/pkg/api" _ "k8s.io/client-go/pkg/api/install" // To register api.Pod used in tests below "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/api/v1/ref" restclient "k8s.io/client-go/rest" "k8s.io/client-go/util/clock" ) @@ -119,8 +120,8 @@ func TestEventf(t *testing.T) { UID: "differentUid", }, } - testRef, err := v1.GetPartialReference(api.Scheme, testPod, "spec.containers[2]") - testRef2, err := v1.GetPartialReference(api.Scheme, testPod2, "spec.containers[3]") + testRef, err := ref.GetPartialReference(api.Scheme, testPod, "spec.containers[2]") + testRef2, err := ref.GetPartialReference(api.Scheme, testPod2, "spec.containers[3]") if err != nil { t.Fatal(err) } @@ -531,7 +532,7 @@ func TestEventfNoNamespace(t *testing.T) { UID: "bar", }, } - testRef, err := v1.GetPartialReference(api.Scheme, testPod, "spec.containers[2]") + testRef, err := ref.GetPartialReference(api.Scheme, testPod, "spec.containers[2]") if err != nil { t.Fatal(err) } @@ -637,8 +638,8 @@ func TestMultiSinkCache(t *testing.T) { UID: "differentUid", }, } - testRef, err := v1.GetPartialReference(api.Scheme, testPod, "spec.containers[2]") - testRef2, err := v1.GetPartialReference(api.Scheme, testPod2, "spec.containers[3]") + testRef, err := ref.GetPartialReference(api.Scheme, testPod, "spec.containers[2]") + testRef2, err := ref.GetPartialReference(api.Scheme, testPod2, "spec.containers[3]") if err != nil { t.Fatal(err) } diff --git a/test/e2e/BUILD b/test/e2e/BUILD index 8e7a1fba40..c32e8aba42 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -112,6 +112,7 @@ go_library( 
"//pkg/api:go_default_library", "//pkg/api/annotations:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/api/v1/service:go_default_library", "//pkg/apis/apps/v1beta1:go_default_library", "//pkg/apis/batch:go_default_library", diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go index 94022b6685..a6b0b7ee83 100644 --- a/test/e2e/common/container_probe.go +++ b/test/e2e/common/container_probe.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" @@ -81,7 +82,7 @@ var _ = framework.KubeDescribe("Probing container", func() { if err != nil { return false, err } - return v1.IsPodReady(p), nil + return podutil.IsPodReady(p), nil }, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready") p, err := podClient.Get(p.Name, metav1.GetOptions{}) @@ -369,7 +370,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, By("checking the pod's current state and verifying that restartCount is present") pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns)) - initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount + initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount) // Wait for the restart state to be as desired. 
@@ -379,7 +380,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) { pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name)) - restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount + restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount if restartCount != lastRestartCount { framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)", ns, pod.Name, restartCount, time.Since(start)) diff --git a/test/e2e/common/init_container.go b/test/e2e/common/init_container.go index cd5b9b68d1..f9bb152ab2 100644 --- a/test/e2e/common/init_container.go +++ b/test/e2e/common/init_container.go @@ -101,7 +101,7 @@ var _ = framework.KubeDescribe("InitContainer", func() { } Expect(endPod.Status.Phase).To(Equal(v1.PodSucceeded)) - _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized) + _, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized) Expect(init).NotTo(BeNil()) Expect(init.Status).To(Equal(v1.ConditionTrue)) @@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("InitContainer", func() { endPod := event.Object.(*v1.Pod) Expect(endPod.Status.Phase).To(Equal(v1.PodRunning)) - _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized) + _, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized) Expect(init).NotTo(BeNil()) Expect(init.Status).To(Equal(v1.ConditionTrue)) if err := podutil.SetInitContainersAndStatuses(endPod); err != nil { @@ -318,7 +318,7 @@ var _ = framework.KubeDescribe("InitContainer", func() { } Expect(endPod.Status.Phase).To(Equal(v1.PodPending)) - _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized) + _, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized) Expect(init).NotTo(BeNil()) 
Expect(init.Status).To(Equal(v1.ConditionFalse)) Expect(init.Reason).To(Equal("ContainersNotInitialized")) @@ -438,7 +438,7 @@ var _ = framework.KubeDescribe("InitContainer", func() { endPod := event.Object.(*v1.Pod) Expect(endPod.Status.Phase).To(Equal(v1.PodFailed)) - _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized) + _, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized) Expect(init).NotTo(BeNil()) Expect(init.Status).To(Equal(v1.ConditionFalse)) Expect(init.Reason).To(Equal("ContainersNotInitialized")) diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index 0abf9b658a..08b871c5ad 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/kubelet" "k8s.io/kubernetes/test/e2e/framework" @@ -103,7 +104,7 @@ func getRestartDelay(podClient *framework.PodClient, podName string, containerNa time.Sleep(time.Second) pod, err := podClient.Get(podName, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) - status, ok := v1.GetContainerStatus(pod.Status.ContainerStatuses, containerName) + status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName) if !ok { framework.Logf("getRestartDelay: status missing") continue diff --git a/test/e2e/daemon_set.go b/test/e2e/daemon_set.go index 1852475cef..17571738b0 100644 --- a/test/e2e/daemon_set.go +++ b/test/e2e/daemon_set.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -527,7 +528,7 @@ func checkDaemonPodOnNodes(f 
*framework.Framework, ds *extensions.DaemonSet, nod if pod.DeletionTimestamp != nil { continue } - if v1.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) { + if podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) { nodesToPodCount[pod.Spec.NodeName] += 1 } } @@ -613,7 +614,7 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.D allImagesUpdated = false framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage) } - if !v1.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) { + if !podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) { framework.Logf("Pod %s is not available", pod.Name) unavailablePods++ } diff --git a/test/e2e/etcd_failure.go b/test/e2e/etcd_failure.go index b71ab4fff3..efb8d34e39 100644 --- a/test/e2e/etcd_failure.go +++ b/test/e2e/etcd_failure.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" @@ -130,7 +130,7 @@ func checkExistingRCRecovers(f *framework.Framework) { pods, err := podClient.List(options) Expect(err).NotTo(HaveOccurred()) for _, pod := range pods.Items { - if pod.DeletionTimestamp == nil && v1.IsPodReady(&pod) { + if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) { return true, nil } } diff --git a/test/e2e/examples.go b/test/e2e/examples.go index b86eda7c5e..05ea5746be 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" 
"k8s.io/kubernetes/test/e2e/framework" @@ -310,7 +311,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items)) } for _, p := range podList.Items { - isReady := v1.IsPodReady(&p) + isReady := podutil.IsPodReady(&p) if p.Status.Phase != v1.PodRunning || !isReady { framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, v1.PodRunning, p.Status.Phase, isReady) return false, nil @@ -414,7 +415,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) - stat := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName) + stat := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName) framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount) if stat.RestartCount > 0 { framework.Logf("Saw %v restart, succeeded...", podName) diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index f9d032db3b..0424d7f16b 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -43,6 +43,8 @@ go_library( "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/api/v1/helper:go_default_library", + "//pkg/api/v1/node:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/api/v1/service:go_default_library", "//pkg/apis/apps/v1beta1:go_default_library", "//pkg/apis/authorization/v1beta1:go_default_library", diff --git a/test/e2e/framework/statefulset_utils.go b/test/e2e/framework/statefulset_utils.go index 120c16111d..91d7c79b69 100644 --- a/test/e2e/framework/statefulset_utils.go +++ b/test/e2e/framework/statefulset_utils.go @@ -35,6 +35,7 @@ import ( utilyaml "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/kubernetes/pkg/api" 
"k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/test/e2e/generated" @@ -213,7 +214,7 @@ func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) error { if pollErr != nil { unhealthy := []string{} for _, statefulPod := range statefulPodList.Items { - delTs, phase, readiness := statefulPod.DeletionTimestamp, statefulPod.Status.Phase, v1.IsPodReady(&statefulPod) + delTs, phase, readiness := statefulPod.DeletionTimestamp, statefulPod.Status.Phase, podutil.IsPodReady(&statefulPod) if delTs != nil || phase != v1.PodRunning || !readiness { unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", statefulPod.Name, delTs, phase, readiness)) } @@ -290,7 +291,7 @@ func (s *StatefulSetTester) waitForRunning(numStatefulPods int32, ss *apps.State return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numStatefulPods, len(podList.Items)) } for _, p := range podList.Items { - isReady := v1.IsPodReady(&p) + isReady := podutil.IsPodReady(&p) desiredReadiness := shouldBeReady == isReady Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady) if p.Status.Phase != v1.PodRunning || !desiredReadiness { diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index c03b408ada..d3bcb4a33e 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -73,6 +73,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + nodeutil "k8s.io/kubernetes/pkg/api/v1/node" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1" batchinternal "k8s.io/kubernetes/pkg/apis/batch" batch "k8s.io/kubernetes/pkg/apis/batch/v1" @@ -1185,7 +1187,7 @@ func initContainersInvariants(pod *v1.Pod) error 
{ } } } - _, c := v1.GetPodCondition(&pod.Status, v1.PodInitialized) + _, c := podutil.GetPodCondition(&pod.Status, v1.PodInitialized) if c == nil { return fmt.Errorf("pod does not have initialized condition") } @@ -1304,7 +1306,7 @@ func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.C case v1.PodFailed, v1.PodSucceeded: return false, conditions.ErrPodCompleted case v1.PodRunning: - return v1.IsPodReady(pod), nil + return podutil.IsPodReady(pod), nil } return false, nil } @@ -3142,7 +3144,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error { for i := range podList.Items { pod := podList.Items[i] - if v1.IsPodReady(&pod) { + if podutil.IsPodReady(&pod) { readyPods++ } else { unready.Insert(pod.Name) @@ -3164,7 +3166,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error { return false, nil } pod := event.Object.(*v1.Pod) - if v1.IsPodReady(pod) && unready.Has(pod.Name) { + if podutil.IsPodReady(pod) && unready.Has(pod.Name) { unready.Delete(pod.Name) } return unready.Len() == 0, nil @@ -3509,7 +3511,7 @@ func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds in return false, nil } for _, pod := range pods.Items { - if !v1.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) { + if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) { return false, nil } } @@ -3598,7 +3600,7 @@ func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deploymen } for _, pod := range podList.Items { availability := "not available" - if v1.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) { + if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) { availability = "available" } Logf("Pod %s is %s:\n%+v", pod.Name, availability, pod) @@ -4209,7 +4211,7 @@ func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeCo // TODO: we should extend it for other reasons. 
func allowedNotReadyReasons(nodes []*v1.Node) bool { for _, node := range nodes { - index, condition := v1.GetNodeCondition(&node.Status, v1.NodeReady) + index, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) if index == -1 || !strings.Contains(condition.Message, "could not locate kubenet required CNI plugins") { return false @@ -5165,12 +5167,12 @@ func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, for _, pod := range pods.Items { if !masterNodes.Has(pod.Spec.NodeName) { if pod.Spec.NodeName != "" { - _, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled) + _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) Expect(scheduledCondition != nil).To(Equal(true)) Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue)) scheduledPods = append(scheduledPods, pod) } else { - _, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled) + _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) Expect(scheduledCondition != nil).To(Equal(true)) Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse)) if scheduledCondition.Reason == "Unschedulable" { diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index 663b4f456e..a1e825e5cd 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -20,6 +20,7 @@ go_library( deps = [ "//pkg/api/v1:go_default_library", "//pkg/api/v1/helper:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/util/system:go_default_library", "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index e51b62ec2d..eecc0cb9d4 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" 
"k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" @@ -863,7 +864,7 @@ func getPodsScheduled(pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Po for _, pod := range pods.Items { if !masterNodes.Has(pod.Spec.NodeName) { if pod.Spec.NodeName != "" { - _, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled) + _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) // We can't assume that the scheduledCondition is always set if Pod is assigned to Node, // as e.g. DaemonController doesn't set it when assigning Pod to a Node. Currently // Kubelet sets this condition when it gets a Pod without it, but if we were expecting @@ -873,7 +874,7 @@ func getPodsScheduled(pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Po } scheduledPods = append(scheduledPods, pod) } else { - _, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled) + _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) if scheduledCondition != nil { Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse)) } diff --git a/test/e2e/scheduling/rescheduler.go b/test/e2e/scheduling/rescheduler.go index 21e0853c6f..52fcaa515c 100644 --- a/test/e2e/scheduling/rescheduler.go +++ b/test/e2e/scheduling/rescheduler.go @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" @@ -106,7 +107,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error { } func podRunningOrUnschedulable(pod *v1.Pod) bool { - _, cond := v1.GetPodCondition(&pod.Status, v1.PodScheduled) + _, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) if cond != nil 
&& cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" { return true } diff --git a/test/e2e/statefulset.go b/test/e2e/statefulset.go index 4cdfdacc7e..9f91bd7702 100644 --- a/test/e2e/statefulset.go +++ b/test/e2e/statefulset.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/controller" @@ -330,11 +331,11 @@ var _ = framework.KubeDescribe("StatefulSet", func() { return false, fmt.Errorf("Pod %v was deleted before enter running", pod.Name) } framework.Logf("Observed event %v for pod %v. Phase %v, Pod is ready %v", - event.Type, pod.Name, pod.Status.Phase, v1.IsPodReady(pod)) + event.Type, pod.Name, pod.Status.Phase, podutil.IsPodReady(pod)) if pod.Name != expectedPodName { return false, nil } - if pod.Status.Phase == v1.PodRunning && v1.IsPodReady(pod) { + if pod.Status.Phase == v1.PodRunning && podutil.IsPodReady(pod) { return true, nil } return false, nil diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index 1002bacdbe..36aa3117f5 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -25,6 +25,8 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/node:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/apis/componentconfig:go_default_library", "//pkg/apis/componentconfig/v1alpha1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", @@ -93,6 +95,7 @@ go_test( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/node:go_default_library", "//pkg/apis/componentconfig:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/kubelet:go_default_library", diff --git a/test/e2e_node/allocatable_eviction_test.go 
b/test/e2e_node/allocatable_eviction_test.go index 2cb0346820..1261243601 100644 --- a/test/e2e_node/allocatable_eviction_test.go +++ b/test/e2e_node/allocatable_eviction_test.go @@ -22,6 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" + nodeutil "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/test/e2e/framework" @@ -79,7 +80,7 @@ var _ = framework.KubeDescribe("AllocatableEviction [Slow] [Serial] [Disruptive] // Returns TRUE if the node has Memory Pressure, FALSE otherwise func hasMemoryPressure(f *framework.Framework, testCondition string) (bool, error) { localNodeStatus := getLocalNode(f).Status - _, pressure := v1.GetNodeCondition(&localNodeStatus, v1.NodeMemoryPressure) + _, pressure := nodeutil.GetNodeCondition(&localNodeStatus, v1.NodeMemoryPressure) Expect(pressure).NotTo(BeNil()) hasPressure := pressure.Status == v1.ConditionTrue By(fmt.Sprintf("checking if pod has %s: %v", testCondition, hasPressure)) diff --git a/test/e2e_node/container.go b/test/e2e_node/container.go index 6a79e34842..60c9438799 100644 --- a/test/e2e_node/container.go +++ b/test/e2e_node/container.go @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/test/e2e/framework" ) @@ -71,7 +72,7 @@ func (cc *ConformanceContainer) IsReady() (bool, error) { if err != nil { return false, err } - return v1.IsPodReady(pod), nil + return podutil.IsPodReady(pod), nil } func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) { diff --git a/test/e2e_node/e2e_node_suite_test.go b/test/e2e_node/e2e_node_suite_test.go index bf626cac92..758f705908 100644 --- a/test/e2e_node/e2e_node_suite_test.go +++ b/test/e2e_node/e2e_node_suite_test.go @@ -33,6 +33,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/kubernetes/pkg/api/v1" + nodeutil "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" commontest "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" @@ -218,7 +219,7 @@ func waitForNodeReady() { if err != nil { return fmt.Errorf("failed to get node: %v", err) } - if !v1.IsNodeReady(node) { + if !nodeutil.IsNodeReady(node) { return fmt.Errorf("node is not ready: %+v", node) } return nil diff --git a/test/e2e_node/inode_eviction_test.go b/test/e2e_node/inode_eviction_test.go index 3528801d75..34ddeb602d 100644 --- a/test/e2e_node/inode_eviction_test.go +++ b/test/e2e_node/inode_eviction_test.go @@ -22,6 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" + nodeutil "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/test/e2e/framework" @@ -307,7 +308,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs // Returns TRUE if the node has disk pressure due to inodes exists on the node, FALSE otherwise func hasInodePressure(f *framework.Framework, testCondition string) (bool, error) { localNodeStatus := getLocalNode(f).Status - _, pressure := v1.GetNodeCondition(&localNodeStatus, v1.NodeDiskPressure) + _, pressure := nodeutil.GetNodeCondition(&localNodeStatus, v1.NodeDiskPressure) Expect(pressure).NotTo(BeNil()) hasPressure := pressure.Status == v1.ConditionTrue By(fmt.Sprintf("checking if pod has %s: %v", testCondition, hasPressure)) diff --git a/test/e2e_node/memory_eviction_test.go b/test/e2e_node/memory_eviction_test.go index 41c32c0eb1..87f3e8fe65 100644 --- a/test/e2e_node/memory_eviction_test.go +++ b/test/e2e_node/memory_eviction_test.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" + nodeutil "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/apis/componentconfig" 
"k8s.io/kubernetes/test/e2e/framework" @@ -71,7 +72,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu return fmt.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items) } node := nodeList.Items[0] - _, pressure := v1.GetNodeCondition(&node.Status, v1.NodeMemoryPressure) + _, pressure := nodeutil.GetNodeCondition(&node.Status, v1.NodeMemoryPressure) if pressure != nil && pressure.Status == v1.ConditionTrue { return fmt.Errorf("node is still reporting memory pressure condition: %s", pressure) } @@ -200,7 +201,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu glog.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items) } node := nodeList.Items[0] - _, pressure := v1.GetNodeCondition(&node.Status, v1.NodeMemoryPressure) + _, pressure := nodeutil.GetNodeCondition(&node.Status, v1.NodeMemoryPressure) glog.Infof("node pressure condition: %s", pressure) // NOTE/TODO(mtaufen): Also log (at least temporarily) the actual memory consumption on the node. 
diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index e6515aa9f5..6513649529 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/api/v1" + nodeutil "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1" "k8s.io/kubernetes/test/e2e/framework" @@ -435,7 +436,7 @@ func verifyNodeCondition(n coreclientset.NodeInterface, condition v1.NodeConditi if err != nil { return err } - _, c := v1.GetNodeCondition(&node.Status, condition) + _, c := nodeutil.GetNodeCondition(&node.Status, condition) if c == nil { return fmt.Errorf("node condition %q not found", condition) } diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index 170410ed84..adeb794ce1 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -33,6 +33,7 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1/ref" storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions" @@ -259,7 +260,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { // putting a bind manually on a pv should only match the claim it is bound to rand.Seed(time.Now().Unix()) claim := claims[rand.Intn(maxClaims-1)] - claimRef, err := v1.GetReference(api.Scheme, claim) + claimRef, err := ref.GetReference(api.Scheme, claim) if err != nil { t.Fatalf("Unexpected error getting claimRef: %v", err) } @@ -766,7 +767,7 @@ func 
TestPersistentVolumeControllerStartup(t *testing.T) { pv := createPV(pvName, "/tmp/foo"+strconv.Itoa(i), "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain) - claimRef, err := v1.GetReference(api.Scheme, newPVC) + claimRef, err := ref.GetReference(api.Scheme, newPVC) if err != nil { glog.V(3).Infof("unexpected error getting claim reference: %v", err) return diff --git a/test/utils/conditions.go b/test/utils/conditions.go index fc9bb1e1c9..5deebbc68e 100644 --- a/test/utils/conditions.go +++ b/test/utils/conditions.go @@ -111,7 +111,7 @@ func PodNotReady(p *v1.Pod) (bool, error) { } // podReady returns whether pod has a condition of Ready with a status of true. -// TODO: should be replaced with v1.IsPodReady +// TODO: should be replaced with podutil.IsPodReady func PodReady(pod *v1.Pod) bool { for _, cond := range pod.Status.Conditions { if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {