mirror of https://github.com/k3s-io/k3s
Move pkg/api/v1/ref.go and pkg/api/v1/resource.go to subpackages. Move some functions from resource.go to pkg/api/v1/node and pkg/api/v1/pod.
parent
f96b187fcb
commit
4f9591b1de
@@ -21,7 +21,7 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/pkg/api:go_default_library",
        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
        "//vendor/k8s.io/client-go/pkg/api/v1/node:go_default_library",
        "//vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1:go_default_library",
    ],
)

@@ -26,7 +26,7 @@ import (
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/api"
	"k8s.io/client-go/pkg/api/v1"
	nodeutil "k8s.io/client-go/pkg/api/v1/node"
	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"

@@ -51,7 +51,7 @@ func Validate(kubeconfigPath string) error {
			return false, nil
		}
		n := &nodeList.Items[0]
		if !v1.IsNodeReady(n) {
		if !nodeutil.IsNodeReady(n) {
			fmt.Println("[validate] First node has registered, but is not ready yet")
			return false, nil
		}
@@ -61,6 +61,7 @@ pkg/api/pod
pkg/api/resource
pkg/api/service
pkg/api/v1
pkg/api/v1/node
pkg/api/v1/service
pkg/apis/abac/v0
pkg/apis/abac/v1beta1
@@ -10,7 +10,7 @@ load(

go_library(
    name = "go_default_library",
    srcs = ["resource_helpers.go"],
    srcs = ["helpers.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",

@@ -33,7 +33,7 @@ filegroup(

go_test(
    name = "go_default_test",
    srcs = ["resource_helpers_test.go"],
    srcs = ["helpers_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
@@ -18,9 +18,9 @@ go_library(
        "generate.go",
        "generated.pb.go",
        "meta.go",
        "ref.go",
        "objectreference.go",
        "register.go",
        "resource_helpers.go",
        "resource.go",
        "taint.go",
        "toleration.go",
        "types.generated.go",

@@ -39,7 +39,6 @@ go_library(
        "//vendor/github.com/gogo/protobuf/proto:go_default_library",
        "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library",
        "//vendor/github.com/ugorji/go/codec:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",

@@ -79,17 +78,11 @@ go_test(
go_test(
    name = "go_default_test",
    srcs = [
        "resource_helpers_test.go",
        "taint_test.go",
        "toleration_test.go",
    ],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    ],
)

filegroup(

@@ -105,7 +98,10 @@ filegroup(
        ":package-srcs",
        "//pkg/api/v1/endpoints:all-srcs",
        "//pkg/api/v1/helper:all-srcs",
        "//pkg/api/v1/node:all-srcs",
        "//pkg/api/v1/pod:all-srcs",
        "//pkg/api/v1/ref:all-srcs",
        "//pkg/api/v1/resource:all-srcs",
        "//pkg/api/v1/service:all-srcs",
        "//pkg/api/v1/validation:all-srcs",
    ],
@@ -0,0 +1,28 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["util.go"],
    tags = ["automanaged"],
    deps = ["//pkg/api/v1:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
@@ -0,0 +1,47 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// TODO: merge with pkg/util/node

package node

import (
	"k8s.io/kubernetes/pkg/api/v1"
)

// GetNodeCondition extracts the provided condition from the given status and returns that.
// Returns nil and -1 if the condition is not present, and the index of the located condition.
func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) {
	if status == nil {
		return -1, nil
	}
	for i := range status.Conditions {
		if status.Conditions[i].Type == conditionType {
			return i, &status.Conditions[i]
		}
	}
	return -1, nil
}

// IsNodeReady returns true if a node is ready; false otherwise.
func IsNodeReady(node *v1.Node) bool {
	for _, c := range node.Status.Conditions {
		if c.Type == v1.NodeReady {
			return c.Status == v1.ConditionTrue
		}
	}
	return false
}
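A minimal usage sketch for the new node helpers above (import paths as added in this commit; the Node literal is illustrative, not taken from the tree):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
)

func main() {
	// A node whose Ready condition is True.
	node := &v1.Node{
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{Type: v1.NodeReady, Status: v1.ConditionTrue},
			},
		},
	}

	// IsNodeReady scans Status.Conditions for the Ready condition.
	fmt.Println("ready:", nodeutil.IsNodeReady(node))

	// GetNodeCondition returns the index and a pointer to the condition,
	// or -1 and nil when it is absent.
	if i, cond := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady); cond != nil {
		fmt.Printf("condition %d: %s=%s\n", i, cond.Type, cond.Status)
	}
}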
@@ -0,0 +1,33 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that
// intend only to get a reference to that object. This simplifies the event recording interface.
func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}

func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}

func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
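These methods make *ObjectReference satisfy schema.ObjectKind, so a reference can carry and reproduce its apiVersion/kind pair. A hedged round-trip sketch, assuming this tree's import paths:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	ref := &v1.ObjectReference{}

	// SetGroupVersionKind splits the GVK into APIVersion and Kind.
	ref.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "Deployment"})
	fmt.Println(ref.APIVersion, ref.Kind) // apps/v1beta1 Deployment

	// GroupVersionKind reassembles the same GVK from those two fields.
	fmt.Println(ref.GroupVersionKind())
}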
@@ -14,6 +14,7 @@ go_library(
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
    ],
)

@@ -25,6 +26,7 @@ go_test(
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
@@ -19,7 +19,9 @@ package pod
import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/api/v1"
)

@@ -188,3 +190,105 @@ func visitContainerSecretNames(container *v1.Container, visitor func(string) boo
	}
	return true
}

// GetContainerStatus extracts the status of container "name" from "statuses".
// It also returns if "name" exists.
func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) {
	for i := range statuses {
		if statuses[i].Name == name {
			return statuses[i], true
		}
	}
	return v1.ContainerStatus{}, false
}

// GetExistingContainerStatus extracts the status of container "name" from "statuses",
// and returns empty status if "name" does not exist.
func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus {
	for i := range statuses {
		if statuses[i].Name == name {
			return statuses[i]
		}
	}
	return v1.ContainerStatus{}
}

// IsPodAvailable returns true if a pod is available; false otherwise.
// Precondition for an available pod is that it must be ready. On top
// of that, there are two cases when a pod can be considered available:
// 1. minReadySeconds == 0, or
// 2. LastTransitionTime (is set) + minReadySeconds < current time
func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool {
	if !IsPodReady(pod) {
		return false
	}

	c := GetPodReadyCondition(pod.Status)
	minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
	if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) {
		return true
	}
	return false
}

// IsPodReady returns true if a pod is ready; false otherwise.
func IsPodReady(pod *v1.Pod) bool {
	return IsPodReadyConditionTrue(pod.Status)
}

// IsPodReadyConditionTrue returns true if a pod's Ready condition is true; false otherwise.
func IsPodReadyConditionTrue(status v1.PodStatus) bool {
	condition := GetPodReadyCondition(status)
	return condition != nil && condition.Status == v1.ConditionTrue
}

// Extracts the pod ready condition from the given status and returns that.
// Returns nil if the condition is not present.
func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition {
	_, condition := GetPodCondition(&status, v1.PodReady)
	return condition
}

// GetPodCondition extracts the provided condition from the given status and returns that.
// Returns nil and -1 if the condition is not present, and the index of the located condition.
func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
	if status == nil {
		return -1, nil
	}
	for i := range status.Conditions {
		if status.Conditions[i].Type == conditionType {
			return i, &status.Conditions[i]
		}
	}
	return -1, nil
}

// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the
// status has changed.
// Returns true if pod condition has changed or has been added.
func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool {
	condition.LastTransitionTime = metav1.Now()
	// Try to find this pod condition.
	conditionIndex, oldCondition := GetPodCondition(status, condition.Type)

	if oldCondition == nil {
		// We are adding new pod condition.
		status.Conditions = append(status.Conditions, *condition)
		return true
	} else {
		// We are updating an existing condition, so we need to check if it has changed.
		if condition.Status == oldCondition.Status {
			condition.LastTransitionTime = oldCondition.LastTransitionTime
		}

		isEqual := condition.Status == oldCondition.Status &&
			condition.Reason == oldCondition.Reason &&
			condition.Message == oldCondition.Message &&
			condition.LastProbeTime.Equal(oldCondition.LastProbeTime) &&
			condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime)

		status.Conditions[conditionIndex] = *condition
		// Return true if one of the fields has changed.
		return !isEqual
	}
}
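A brief sketch of how the relocated pod helpers compose: UpdatePodCondition adds or refreshes the Ready condition (preserving LastTransitionTime when the status is unchanged), and IsPodAvailable layers the minReadySeconds gate on top of readiness. Paths assume this tree; the pod value is illustrative:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

func main() {
	pod := &v1.Pod{}

	// Mark the pod Ready; returns true because the condition was added.
	changed := podutil.UpdatePodCondition(&pod.Status, &v1.PodCondition{
		Type:   v1.PodReady,
		Status: v1.ConditionTrue,
	})
	fmt.Println("condition changed:", changed) // true

	// Ready, and minReadySeconds == 0, so the pod counts as available now.
	fmt.Println("available:", podutil.IsPodAvailable(pod, 0, metav1.Now()))

	// With minReadySeconds == 30 it is not yet available: the Ready
	// condition's LastTransitionTime was set only a moment ago.
	fmt.Println("available after 30s gate:", podutil.IsPodAvailable(pod, 30, metav1.Now()))
}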
@ -20,7 +20,9 @@ import (
|
|||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
|
@ -342,3 +344,58 @@ func collectSecretPaths(t *testing.T, path *field.Path, name string, tp reflect.
|
|||
|
||||
return secretPaths
|
||||
}
|
||||
|
||||
func newPod(now metav1.Time, ready bool, beforeSec int) *v1.Pod {
|
||||
conditionStatus := v1.ConditionFalse
|
||||
if ready {
|
||||
conditionStatus = v1.ConditionTrue
|
||||
}
|
||||
return &v1.Pod{
|
||||
Status: v1.PodStatus{
|
||||
Conditions: []v1.PodCondition{
|
||||
{
|
||||
Type: v1.PodReady,
|
||||
LastTransitionTime: metav1.NewTime(now.Time.Add(-1 * time.Duration(beforeSec) * time.Second)),
|
||||
Status: conditionStatus,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsPodAvailable(t *testing.T) {
|
||||
now := metav1.Now()
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
minReadySeconds int32
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
pod: newPod(now, false, 0),
|
||||
minReadySeconds: 0,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
pod: newPod(now, true, 0),
|
||||
minReadySeconds: 1,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
pod: newPod(now, true, 0),
|
||||
minReadySeconds: 0,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
pod: newPod(now, true, 51),
|
||||
minReadySeconds: 50,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
isAvailable := IsPodAvailable(test.pod, test.minReadySeconds, now)
|
||||
if isAvailable != test.expected {
|
||||
t.Errorf("[tc #%d] expected available pod: %t, got: %t", i, test.expected, isAvailable)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,32 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["ref.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
package ref
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
@ -22,10 +22,9 @@ import (
|
|||
"net/url"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -38,11 +37,11 @@ var (
|
|||
// object, or an error if the object doesn't follow the conventions
|
||||
// that would allow this.
|
||||
// TODO: should take a meta.Interface see http://issue.k8s.io/7127
|
||||
func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) {
|
||||
func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) {
|
||||
if obj == nil {
|
||||
return nil, ErrNilObject
|
||||
}
|
||||
if ref, ok := obj.(*ObjectReference); ok {
|
||||
if ref, ok := obj.(*v1.ObjectReference); ok {
|
||||
// Don't make a reference to a reference.
|
||||
return ref, nil
|
||||
}
|
||||
|
@ -94,14 +93,14 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference,
|
|||
|
||||
// only has list metadata
|
||||
if objectMeta == nil {
|
||||
return &ObjectReference{
|
||||
return &v1.ObjectReference{
|
||||
Kind: kind,
|
||||
APIVersion: version,
|
||||
ResourceVersion: listMeta.GetResourceVersion(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &ObjectReference{
|
||||
return &v1.ObjectReference{
|
||||
Kind: kind,
|
||||
APIVersion: version,
|
||||
Name: objectMeta.GetName(),
|
||||
|
@ -112,7 +111,7 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference,
|
|||
}
|
||||
|
||||
// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath.
|
||||
func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) {
|
||||
func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*v1.ObjectReference, error) {
|
||||
ref, err := GetReference(scheme, obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -120,14 +119,3 @@ func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath s
|
|||
ref.FieldPath = fieldPath
|
||||
return ref, nil
|
||||
}
|
||||
|
||||
// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that
|
||||
// intend only to get a reference to that object. This simplifies the event recording interface.
|
||||
func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
|
||||
obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
|
||||
}
|
||||
func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
|
||||
return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
|
||||
}
|
||||
|
||||
func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
|
|
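GetReference now lives in pkg/api/v1/ref and returns a *v1.ObjectReference, which is why later call sites in this commit switch from v1.GetReference to ref.GetReference. A hedged sketch of the new call, assuming this tree's packages (the pod metadata is made up for illustration):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1/ref"
)

func main() {
	pod := &v1.Pod{
		TypeMeta:   metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{Name: "web-0", Namespace: "default"},
	}

	// GetReference resolves kind/version from the object and scheme and
	// returns a reference suitable for events and "created-by" annotations.
	objRef, err := ref.GetReference(api.Scheme, pod)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(objRef.Kind, objRef.Namespace+"/"+objRef.Name)
}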
@@ -0,0 +1,56 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"k8s.io/apimachinery/pkg/api/resource"
)

// Returns string version of ResourceName.
func (self ResourceName) String() string {
	return string(self)
}

// Returns the CPU limit if specified.
func (self *ResourceList) Cpu() *resource.Quantity {
	if val, ok := (*self)[ResourceCPU]; ok {
		return &val
	}
	return &resource.Quantity{Format: resource.DecimalSI}
}

// Returns the Memory limit if specified.
func (self *ResourceList) Memory() *resource.Quantity {
	if val, ok := (*self)[ResourceMemory]; ok {
		return &val
	}
	return &resource.Quantity{Format: resource.BinarySI}
}

func (self *ResourceList) Pods() *resource.Quantity {
	if val, ok := (*self)[ResourcePods]; ok {
		return &val
	}
	return &resource.Quantity{}
}

func (self *ResourceList) NvidiaGPU() *resource.Quantity {
	if val, ok := (*self)[ResourceNvidiaGPU]; ok {
		return &val
	}
	return &resource.Quantity{}
}
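The accessors that stay in package v1 fall back to an empty Quantity with the documented format when a key is absent. A short sketch, assuming this tree's import paths:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	rl := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("128Mi"),
	}

	fmt.Println(rl.Cpu().String())    // 500m
	fmt.Println(rl.Memory().String()) // 128Mi

	// Missing keys come back as zero quantities with the documented format.
	empty := v1.ResourceList{}
	fmt.Println(empty.Cpu().Format)   // DecimalSI
	fmt.Println(empty.Pods().Value()) // 0
}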
@ -0,0 +1,45 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["helpers_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["helpers.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
|
@ -0,0 +1,200 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package resource
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
||||
// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
|
||||
// containers of the pod.
|
||||
func PodRequestsAndLimits(pod *v1.Pod) (reqs map[v1.ResourceName]resource.Quantity, limits map[v1.ResourceName]resource.Quantity, err error) {
|
||||
reqs, limits = map[v1.ResourceName]resource.Quantity{}, map[v1.ResourceName]resource.Quantity{}
|
||||
for _, container := range pod.Spec.Containers {
|
||||
for name, quantity := range container.Resources.Requests {
|
||||
if value, ok := reqs[name]; !ok {
|
||||
reqs[name] = *quantity.Copy()
|
||||
} else {
|
||||
value.Add(quantity)
|
||||
reqs[name] = value
|
||||
}
|
||||
}
|
||||
for name, quantity := range container.Resources.Limits {
|
||||
if value, ok := limits[name]; !ok {
|
||||
limits[name] = *quantity.Copy()
|
||||
} else {
|
||||
value.Add(quantity)
|
||||
limits[name] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
// init containers define the minimum of any resource
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
for name, quantity := range container.Resources.Requests {
|
||||
value, ok := reqs[name]
|
||||
if !ok {
|
||||
reqs[name] = *quantity.Copy()
|
||||
continue
|
||||
}
|
||||
if quantity.Cmp(value) > 0 {
|
||||
reqs[name] = *quantity.Copy()
|
||||
}
|
||||
}
|
||||
for name, quantity := range container.Resources.Limits {
|
||||
value, ok := limits[name]
|
||||
if !ok {
|
||||
limits[name] = *quantity.Copy()
|
||||
continue
|
||||
}
|
||||
if quantity.Cmp(value) > 0 {
|
||||
limits[name] = *quantity.Copy()
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// finds and returns the request for a specific resource.
|
||||
func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
|
||||
if resource == v1.ResourcePods {
|
||||
return 1
|
||||
}
|
||||
totalResources := int64(0)
|
||||
for _, container := range pod.Spec.Containers {
|
||||
if rQuantity, ok := container.Resources.Requests[resource]; ok {
|
||||
if resource == v1.ResourceCPU {
|
||||
totalResources += rQuantity.MilliValue()
|
||||
} else {
|
||||
totalResources += rQuantity.Value()
|
||||
}
|
||||
}
|
||||
}
|
||||
// take max_resource(sum_pod, any_init_container)
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
if rQuantity, ok := container.Resources.Requests[resource]; ok {
|
||||
if resource == v1.ResourceCPU && rQuantity.MilliValue() > totalResources {
|
||||
totalResources = rQuantity.MilliValue()
|
||||
} else if rQuantity.Value() > totalResources {
|
||||
totalResources = rQuantity.Value()
|
||||
}
|
||||
}
|
||||
}
|
||||
return totalResources
|
||||
}
|
||||
|
||||
// ExtractResourceValueByContainerName extracts the value of a resource
|
||||
// by providing container name
|
||||
func ExtractResourceValueByContainerName(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string) (string, error) {
|
||||
container, err := findContainerInPod(pod, containerName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return ExtractContainerResourceValue(fs, container)
|
||||
}
|
||||
|
||||
// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource
|
||||
// by providing container name and node allocatable
|
||||
func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string, nodeAllocatable v1.ResourceList) (string, error) {
|
||||
realContainer, err := findContainerInPod(pod, containerName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
containerCopy, err := api.Scheme.DeepCopy(realContainer)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to perform a deep copy of container object: %v", err)
|
||||
}
|
||||
|
||||
container, ok := containerCopy.(*v1.Container)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unexpected type returned from deep copy of container object")
|
||||
}
|
||||
|
||||
MergeContainerResourceLimits(container, nodeAllocatable)
|
||||
|
||||
return ExtractContainerResourceValue(fs, container)
|
||||
}
|
||||
|
||||
// ExtractContainerResourceValue extracts the value of a resource
|
||||
// in an already known container
|
||||
func ExtractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.Container) (string, error) {
|
||||
divisor := resource.Quantity{}
|
||||
if divisor.Cmp(fs.Divisor) == 0 {
|
||||
divisor = resource.MustParse("1")
|
||||
} else {
|
||||
divisor = fs.Divisor
|
||||
}
|
||||
|
||||
switch fs.Resource {
|
||||
case "limits.cpu":
|
||||
return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor)
|
||||
case "limits.memory":
|
||||
return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor)
|
||||
case "requests.cpu":
|
||||
return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor)
|
||||
case "requests.memory":
|
||||
return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor)
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource)
|
||||
}
|
||||
|
||||
// convertResourceCPUToString converts cpu value to the format of divisor and returns
|
||||
// ceiling of the value.
|
||||
func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) {
|
||||
c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue())))
|
||||
return strconv.FormatInt(c, 10), nil
|
||||
}
|
||||
|
||||
// convertResourceMemoryToString converts memory value to the format of divisor and returns
|
||||
// ceiling of the value.
|
||||
func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) {
|
||||
m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value())))
|
||||
return strconv.FormatInt(m, 10), nil
|
||||
}
|
||||
|
||||
// findContainerInPod finds a container by its name in the provided pod
|
||||
func findContainerInPod(pod *v1.Pod, containerName string) (*v1.Container, error) {
|
||||
for _, container := range pod.Spec.Containers {
|
||||
if container.Name == containerName {
|
||||
return &container, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("container %s not found", containerName)
|
||||
}
|
||||
|
||||
// MergeContainerResourceLimits checks if a limit is applied for
|
||||
// the container, and if not, it sets the limit to the passed resource list.
|
||||
func MergeContainerResourceLimits(container *v1.Container,
|
||||
allocatable v1.ResourceList) {
|
||||
if container.Resources.Limits == nil {
|
||||
container.Resources.Limits = make(v1.ResourceList)
|
||||
}
|
||||
for _, resource := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} {
|
||||
if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() {
|
||||
if cap, exists := allocatable[resource]; exists {
|
||||
container.Resources.Limits[resource] = *cap.Copy()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
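A rough usage sketch for the new pkg/api/v1/resource helpers above: PodRequestsAndLimits sums requests and limits across containers (init containers contribute via max), and ExtractContainerResourceValue renders one resource value in units of a divisor. Paths assume this tree; the pod spec is illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	resourceutil "k8s.io/kubernetes/pkg/api/v1/resource"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m")},
					Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
				},
			}},
		},
	}

	// Summed requests and limits over all containers of the pod.
	reqs, limits, err := resourceutil.PodRequestsAndLimits(pod)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	cpuReq := reqs[v1.ResourceCPU]
	cpuLim := limits[v1.ResourceCPU]
	fmt.Println(cpuReq.String(), cpuLim.String()) // 250m 1

	// limits.cpu expressed in whole cores (the divisor defaults to "1").
	val, _ := resourceutil.ExtractContainerResourceValue(
		&v1.ResourceFieldSelector{Resource: "limits.cpu"},
		&pod.Spec.Containers[0])
	fmt.Println(val) // 1
}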
@ -14,23 +14,22 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
package resource
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
||||
func TestResourceHelpers(t *testing.T) {
|
||||
cpuLimit := resource.MustParse("10")
|
||||
memoryLimit := resource.MustParse("10G")
|
||||
resourceSpec := ResourceRequirements{
|
||||
Limits: ResourceList{
|
||||
resourceSpec := v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
"cpu": cpuLimit,
|
||||
"memory": memoryLimit,
|
||||
"kube.io/storage": memoryLimit,
|
||||
|
@ -42,8 +41,8 @@ func TestResourceHelpers(t *testing.T) {
|
|||
if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 {
|
||||
t.Errorf("expected memorylimit %v, got %v", memoryLimit, res)
|
||||
}
|
||||
resourceSpec = ResourceRequirements{
|
||||
Limits: ResourceList{
|
||||
resourceSpec = v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
"memory": memoryLimit,
|
||||
"kube.io/storage": memoryLimit,
|
||||
},
|
||||
|
@ -57,7 +56,7 @@ func TestResourceHelpers(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDefaultResourceHelpers(t *testing.T) {
|
||||
resourceList := ResourceList{}
|
||||
resourceList := v1.ResourceList{}
|
||||
if resourceList.Cpu().Format != resource.DecimalSI {
|
||||
t.Errorf("expected %v, actual %v", resource.DecimalSI, resourceList.Cpu().Format)
|
||||
}
|
||||
|
@ -66,71 +65,16 @@ func TestDefaultResourceHelpers(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func newPod(now metav1.Time, ready bool, beforeSec int) *Pod {
|
||||
conditionStatus := ConditionFalse
|
||||
if ready {
|
||||
conditionStatus = ConditionTrue
|
||||
}
|
||||
return &Pod{
|
||||
Status: PodStatus{
|
||||
Conditions: []PodCondition{
|
||||
{
|
||||
Type: PodReady,
|
||||
LastTransitionTime: metav1.NewTime(now.Time.Add(-1 * time.Duration(beforeSec) * time.Second)),
|
||||
Status: conditionStatus,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsPodAvailable(t *testing.T) {
|
||||
now := metav1.Now()
|
||||
tests := []struct {
|
||||
pod *Pod
|
||||
minReadySeconds int32
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
pod: newPod(now, false, 0),
|
||||
minReadySeconds: 0,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
pod: newPod(now, true, 0),
|
||||
minReadySeconds: 1,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
pod: newPod(now, true, 0),
|
||||
minReadySeconds: 0,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
pod: newPod(now, true, 51),
|
||||
minReadySeconds: 50,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
isAvailable := IsPodAvailable(test.pod, test.minReadySeconds, now)
|
||||
if isAvailable != test.expected {
|
||||
t.Errorf("[tc #%d] expected available pod: %t, got: %t", i, test.expected, isAvailable)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractResourceValue(t *testing.T) {
|
||||
cases := []struct {
|
||||
fs *ResourceFieldSelector
|
||||
pod *Pod
|
||||
fs *v1.ResourceFieldSelector
|
||||
pod *v1.Pod
|
||||
cName string
|
||||
expectedValue string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
fs: &ResourceFieldSelector{
|
||||
fs: &v1.ResourceFieldSelector{
|
||||
Resource: "limits.cpu",
|
||||
},
|
||||
cName: "foo",
|
||||
|
@ -138,7 +82,7 @@ func TestExtractResourceValue(t *testing.T) {
|
|||
expectedValue: "9",
|
||||
},
|
||||
{
|
||||
fs: &ResourceFieldSelector{
|
||||
fs: &v1.ResourceFieldSelector{
|
||||
Resource: "requests.cpu",
|
||||
},
|
||||
cName: "foo",
|
||||
|
@ -146,7 +90,7 @@ func TestExtractResourceValue(t *testing.T) {
|
|||
expectedValue: "0",
|
||||
},
|
||||
{
|
||||
fs: &ResourceFieldSelector{
|
||||
fs: &v1.ResourceFieldSelector{
|
||||
Resource: "requests.cpu",
|
||||
},
|
||||
cName: "foo",
|
||||
|
@ -154,7 +98,7 @@ func TestExtractResourceValue(t *testing.T) {
|
|||
expectedValue: "8",
|
||||
},
|
||||
{
|
||||
fs: &ResourceFieldSelector{
|
||||
fs: &v1.ResourceFieldSelector{
|
||||
Resource: "requests.cpu",
|
||||
},
|
||||
cName: "foo",
|
||||
|
@ -162,7 +106,7 @@ func TestExtractResourceValue(t *testing.T) {
|
|||
expectedValue: "1",
|
||||
},
|
||||
{
|
||||
fs: &ResourceFieldSelector{
|
||||
fs: &v1.ResourceFieldSelector{
|
||||
Resource: "requests.cpu",
|
||||
Divisor: resource.MustParse("100m"),
|
||||
},
|
||||
|
@ -171,7 +115,7 @@ func TestExtractResourceValue(t *testing.T) {
|
|||
expectedValue: "12",
|
||||
},
|
||||
{
|
||||
fs: &ResourceFieldSelector{
|
||||
fs: &v1.ResourceFieldSelector{
|
||||
Resource: "requests.memory",
|
||||
},
|
||||
cName: "foo",
|
||||
|
@ -179,7 +123,7 @@ func TestExtractResourceValue(t *testing.T) {
|
|||
expectedValue: "104857600",
|
||||
},
|
||||
{
|
||||
fs: &ResourceFieldSelector{
|
||||
fs: &v1.ResourceFieldSelector{
|
||||
Resource: "requests.memory",
|
||||
Divisor: resource.MustParse("1Mi"),
|
||||
},
|
||||
|
@ -188,7 +132,7 @@ func TestExtractResourceValue(t *testing.T) {
|
|||
expectedValue: "100",
|
||||
},
|
||||
{
|
||||
fs: &ResourceFieldSelector{
|
||||
fs: &v1.ResourceFieldSelector{
|
||||
Resource: "limits.memory",
|
||||
},
|
||||
cName: "foo",
|
||||
|
@ -208,26 +152,26 @@ func TestExtractResourceValue(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *Pod {
|
||||
resources := ResourceRequirements{
|
||||
Limits: make(ResourceList),
|
||||
Requests: make(ResourceList),
|
||||
func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *v1.Pod {
|
||||
resources := v1.ResourceRequirements{
|
||||
Limits: make(v1.ResourceList),
|
||||
Requests: make(v1.ResourceList),
|
||||
}
|
||||
if cpuLimit != "" {
|
||||
resources.Limits[ResourceCPU] = resource.MustParse(cpuLimit)
|
||||
resources.Limits[v1.ResourceCPU] = resource.MustParse(cpuLimit)
|
||||
}
|
||||
if memoryLimit != "" {
|
||||
resources.Limits[ResourceMemory] = resource.MustParse(memoryLimit)
|
||||
resources.Limits[v1.ResourceMemory] = resource.MustParse(memoryLimit)
|
||||
}
|
||||
if cpuRequest != "" {
|
||||
resources.Requests[ResourceCPU] = resource.MustParse(cpuRequest)
|
||||
resources.Requests[v1.ResourceCPU] = resource.MustParse(cpuRequest)
|
||||
}
|
||||
if memoryRequest != "" {
|
||||
resources.Requests[ResourceMemory] = resource.MustParse(memoryRequest)
|
||||
resources.Requests[v1.ResourceMemory] = resource.MustParse(memoryRequest)
|
||||
}
|
||||
return &Pod{
|
||||
Spec: PodSpec{
|
||||
Containers: []Container{
|
||||
return &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: cname,
|
||||
Resources: resources,
|
|
@ -1,358 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
)
|
||||
|
||||
// Returns string version of ResourceName.
|
||||
func (self ResourceName) String() string {
|
||||
return string(self)
|
||||
}
|
||||
|
||||
// Returns the CPU limit if specified.
|
||||
func (self *ResourceList) Cpu() *resource.Quantity {
|
||||
if val, ok := (*self)[ResourceCPU]; ok {
|
||||
return &val
|
||||
}
|
||||
return &resource.Quantity{Format: resource.DecimalSI}
|
||||
}
|
||||
|
||||
// Returns the Memory limit if specified.
|
||||
func (self *ResourceList) Memory() *resource.Quantity {
|
||||
if val, ok := (*self)[ResourceMemory]; ok {
|
||||
return &val
|
||||
}
|
||||
return &resource.Quantity{Format: resource.BinarySI}
|
||||
}
|
||||
|
||||
func (self *ResourceList) Pods() *resource.Quantity {
|
||||
if val, ok := (*self)[ResourcePods]; ok {
|
||||
return &val
|
||||
}
|
||||
return &resource.Quantity{}
|
||||
}
|
||||
|
||||
func (self *ResourceList) NvidiaGPU() *resource.Quantity {
|
||||
if val, ok := (*self)[ResourceNvidiaGPU]; ok {
|
||||
return &val
|
||||
}
|
||||
return &resource.Quantity{}
|
||||
}
|
||||
|
||||
func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) {
|
||||
for i := range statuses {
|
||||
if statuses[i].Name == name {
|
||||
return statuses[i], true
|
||||
}
|
||||
}
|
||||
return ContainerStatus{}, false
|
||||
}
|
||||
|
||||
func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus {
|
||||
for i := range statuses {
|
||||
if statuses[i].Name == name {
|
||||
return statuses[i]
|
||||
}
|
||||
}
|
||||
return ContainerStatus{}
|
||||
}
|
||||
|
||||
// IsPodAvailable returns true if a pod is available; false otherwise.
|
||||
// Precondition for an available pod is that it must be ready. On top
|
||||
// of that, there are two cases when a pod can be considered available:
|
||||
// 1. minReadySeconds == 0, or
|
||||
// 2. LastTransitionTime (is set) + minReadySeconds < current time
|
||||
func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool {
|
||||
if !IsPodReady(pod) {
|
||||
return false
|
||||
}
|
||||
|
||||
c := GetPodReadyCondition(pod.Status)
|
||||
minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
|
||||
if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsPodReady returns true if a pod is ready; false otherwise.
|
||||
func IsPodReady(pod *Pod) bool {
|
||||
return IsPodReadyConditionTrue(pod.Status)
|
||||
}
|
||||
|
||||
// IsPodReady retruns true if a pod is ready; false otherwise.
|
||||
func IsPodReadyConditionTrue(status PodStatus) bool {
|
||||
condition := GetPodReadyCondition(status)
|
||||
return condition != nil && condition.Status == ConditionTrue
|
||||
}
|
||||
|
||||
// Extracts the pod ready condition from the given status and returns that.
|
||||
// Returns nil if the condition is not present.
|
||||
func GetPodReadyCondition(status PodStatus) *PodCondition {
|
||||
_, condition := GetPodCondition(&status, PodReady)
|
||||
return condition
|
||||
}
|
||||
|
||||
// GetPodCondition extracts the provided condition from the given status and returns that.
|
||||
// Returns nil and -1 if the condition is not present, and the index of the located condition.
|
||||
func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) {
|
||||
if status == nil {
|
||||
return -1, nil
|
||||
}
|
||||
for i := range status.Conditions {
|
||||
if status.Conditions[i].Type == conditionType {
|
||||
return i, &status.Conditions[i]
|
||||
}
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
// GetNodeCondition extracts the provided condition from the given status and returns that.
|
||||
// Returns nil and -1 if the condition is not present, and the index of the located condition.
|
||||
func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) {
|
||||
if status == nil {
|
||||
return -1, nil
|
||||
}
|
||||
for i := range status.Conditions {
|
||||
if status.Conditions[i].Type == conditionType {
|
||||
return i, &status.Conditions[i]
|
||||
}
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the
|
||||
// status has changed.
|
||||
// Returns true if pod condition has changed or has been added.
|
||||
func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool {
|
||||
condition.LastTransitionTime = metav1.Now()
|
||||
// Try to find this pod condition.
|
||||
conditionIndex, oldCondition := GetPodCondition(status, condition.Type)
|
||||
|
||||
if oldCondition == nil {
|
||||
// We are adding new pod condition.
|
||||
status.Conditions = append(status.Conditions, *condition)
|
||||
return true
|
||||
} else {
|
||||
// We are updating an existing condition, so we need to check if it has changed.
|
||||
if condition.Status == oldCondition.Status {
|
||||
condition.LastTransitionTime = oldCondition.LastTransitionTime
|
||||
}
|
||||
|
||||
isEqual := condition.Status == oldCondition.Status &&
|
||||
condition.Reason == oldCondition.Reason &&
|
||||
condition.Message == oldCondition.Message &&
|
||||
condition.LastProbeTime.Equal(oldCondition.LastProbeTime) &&
|
||||
condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime)
|
||||
|
||||
status.Conditions[conditionIndex] = *condition
|
||||
// Return true if one of the fields have changed.
|
||||
return !isEqual
|
||||
}
|
||||
}
|
||||
|
||||
// IsNodeReady returns true if a node is ready; false otherwise.
|
||||
func IsNodeReady(node *Node) bool {
|
||||
for _, c := range node.Status.Conditions {
|
||||
if c.Type == NodeReady {
|
||||
return c.Status == ConditionTrue
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
|
||||
// containers of the pod.
|
||||
func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) {
|
||||
reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{}
|
||||
for _, container := range pod.Spec.Containers {
|
||||
for name, quantity := range container.Resources.Requests {
|
||||
if value, ok := reqs[name]; !ok {
|
||||
reqs[name] = *quantity.Copy()
|
||||
} else {
|
||||
value.Add(quantity)
|
||||
reqs[name] = value
|
||||
}
|
||||
}
|
||||
for name, quantity := range container.Resources.Limits {
|
||||
if value, ok := limits[name]; !ok {
|
||||
limits[name] = *quantity.Copy()
|
||||
} else {
|
||||
value.Add(quantity)
|
||||
limits[name] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
// init containers define the minimum of any resource
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
for name, quantity := range container.Resources.Requests {
|
||||
value, ok := reqs[name]
|
||||
if !ok {
|
||||
reqs[name] = *quantity.Copy()
|
||||
continue
|
||||
}
|
||||
if quantity.Cmp(value) > 0 {
|
||||
reqs[name] = *quantity.Copy()
|
||||
}
|
||||
}
|
||||
for name, quantity := range container.Resources.Limits {
|
||||
value, ok := limits[name]
|
||||
if !ok {
|
||||
limits[name] = *quantity.Copy()
|
||||
continue
|
||||
}
|
||||
if quantity.Cmp(value) > 0 {
|
||||
limits[name] = *quantity.Copy()
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// finds and returns the request for a specific resource.
|
||||
func GetResourceRequest(pod *Pod, resource ResourceName) int64 {
|
||||
if resource == ResourcePods {
|
||||
return 1
|
||||
}
|
||||
totalResources := int64(0)
|
||||
for _, container := range pod.Spec.Containers {
|
||||
if rQuantity, ok := container.Resources.Requests[resource]; ok {
|
||||
if resource == ResourceCPU {
|
||||
totalResources += rQuantity.MilliValue()
|
||||
} else {
|
||||
totalResources += rQuantity.Value()
|
||||
}
|
||||
}
|
||||
}
|
||||
// take max_resource(sum_pod, any_init_container)
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
if rQuantity, ok := container.Resources.Requests[resource]; ok {
|
||||
if resource == ResourceCPU && rQuantity.MilliValue() > totalResources {
|
||||
totalResources = rQuantity.MilliValue()
|
||||
} else if rQuantity.Value() > totalResources {
|
||||
totalResources = rQuantity.Value()
|
||||
}
|
||||
}
|
||||
}
|
||||
return totalResources
|
||||
}
|
||||
|
||||
// ExtractResourceValueByContainerName extracts the value of a resource
|
||||
// by providing container name
|
||||
func ExtractResourceValueByContainerName(fs *ResourceFieldSelector, pod *Pod, containerName string) (string, error) {
|
||||
container, err := findContainerInPod(pod, containerName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return ExtractContainerResourceValue(fs, container)
|
||||
}
|
||||
|
||||
// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource
|
||||
// by providing container name and node allocatable
|
||||
func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *ResourceFieldSelector, pod *Pod, containerName string, nodeAllocatable ResourceList) (string, error) {
|
||||
realContainer, err := findContainerInPod(pod, containerName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
containerCopy, err := api.Scheme.DeepCopy(realContainer)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to perform a deep copy of container object: %v", err)
|
||||
}
|
||||
|
||||
container, ok := containerCopy.(*Container)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unexpected type returned from deep copy of container object")
|
||||
}
|
||||
|
||||
MergeContainerResourceLimits(container, nodeAllocatable)
|
||||
|
||||
return ExtractContainerResourceValue(fs, container)
|
||||
}
|
||||
|
||||
// ExtractContainerResourceValue extracts the value of a resource
|
||||
// in an already known container
|
||||
func ExtractContainerResourceValue(fs *ResourceFieldSelector, container *Container) (string, error) {
|
||||
divisor := resource.Quantity{}
|
||||
if divisor.Cmp(fs.Divisor) == 0 {
|
||||
divisor = resource.MustParse("1")
|
||||
} else {
|
||||
divisor = fs.Divisor
|
||||
}
|
||||
|
||||
switch fs.Resource {
|
||||
case "limits.cpu":
|
||||
return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor)
|
||||
case "limits.memory":
|
||||
return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor)
|
||||
case "requests.cpu":
|
||||
return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor)
|
||||
case "requests.memory":
|
||||
return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor)
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource)
|
||||
}
|
||||
|
||||
// convertResourceCPUToString converts cpu value to the format of divisor and returns
|
||||
// ceiling of the value.
|
||||
func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) {
|
||||
c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue())))
|
||||
return strconv.FormatInt(c, 10), nil
|
||||
}
|
||||
|
||||
// convertResourceMemoryToString converts memory value to the format of divisor and returns
|
||||
// ceiling of the value.
|
||||
func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) {
|
||||
m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value())))
|
||||
return strconv.FormatInt(m, 10), nil
|
||||
}
|
||||
|
||||
// findContainerInPod finds a container by its name in the provided pod
|
||||
func findContainerInPod(pod *Pod, containerName string) (*Container, error) {
|
||||
for _, container := range pod.Spec.Containers {
|
||||
if container.Name == containerName {
|
||||
return &container, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("container %s not found", containerName)
|
||||
}
|
||||
|
||||
// MergeContainerResourceLimits checks if a limit is applied for
|
||||
// the container, and if not, it sets the limit to the passed resource list.
|
||||
func MergeContainerResourceLimits(container *Container,
|
||||
allocatable ResourceList) {
|
||||
if container.Resources.Limits == nil {
|
||||
container.Resources.Limits = make(ResourceList)
|
||||
}
|
||||
for _, resource := range []ResourceName{ResourceCPU, ResourceMemory} {
|
||||
if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() {
|
||||
if cap, exists := allocatable[resource]; exists {
|
||||
container.Resources.Limits[resource] = *cap.Copy()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -39,6 +39,7 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/ref:go_default_library",
|
||||
"//pkg/apis/policy/v1beta1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/scheme:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1/ref"
|
||||
)
|
||||
|
||||
// The EventExpansion interface allows manually adding extra methods to the EventInterface.
|
||||
|
@ -99,7 +100,7 @@ func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte)
|
|||
// object must match this event's client namespace unless the event client
|
||||
// was made with the "" namespace.
|
||||
func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) {
|
||||
ref, err := v1.GetReference(scheme, objOrRef)
|
||||
ref, err := ref.GetReference(scheme, objOrRef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@ go_library(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
)
|
||||
|
||||
// ErrPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
|
||||
|
@ -83,7 +84,7 @@ func PodRunningAndReady(event watch.Event) (bool, error) {
|
|||
case v1.PodFailed, v1.PodSucceeded:
|
||||
return false, ErrPodCompleted
|
||||
case v1.PodRunning:
|
||||
return v1.IsPodReady(t), nil
|
||||
return podutil.IsPodReady(t), nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
|
|
|
@ -22,6 +22,8 @@ go_library(
|
|||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/helper:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/api/v1/ref:go_default_library",
|
||||
"//pkg/api/validation:go_default_library",
|
||||
"//pkg/apis/authentication/v1:go_default_library",
|
||||
"//pkg/apis/extensions/v1beta1:go_default_library",
|
||||
|
|
|
@ -15,6 +15,7 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/node:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
|
|
|
@ -31,6 +31,7 @@ import (
|
|||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
@ -102,7 +103,7 @@ func (cnc *CloudNodeController) Run() {
|
|||
// Try to get the current node status
|
||||
// If node status is empty, then kubelet has not posted ready status yet. In this case, process next node
|
||||
for rep := 0; rep < nodeStatusUpdateRetry; rep++ {
|
||||
_, currentReadyCondition = v1.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
_, currentReadyCondition = nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
if currentReadyCondition != nil {
|
||||
break
|
||||
}
|
||||
|
|
|
@ -42,6 +42,8 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/api/v1/ref"
|
||||
"k8s.io/kubernetes/pkg/api/validation"
|
||||
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
|
@ -441,7 +443,7 @@ func getPodsAnnotationSet(template *v1.PodTemplateSpec, object runtime.Object) (
|
|||
for k, v := range template.Annotations {
|
||||
desiredAnnotations[k] = v
|
||||
}
|
||||
createdByRef, err := v1.GetReference(api.Scheme, object)
|
||||
createdByRef, err := ref.GetReference(api.Scheme, object)
|
||||
if err != nil {
|
||||
return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err)
|
||||
}
|
||||
|
@ -674,13 +676,13 @@ func (s ByLogging) Less(i, j int) bool {
|
|||
return m[s[i].Status.Phase] < m[s[j].Status.Phase]
|
||||
}
|
||||
// 3. ready < not ready
|
||||
if v1.IsPodReady(s[i]) != v1.IsPodReady(s[j]) {
|
||||
return v1.IsPodReady(s[i])
|
||||
if podutil.IsPodReady(s[i]) != podutil.IsPodReady(s[j]) {
|
||||
return podutil.IsPodReady(s[i])
|
||||
}
|
||||
// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
|
||||
// see https://github.com/kubernetes/kubernetes/issues/22065
|
||||
// 4. Been ready for more time < less time < empty time
|
||||
if v1.IsPodReady(s[i]) && v1.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
|
||||
if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
|
||||
return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i]))
|
||||
}
|
||||
// 5. Pods with containers with higher restart counts < lower restart counts
|
||||
|
@@ -713,14 +715,14 @@ func (s ActivePods) Less(i, j int) bool {
}
// 3. Not ready < ready
// If only one of the pods is not ready, the not ready one is smaller
-	if v1.IsPodReady(s[i]) != v1.IsPodReady(s[j]) {
-	return !v1.IsPodReady(s[i])
+	if podutil.IsPodReady(s[i]) != podutil.IsPodReady(s[j]) {
+	return !podutil.IsPodReady(s[i])
}
// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
// see https://github.com/kubernetes/kubernetes/issues/22065
// 4. Been ready for empty time < less time < more time
// If both pods are ready, the latest ready one is smaller
-	if v1.IsPodReady(s[i]) && v1.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
+	if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
}
// 5. Pods with containers with higher restart counts < lower restart counts
@@ -744,7 +746,7 @@ func afterOrZero(t1, t2 metav1.Time) bool {
}

func podReadyTime(pod *v1.Pod) metav1.Time {
-	if v1.IsPodReady(pod) {
+	if podutil.IsPodReady(pod) {
for _, c := range pod.Status.Conditions {
// we only care about pod ready conditions
if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
|
||||
|
|
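Usage note (not part of the diff): the ByLogging/ActivePods ordering above now leans on podutil.IsPodReady; a small, self-contained sketch of the "not ready sorts first" rule, with illustrative pod literals.

package main

import (
	"fmt"
	"sort"

	"k8s.io/kubernetes/pkg/api/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// byReadiness mimics one rule of ActivePods: not-ready pods sort before ready ones,
// so they are the first candidates for scale-down.
type byReadiness []*v1.Pod

func (s byReadiness) Len() int      { return len(s) }
func (s byReadiness) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byReadiness) Less(i, j int) bool {
	if podutil.IsPodReady(s[i]) != podutil.IsPodReady(s[j]) {
		return !podutil.IsPodReady(s[i])
	}
	return false
}

func main() {
	ready := &v1.Pod{Status: v1.PodStatus{Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}}}
	notReady := &v1.Pod{Status: v1.PodStatus{Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionFalse}}}}
	pods := byReadiness{ready, notReady}
	sort.Sort(pods)
	fmt.Println(podutil.IsPodReady(pods[0]), podutil.IsPodReady(pods[1])) // false true
}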
|
@ -20,6 +20,7 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/ref:go_default_library",
|
||||
"//pkg/apis/batch/v1:go_default_library",
|
||||
"//pkg/apis/batch/v2alpha1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
|
|
|
@@ -47,6 +47,7 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/api/v1/ref"
batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
@@ -393,5 +394,5 @@ func deleteJob(sj *batchv2alpha1.CronJob, job *batchv1.Job, jc jobControlInterfa
}

func getRef(object runtime.Object) (*v1.ObjectReference, error) {
-	return v1.GetReference(api.Scheme, object)
+	return ref.GetReference(api.Scheme, object)
}

@@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/api/v1/ref"
batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
)
@@ -216,7 +217,7 @@ func getTimeHash(scheduledTime time.Time) int64 {

// makeCreatedByRefJson makes a json string with an object reference for use in "created-by" annotation value
func makeCreatedByRefJson(object runtime.Object) (string, error) {
-	createdByRef, err := v1.GetReference(api.Scheme, object)
+	createdByRef, err := ref.GetReference(api.Scheme, object)
if err != nil {
return "", fmt.Errorf("unable to get controller reference: %v", err)
}
|
||||
|
|
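Usage note (not part of the diff): a sketch of the kind of value makeCreatedByRefJson builds with the relocated ref.GetReference; the pod literal and TypeMeta are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1/ref"
)

func main() {
	pod := &v1.Pod{
		TypeMeta:   metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
	}
	// GetReference resolves kind/apiVersion from the object (or the scheme) and copies name, namespace and UID.
	r, err := ref.GetReference(api.Scheme, pod)
	if err != nil {
		fmt.Println("unable to get reference:", err)
		return
	}
	// Controllers wrap the reference in a SerializedReference and store the JSON in the
	// created-by annotation, which is what makeCreatedByRefJson returns.
	buf, _ := json.Marshal(v1.SerializedReference{Reference: *r})
	fmt.Println(string(buf))
}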
|
@ -20,6 +20,7 @@ go_library(
|
|||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/helper:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/extensions/v1beta1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
|
||||
|
@ -64,6 +65,7 @@ go_test(
|
|||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/testapi:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/extensions/v1beta1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions:go_default_library",
|
||||
|
|
|
@ -38,6 +38,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
|
||||
|
@ -323,7 +324,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
|
|||
// Two different versions of the same pod will always have different RVs.
|
||||
return
|
||||
}
|
||||
changedToReady := !v1.IsPodReady(oldPod) && v1.IsPodReady(curPod)
|
||||
changedToReady := !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod)
|
||||
labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
|
||||
|
||||
curControllerRef := controller.GetControllerOf(curPod)
|
||||
|
@ -732,9 +733,9 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
|
|||
daemonPods, _ := nodeToDaemonPods[node.Name]
|
||||
sort.Sort(podByCreationTimestamp(daemonPods))
|
||||
pod := daemonPods[0]
|
||||
if v1.IsPodReady(pod) {
|
||||
if podutil.IsPodReady(pod) {
|
||||
numberReady++
|
||||
if v1.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) {
|
||||
if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) {
|
||||
numberAvailable++
|
||||
}
|
||||
}
|
||||
|
|
|
@ -37,6 +37,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
||||
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
|
||||
|
@ -333,7 +334,7 @@ func markPodsReady(store cache.Store) {
|
|||
for _, obj := range store.List() {
|
||||
pod := obj.(*v1.Pod)
|
||||
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
|
||||
v1.UpdatePodCondition(&pod.Status, &condition)
|
||||
podutil.UpdatePodCondition(&pod.Status, &condition)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/controller/daemon/util"
|
||||
)
|
||||
|
@ -112,7 +113,7 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet,
|
|||
}
|
||||
available := false
|
||||
for _, pod := range daemonPods {
|
||||
if v1.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) {
|
||||
if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) {
|
||||
available = true
|
||||
break
|
||||
}
|
||||
|
|
|
@ -15,6 +15,7 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/extensions/v1beta1:go_default_library",
|
||||
"//pkg/util/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
labelsutil "k8s.io/kubernetes/pkg/util/labels"
|
||||
)
|
||||
|
@ -52,7 +53,7 @@ func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*
|
|||
unavailablePods := []*v1.Pod{}
|
||||
availablePods := []*v1.Pod{}
|
||||
for _, pod := range pods {
|
||||
if v1.IsPodAvailable(pod, minReadySeconds, metav1.Now()) {
|
||||
if podutil.IsPodAvailable(pod, minReadySeconds, metav1.Now()) {
|
||||
availablePods = append(availablePods, pod)
|
||||
} else {
|
||||
unavailablePods = append(unavailablePods, pod)
|
||||
|
|
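Usage note (not part of the diff): SplitByAvailablePods and updateDaemonSetStatus both rely on podutil.IsPodAvailable, i.e. "ready for at least minReadySeconds"; a sketch with an illustrative pod.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

func main() {
	lastTransition := metav1.NewTime(time.Now().Add(-30 * time.Second))
	pod := &v1.Pod{
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{{
				Type:               v1.PodReady,
				Status:             v1.ConditionTrue,
				LastTransitionTime: lastTransition,
			}},
		},
	}
	// A pod is "available" when it has been Ready for at least minReadySeconds.
	fmt.Println(podutil.IsPodAvailable(pod, 10, metav1.Now()))  // true: ready for 30s already
	fmt.Println(podutil.IsPodAvailable(pod, 120, metav1.Now())) // false: not ready long enough
}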
|
@ -15,6 +15,7 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/policy/v1beta1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/typed/policy/v1beta1:go_default_library",
|
||||
|
|
|
@@ -34,6 +34,7 @@ import (
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
+	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
policyclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1"
@@ -589,7 +590,7 @@ Pod:
if disruptionTime, found := disruptedPods[pod.Name]; found && disruptionTime.Time.Add(DeletionTimeout).After(currentTime) {
continue
}
-	if v1.IsPodReady(pod) {
+	if podutil.IsPodReady(pod) {
currentHealthy++
continue Pod
}
|
||||
|
|
|
@ -380,7 +380,7 @@ func (e *EndpointController) syncService(key string) error {
|
|||
epa.Hostname = hostname
|
||||
}
|
||||
|
||||
if tolerateUnreadyEndpoints || v1.IsPodReady(pod) {
|
||||
if tolerateUnreadyEndpoints || podutil.IsPodReady(pod) {
|
||||
subsets = append(subsets, v1.EndpointSubset{
|
||||
Addresses: []v1.EndpointAddress{epa},
|
||||
Ports: []v1.EndpointPort{epp},
|
||||
|
|
|
@ -29,6 +29,7 @@ go_library(
|
|||
"//pkg/api/helper:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/helper:go_default_library",
|
||||
"//pkg/api/v1/node:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions/extensions/v1beta1:go_default_library",
|
||||
|
|
|
@ -41,6 +41,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
|
||||
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1"
|
||||
extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1"
|
||||
|
@ -413,7 +414,7 @@ func (nc *NodeController) doTaintingPass() {
|
|||
zone := utilnode.GetZoneKey(node)
|
||||
EvictionsNumber.WithLabelValues(zone).Inc()
|
||||
}
|
||||
_, condition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
_, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
// Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive.
|
||||
taintToAdd := v1.Taint{}
|
||||
oppositeTaint := v1.Taint{}
|
||||
|
@ -874,7 +875,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.
|
|||
var err error
|
||||
var gracePeriod time.Duration
|
||||
var observedReadyCondition v1.NodeCondition
|
||||
_, currentReadyCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
_, currentReadyCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
if currentReadyCondition == nil {
|
||||
// If ready condition is nil, then kubelet (or nodecontroller) never posted node status.
|
||||
// A fake ready condition is created, where LastProbeTime and LastTransitionTime is set
|
||||
|
@ -914,9 +915,9 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.
|
|||
// if that's the case, but it does not seem necessary.
|
||||
var savedCondition *v1.NodeCondition
|
||||
if found {
|
||||
_, savedCondition = v1.GetNodeCondition(&savedNodeStatus.status, v1.NodeReady)
|
||||
_, savedCondition = nodeutil.GetNodeCondition(&savedNodeStatus.status, v1.NodeReady)
|
||||
}
|
||||
_, observedCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
_, observedCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
if !found {
|
||||
glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name)
|
||||
savedNodeStatus = nodeStatusData{
|
||||
|
@ -993,7 +994,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.
|
|||
remainingNodeConditionTypes := []v1.NodeConditionType{v1.NodeOutOfDisk, v1.NodeMemoryPressure, v1.NodeDiskPressure}
|
||||
nowTimestamp := nc.now()
|
||||
for _, nodeConditionType := range remainingNodeConditionTypes {
|
||||
_, currentCondition := v1.GetNodeCondition(&node.Status, nodeConditionType)
|
||||
_, currentCondition := nodeutil.GetNodeCondition(&node.Status, nodeConditionType)
|
||||
if currentCondition == nil {
|
||||
glog.V(2).Infof("Condition %v of node %v was never updated by kubelet", nodeConditionType, node.Name)
|
||||
node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
|
||||
|
@ -1016,7 +1017,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.
|
|||
}
|
||||
}
|
||||
|
||||
_, currentCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
_, currentCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
if !apiequality.Semantic.DeepEqual(currentCondition, &observedReadyCondition) {
|
||||
if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil {
|
||||
glog.Errorf("Error updating node %s: %v", node.Name, err)
|
||||
|
|
|
@ -28,6 +28,7 @@ go_library(
|
|||
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/pkg/api/v1/ref:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/clock:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
|
@ -33,6 +33,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/watch"
|
||||
|
||||
clientv1 "k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/client-go/pkg/api/v1/ref"
|
||||
"k8s.io/client-go/util/clock"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
|
@ -356,7 +357,7 @@ func (f *FakeRecorder) PastEventf(obj runtime.Object, timestamp metav1.Time, eve
|
|||
func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp metav1.Time, eventtype, reason, message string) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
ref, err := clientv1.GetReference(api.Scheme, obj)
|
||||
ref, err := ref.GetReference(api.Scheme, obj)
|
||||
if err != nil {
|
||||
glog.Errorf("Encoutered error while getting reference: %v", err)
|
||||
return
|
||||
|
|
|
@ -20,6 +20,7 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/autoscaling/v1:go_default_library",
|
||||
"//pkg/apis/autoscaling/v2alpha1:go_default_library",
|
||||
"//pkg/apis/extensions/v1beta1:go_default_library",
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
|
||||
v1coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
|
||||
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
|
||||
|
@ -77,7 +78,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
|
|||
|
||||
requests[pod.Name] = podSum
|
||||
|
||||
if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) {
|
||||
if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(&pod) {
|
||||
// save this pod name for later, but pretend it doesn't exist for now
|
||||
unreadyPods.Insert(pod.Name)
|
||||
delete(metrics, pod.Name)
|
||||
|
@ -192,7 +193,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
|
|||
missingPods := sets.NewString()
|
||||
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) {
|
||||
if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(&pod) {
|
||||
// save this pod name for later, but pretend it doesn't exist for now
|
||||
unreadyPods.Insert(pod.Name)
|
||||
delete(metrics, pod.Name)
|
||||
|
|
|
@ -19,6 +19,7 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/extensions/v1beta1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
|
||||
|
|
|
@ -38,6 +38,7 @@ import (
|
|||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
|
@ -317,7 +318,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
|
|||
// a Pod transitioned to Ready.
|
||||
// Note that this still suffers from #29229, we are just moving the problem one level
|
||||
// "closer" to kubelet (from the deployment to the replica set controller).
|
||||
if !v1.IsPodReady(oldPod) && v1.IsPodReady(curPod) && rs.Spec.MinReadySeconds > 0 {
|
||||
if !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) && rs.Spec.MinReadySeconds > 0 {
|
||||
glog.V(2).Infof("ReplicaSet %q will be enqueued after %ds for availability check", rs.Name, rs.Spec.MinReadySeconds)
|
||||
// Add a second to avoid milliseconds skew in AddAfter.
|
||||
// See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info.
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
|
||||
)
|
||||
|
@ -96,9 +97,9 @@ func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageRe
|
|||
if templateLabel.Matches(labels.Set(pod.Labels)) {
|
||||
fullyLabeledReplicasCount++
|
||||
}
|
||||
if v1.IsPodReady(pod) {
|
||||
if podutil.IsPodReady(pod) {
|
||||
readyReplicasCount++
|
||||
if v1.IsPodAvailable(pod, rs.Spec.MinReadySeconds, metav1.Now()) {
|
||||
if podutil.IsPodAvailable(pod, rs.Spec.MinReadySeconds, metav1.Now()) {
|
||||
availableReplicasCount++
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library",
|
||||
|
|
|
@ -39,6 +39,7 @@ import (
|
|||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1"
|
||||
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
|
||||
|
@ -312,7 +313,7 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) {
|
|||
// a Pod transitioned to Ready.
|
||||
// Note that this still suffers from #29229, we are just moving the problem one level
|
||||
// "closer" to kubelet (from the deployment to the ReplicationController controller).
|
||||
if !v1.IsPodReady(oldPod) && v1.IsPodReady(curPod) && rc.Spec.MinReadySeconds > 0 {
|
||||
if !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) && rc.Spec.MinReadySeconds > 0 {
|
||||
glog.V(2).Infof("ReplicationController %q will be enqueued after %ds for availability check", rc.Name, rc.Spec.MinReadySeconds)
|
||||
// Add a second to avoid milliseconds skew in AddAfter.
|
||||
// See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info.
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
|
||||
)
|
||||
|
||||
|
@ -106,9 +107,9 @@ func calculateStatus(rc *v1.ReplicationController, filteredPods []*v1.Pod, manag
|
|||
if templateLabel.Matches(labels.Set(pod.Labels)) {
|
||||
fullyLabeledReplicasCount++
|
||||
}
|
||||
if v1.IsPodReady(pod) {
|
||||
if podutil.IsPodReady(pod) {
|
||||
readyReplicasCount++
|
||||
if v1.IsPodAvailable(pod, rc.Spec.MinReadySeconds, metav1.Now()) {
|
||||
if podutil.IsPodAvailable(pod, rc.Spec.MinReadySeconds, metav1.Now()) {
|
||||
availableReplicasCount++
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@ go_library(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/node:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library",
|
||||
"//pkg/client/listers/core/v1:go_default_library",
|
||||
|
@ -42,6 +43,7 @@ go_test(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/node:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
|
|
|
@ -31,6 +31,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
v1node "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1"
|
||||
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
|
||||
|
@ -161,7 +162,7 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
|
|||
}(nodeName, nameHint, route)
|
||||
} else {
|
||||
// Update condition only if it doesn't reflect the current state.
|
||||
_, condition := v1.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
|
||||
_, condition := v1node.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
|
||||
if condition == nil || condition.Status != v1.ConditionFalse {
|
||||
rc.updateNetworkingCondition(types.NodeName(node.Name), true)
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/types"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
||||
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
@ -208,7 +209,7 @@ func TestReconcile(t *testing.T) {
|
|||
for _, action := range testCase.clientset.Actions() {
|
||||
if action.GetVerb() == "update" && action.GetResource().Resource == "nodes" {
|
||||
node := action.(core.UpdateAction).GetObject().(*v1.Node)
|
||||
_, condition := v1.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
|
||||
_, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
|
||||
if condition == nil {
|
||||
t.Errorf("%d. Missing NodeNetworkUnavailable condition for Node %v", i, node.Name)
|
||||
} else {
|
||||
|
|
|
@ -20,6 +20,7 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/apps/v1beta1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions/apps/v1beta1:go_default_library",
|
||||
|
@ -56,6 +57,7 @@ go_test(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/apps/v1beta1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions:go_default_library",
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
||||
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
|
||||
|
@ -660,7 +661,7 @@ func (spc *fakeStatefulPodControl) setPodReady(set *apps.StatefulSet, ordinal in
|
|||
sort.Sort(ascendingOrdinal(pods))
|
||||
pod := pods[ordinal]
|
||||
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
|
||||
v1.UpdatePodCondition(&pod.Status, &condition)
|
||||
podutil.UpdatePodCondition(&pod.Status, &condition)
|
||||
fakeResourceVersion(pod)
|
||||
spc.podsIndexer.Update(pod)
|
||||
return spc.podsLister.Pods(set.Namespace).List(selector)
|
||||
|
@ -697,7 +698,7 @@ func (spc *fakeStatefulPodControl) addTerminatedPod(set *apps.StatefulSet, ordin
|
|||
pod.DeletionTimestamp = &deleted
|
||||
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
|
||||
fakeResourceVersion(pod)
|
||||
v1.UpdatePodCondition(&pod.Status, &condition)
|
||||
podutil.UpdatePodCondition(&pod.Status, &condition)
|
||||
spc.podsIndexer.Update(pod)
|
||||
selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
|
||||
if err != nil {
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
|
||||
|
@ -190,7 +191,7 @@ func isRunningAndReady(pod *v1.Pod) bool {
|
|||
if pod.Status.Phase != v1.PodRunning {
|
||||
return false
|
||||
}
|
||||
podReady := v1.IsPodReady(pod)
|
||||
podReady := podutil.IsPodReady(pod)
|
||||
// User may have specified a pod readiness override through a debug annotation.
|
||||
initialized, ok := pod.Annotations[apps.StatefulSetInitAnnotation]
|
||||
if ok {
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
)
|
||||
|
@ -207,7 +208,7 @@ func TestIsRunningAndReady(t *testing.T) {
|
|||
t.Error("isRunningAndReady does not respect Pod condition")
|
||||
}
|
||||
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
|
||||
v1.UpdatePodCondition(&pod.Status, &condition)
|
||||
podutil.UpdatePodCondition(&pod.Status, &condition)
|
||||
if !isRunningAndReady(pod) {
|
||||
t.Error("Pod should be running and ready")
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ go_library(
|
|||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/helper:go_default_library",
|
||||
"//pkg/api/v1/ref:go_default_library",
|
||||
"//pkg/apis/storage/v1beta1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library",
|
||||
|
@ -67,6 +68,7 @@ go_test(
|
|||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/testapi:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/ref:go_default_library",
|
||||
"//pkg/apis/storage/v1beta1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
|
||||
|
|
|
@ -739,7 +739,7 @@ func newClaim(name, claimUID, capacity, boundToVolume string, phase v1.Persisten
|
|||
Phase: phase,
|
||||
},
|
||||
}
|
||||
// Make sure v1.GetReference(claim) works
|
||||
// Make sure ref.GetReference(claim) works
|
||||
claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", name)
|
||||
|
||||
if len(annotations) > 0 {
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1/ref"
|
||||
)
|
||||
|
||||
func makePVC(size string, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentVolumeClaim {
|
||||
|
@ -616,7 +617,7 @@ func TestFindingPreboundVolumes(t *testing.T) {
|
|||
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi")}},
|
||||
},
|
||||
}
|
||||
claimRef, err := v1.GetReference(api.Scheme, claim)
|
||||
claimRef, err := ref.GetReference(api.Scheme, claim)
|
||||
if err != nil {
|
||||
t.Errorf("error getting claimRef: %v", err)
|
||||
}
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/api/v1/ref"
|
||||
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
|
||||
|
@ -771,7 +772,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV
|
|||
volume.Spec.ClaimRef.Namespace != claim.Namespace ||
|
||||
volume.Spec.ClaimRef.UID != claim.UID {
|
||||
|
||||
claimRef, err := v1.GetReference(api.Scheme, claim)
|
||||
claimRef, err := ref.GetReference(api.Scheme, claim)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unexpected error getting claim reference: %v", err)
|
||||
}
|
||||
|
@ -1299,7 +1300,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
|
|||
|
||||
// Prepare a claimRef to the claim early (to fail before a volume is
|
||||
// provisioned)
|
||||
claimRef, err := v1.GetReference(api.Scheme, claim)
|
||||
claimRef, err := ref.GetReference(api.Scheme, claim)
|
||||
if err != nil {
|
||||
glog.V(3).Infof("unexpected error getting claim reference: %v", err)
|
||||
return
|
||||
|
|
|
@ -54,6 +54,7 @@ go_library(
|
|||
"//pkg/api/helper:go_default_library",
|
||||
"//pkg/api/util:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/apps:go_default_library",
|
||||
"//pkg/apis/apps/v1beta1:go_default_library",
|
||||
"//pkg/apis/autoscaling:go_default_library",
|
||||
|
|
|
@ -34,6 +34,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/helper"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
|
||||
"k8s.io/kubernetes/pkg/client/retry"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
|
@ -430,7 +431,7 @@ func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController, minR
|
|||
if v1Pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
if !v1.IsPodAvailable(v1Pod, minReadySeconds, r.nowFn()) {
|
||||
if !podutil.IsPodAvailable(v1Pod, minReadySeconds, r.nowFn()) {
|
||||
continue
|
||||
}
|
||||
switch controller.Name {
|
||||
|
|
|
@ -38,6 +38,8 @@ go_library(
|
|||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/helper:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/api/v1/resource:go_default_library",
|
||||
"//pkg/api/v1/validation:go_default_library",
|
||||
"//pkg/apis/componentconfig:go_default_library",
|
||||
"//pkg/apis/componentconfig/v1alpha1:go_default_library",
|
||||
|
|
|
@ -25,6 +25,7 @@ go_library(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/resource:go_default_library",
|
||||
"//pkg/apis/componentconfig:go_default_library",
|
||||
"//pkg/kubelet/cadvisor:go_default_library",
|
||||
"//pkg/kubelet/cm/util:go_default_library",
|
||||
|
|
|
@ -28,6 +28,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1/resource"
|
||||
"k8s.io/kubernetes/pkg/kubelet/qos"
|
||||
)
|
||||
|
||||
|
@ -147,7 +148,7 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]
|
|||
// we only care about the burstable qos tier
|
||||
continue
|
||||
}
|
||||
req, _, err := v1.PodRequestsAndLimits(pod)
|
||||
req, _, err := resource.PodRequestsAndLimits(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -187,7 +188,7 @@ func (m *qosContainerManagerImpl) setMemoryReserve(configs map[v1.PodQOSClass]*C
|
|||
// limits are not set for Best Effort pods
|
||||
continue
|
||||
}
|
||||
req, _, err := v1.PodRequestsAndLimits(pod)
|
||||
req, _, err := resource.PodRequestsAndLimits(pod)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("[Container Manager] Pod resource requests/limits could not be determined. Not setting QOS memory limts.")
|
||||
return
|
||||
|
|
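Usage note (not part of the diff): the QoS container manager now sums requests through the resource subpackage; a sketch of resource.PodRequestsAndLimits, imported here under the illustrative alias resourcehelper to avoid clashing with apimachinery's resource package.

package main

import (
	"fmt"

	apiresource "k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name: "app",
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceCPU:    apiresource.MustParse("250m"),
							v1.ResourceMemory: apiresource.MustParse("64Mi"),
						},
					},
				},
				{
					Name: "sidecar",
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceCPU: apiresource.MustParse("100m"),
						},
					},
				},
			},
		},
	}
	// Requests are summed across containers; the sum is what drives the burstable CPU shares above.
	reqs, limits, err := resourcehelper.PodRequestsAndLimits(pod)
	if err != nil {
		panic(err)
	}
	cpu := reqs[v1.ResourceCPU]
	fmt.Println("cpu request:", cpu.String(), "limits set:", len(limits)) // cpu request: 350m limits set: 0
}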
|
@ -28,6 +28,7 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/ref:go_default_library",
|
||||
"//pkg/client/unversioned/remotecommand:go_default_library",
|
||||
"//pkg/kubelet/api/v1alpha1/runtime:go_default_library",
|
||||
"//pkg/kubelet/events:go_default_library",
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1/ref"
|
||||
)
|
||||
|
||||
var ImplicitContainerPrefix string = "implicitly required container "
|
||||
|
@ -38,7 +39,7 @@ func GenerateContainerRef(pod *v1.Pod, container *v1.Container) (*v1.ObjectRefer
|
|||
// start (like the pod infra container). This is not a good way, ugh.
|
||||
fieldPath = ImplicitContainerPrefix + container.Name
|
||||
}
|
||||
ref, err := v1.GetPartialReference(api.Scheme, pod, fieldPath)
|
||||
ref, err := ref.GetPartialReference(api.Scheme, pod, fieldPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
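Usage note (not part of the diff): GenerateContainerRef above builds a partial reference with ref.GetPartialReference; a sketch with an illustrative pod and field path.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1/ref"
)

func main() {
	pod := &v1.Pod{
		TypeMeta:   metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{Name: "web-0", Namespace: "default"},
		Spec:       v1.PodSpec{Containers: []v1.Container{{Name: "nginx"}}},
	}
	// The kubelet records container events against a reference that points at a
	// field path inside the pod, e.g. spec.containers{nginx}.
	r, err := ref.GetPartialReference(api.Scheme, pod, "spec.containers{nginx}")
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Kind, r.Name, r.FieldPath) // Pod web-0 spec.containers{nginx}
}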
|
@ -43,6 +43,8 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/api/v1/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1/validation"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
|
||||
"k8s.io/kubernetes/pkg/fieldpath"
|
||||
|
@ -642,9 +644,9 @@ func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod
|
|||
func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, container *v1.Container) (string, error) {
|
||||
containerName := fs.ContainerName
|
||||
if len(containerName) == 0 {
|
||||
return v1.ExtractContainerResourceValue(fs, container)
|
||||
return resource.ExtractContainerResourceValue(fs, container)
|
||||
} else {
|
||||
return v1.ExtractResourceValueByContainerName(fs, pod, containerName)
|
||||
return resource.ExtractResourceValueByContainerName(fs, pod, containerName)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -940,10 +942,10 @@ func hasHostPortConflicts(pods []*v1.Pod) bool {
|
|||
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
|
||||
var cID string
|
||||
|
||||
cStatus, found := v1.GetContainerStatus(podStatus.ContainerStatuses, containerName)
|
||||
cStatus, found := podutil.GetContainerStatus(podStatus.ContainerStatuses, containerName)
|
||||
// if not found, check the init containers
|
||||
if !found {
|
||||
cStatus, found = v1.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
|
||||
cStatus, found = podutil.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
|
||||
}
|
||||
if !found {
|
||||
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
|
||||
|
@ -1047,7 +1049,7 @@ func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
|
|||
pendingInitialization := 0
|
||||
failedInitialization := 0
|
||||
for _, container := range spec.InitContainers {
|
||||
containerStatus, ok := v1.GetContainerStatus(info, container.Name)
|
||||
containerStatus, ok := podutil.GetContainerStatus(info, container.Name)
|
||||
if !ok {
|
||||
pendingInitialization++
|
||||
continue
|
||||
|
@ -1084,7 +1086,7 @@ func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
|
|||
failed := 0
|
||||
succeeded := 0
|
||||
for _, container := range spec.Containers {
|
||||
containerStatus, ok := v1.GetContainerStatus(info, container.Name)
|
||||
containerStatus, ok := podutil.GetContainerStatus(info, container.Name)
|
||||
if !ok {
|
||||
unknown++
|
||||
continue
|
||||
|
@ -1179,10 +1181,10 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
|
|||
// s (the PodStatus we are creating) will not have a PodScheduled condition yet, because converStatusToAPIStatus()
|
||||
// does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure
|
||||
// it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true.
|
||||
if _, oldPodScheduled := v1.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil {
|
||||
if _, oldPodScheduled := podutil.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil {
|
||||
s.Conditions = append(s.Conditions, *oldPodScheduled)
|
||||
}
|
||||
v1.UpdatePodCondition(&pod.Status, &v1.PodCondition{
|
||||
podutil.UpdatePodCondition(&pod.Status, &v1.PodCondition{
|
||||
Type: v1.PodScheduled,
|
||||
Status: v1.ConditionTrue,
|
||||
})
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1/resource"
|
||||
)
|
||||
|
||||
// defaultPodLimitsForDownwardApi copies the input pod, and optional container,
|
||||
|
@ -52,7 +53,7 @@ func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *v1.Pod, container *v1.Con
|
|||
return nil, nil, fmt.Errorf("unexpected type returned from deep copy of pod object")
|
||||
}
|
||||
for idx := range outputPod.Spec.Containers {
|
||||
v1.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable)
|
||||
resource.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable)
|
||||
}
|
||||
|
||||
var outputContainer *v1.Container
|
||||
|
@ -65,7 +66,7 @@ func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *v1.Pod, container *v1.Con
|
|||
if !ok {
|
||||
return nil, nil, fmt.Errorf("unexpected type returned from deep copy of container object")
|
||||
}
|
||||
v1.MergeContainerResourceLimits(outputContainer, allocatable)
|
||||
resource.MergeContainerResourceLimits(outputContainer, allocatable)
|
||||
}
|
||||
return outputPod, outputContainer, nil
|
||||
}
|
||||
|
|
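Usage note (not part of the diff): defaultPodLimitsForDownwardApi fills missing limits from node allocatable via resource.MergeContainerResourceLimits; a sketch with illustrative allocatable values (the resourcehelper alias is mine).

package main

import (
	"fmt"

	apiresource "k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
)

func main() {
	// Node allocatable, as the kubelet would report it.
	allocatable := v1.ResourceList{
		v1.ResourceCPU:    apiresource.MustParse("4"),
		v1.ResourceMemory: apiresource.MustParse("8Gi"),
	}
	container := &v1.Container{Name: "app"} // no limits declared
	// Missing limits are filled in from node allocatable so that downward-API
	// consumers of limits.cpu / limits.memory always see a value.
	resourcehelper.MergeContainerResourceLimits(container, allocatable)
	cpu := container.Resources.Limits[v1.ResourceCPU]
	fmt.Println("defaulted cpu limit:", cpu.String()) // 4
}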
|
@ -29,6 +29,7 @@ go_library(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/ref:go_default_library",
|
||||
"//pkg/credentialprovider:go_default_library",
|
||||
"//pkg/kubelet/api:go_default_library",
|
||||
"//pkg/kubelet/api/v1alpha1/runtime:go_default_library",
|
||||
|
|
|
@ -32,6 +32,7 @@ import (
|
|||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1/ref"
|
||||
"k8s.io/kubernetes/pkg/credentialprovider"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||
|
@ -548,7 +549,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
|
|||
podContainerChanges := m.computePodContainerChanges(pod, podStatus)
|
||||
glog.V(3).Infof("computePodContainerChanges got %+v for pod %q", podContainerChanges, format.Pod(pod))
|
||||
if podContainerChanges.CreateSandbox {
|
||||
ref, err := v1.GetReference(api.Scheme, pod)
|
||||
ref, err := ref.GetReference(api.Scheme, pod)
|
||||
if err != nil {
|
||||
glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err)
|
||||
}
|
||||
|
|
|
@ -14,6 +14,7 @@ go_library(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/resource:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/events:go_default_library",
|
||||
"//pkg/kubelet/eviction:go_default_library",
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1/resource"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/events"
|
||||
"k8s.io/kubernetes/pkg/kubelet/eviction"
|
||||
|
@ -190,7 +191,7 @@ type admissionRequirementList []*admissionRequirement
|
|||
func (a admissionRequirementList) distance(pod *v1.Pod) float64 {
|
||||
dist := float64(0)
|
||||
for _, req := range a {
|
||||
remainingRequest := float64(req.quantity - v1.GetResourceRequest(pod, req.resourceName))
|
||||
remainingRequest := float64(req.quantity - resource.GetResourceRequest(pod, req.resourceName))
|
||||
if remainingRequest < 0 {
|
||||
remainingRequest = 0
|
||||
}
|
||||
|
@ -206,7 +207,7 @@ func (a admissionRequirementList) subtract(pods ...*v1.Pod) admissionRequirement
|
|||
for _, req := range a {
|
||||
newQuantity := req.quantity
|
||||
for _, pod := range pods {
|
||||
newQuantity -= v1.GetResourceRequest(pod, req.resourceName)
|
||||
newQuantity -= resource.GetResourceRequest(pod, req.resourceName)
|
||||
}
|
||||
if newQuantity > 0 {
|
||||
newList = append(newList, &admissionRequirement{
|
||||
|
@ -252,8 +253,8 @@ func smallerResourceRequest(pod1 *v1.Pod, pod2 *v1.Pod) bool {
|
|||
v1.ResourceCPU,
|
||||
}
|
||||
for _, res := range priorityList {
|
||||
req1 := v1.GetResourceRequest(pod1, res)
|
||||
req2 := v1.GetResourceRequest(pod2, res)
|
||||
req1 := resource.GetResourceRequest(pod1, res)
|
||||
req2 := resource.GetResourceRequest(pod2, res)
|
||||
if req1 < req2 {
|
||||
return true
|
||||
} else if req1 > req2 {
|
||||
|
|
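Usage note (not part of the diff): the admission-requirement math above compares pods with resource.GetResourceRequest, which reports CPU in millicores and other resources in base units; a sketch with illustrative requests (the resourcehelper alias is mine).

package main

import (
	"fmt"

	apiresource "k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
)

func podWithRequests(cpu, memory string) *v1.Pod {
	return &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
		Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
			v1.ResourceCPU:    apiresource.MustParse(cpu),
			v1.ResourceMemory: apiresource.MustParse(memory),
		}},
	}}}}
}

func main() {
	p1 := podWithRequests("100m", "100Mi")
	p2 := podWithRequests("200m", "50Mi")
	// CPU comes back in millicores, memory in bytes, so the admission logic can
	// subtract the values from the remaining requirement quantities.
	fmt.Println(resourcehelper.GetResourceRequest(p1, v1.ResourceCPU))    // 100
	fmt.Println(resourcehelper.GetResourceRequest(p2, v1.ResourceMemory)) // 52428800
}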
|
@ -18,6 +18,7 @@ go_library(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/events:go_default_library",
|
||||
"//pkg/kubelet/prober/results:go_default_library",
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/prober/results"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
|
@ -155,7 +156,7 @@ func (w *worker) doProbe() (keepGoing bool) {
|
|||
return false
|
||||
}
|
||||
|
||||
c, ok := v1.GetContainerStatus(status.ContainerStatuses, w.container.Name)
|
||||
c, ok := podutil.GetContainerStatus(status.ContainerStatuses, w.container.Name)
|
||||
if !ok || len(c.ContainerID) == 0 {
|
||||
// Either the container has not been created yet, or it was deleted.
|
||||
glog.V(3).Infof("Probe target container not found: %v - %v",
|
||||
|
|
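Usage note (not part of the diff): the prober worker looks up its target container with podutil.GetContainerStatus; a sketch with illustrative statuses.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

func main() {
	statuses := []v1.ContainerStatus{
		{Name: "app", ContainerID: "docker://abc123", Ready: true},
		{Name: "sidecar", Ready: false},
	}
	// A missing entry or an empty ContainerID means the container is not running yet,
	// which is exactly the case doProbe() above bails out on.
	if s, ok := podutil.GetContainerStatus(statuses, "app"); ok && len(s.ContainerID) != 0 {
		fmt.Println("probing container", s.ContainerID, "ready:", s.Ready)
	}
	if _, ok := podutil.GetContainerStatus(statuses, "missing"); !ok {
		fmt.Println("container not created yet")
	}
}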
|
@ -45,6 +45,7 @@ go_test(
|
|||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -44,7 +45,7 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, containerStatuses []v1.Containe
|
|||
unknownContainers := []string{}
|
||||
unreadyContainers := []string{}
|
||||
for _, container := range spec.Containers {
|
||||
if containerStatus, ok := v1.GetContainerStatus(containerStatuses, container.Name); ok {
|
||||
if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
|
||||
if !containerStatus.Ready {
|
||||
unreadyContainers = append(unreadyContainers, container.Name)
|
||||
}
|
||||
|
@ -99,7 +100,7 @@ func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.Co
|
|||
unknownContainers := []string{}
|
||||
unreadyContainers := []string{}
|
||||
for _, container := range spec.InitContainers {
|
||||
if containerStatus, ok := v1.GetContainerStatus(containerStatuses, container.Name); ok {
|
||||
if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
|
||||
if !containerStatus.Ready {
|
||||
unreadyContainers = append(unreadyContainers, container.Name)
|
||||
}
|
||||
|
|
|
@ -288,10 +288,10 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp
|
|||
}
|
||||
|
||||
// Set ReadyCondition.LastTransitionTime.
|
||||
if _, readyCondition := v1.GetPodCondition(&status, v1.PodReady); readyCondition != nil {
|
||||
if _, readyCondition := podutil.GetPodCondition(&status, v1.PodReady); readyCondition != nil {
|
||||
// Need to set LastTransitionTime.
|
||||
lastTransitionTime := metav1.Now()
|
||||
_, oldReadyCondition := v1.GetPodCondition(&oldStatus, v1.PodReady)
|
||||
_, oldReadyCondition := podutil.GetPodCondition(&oldStatus, v1.PodReady)
|
||||
if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
|
||||
lastTransitionTime = oldReadyCondition.LastTransitionTime
|
||||
}
|
||||
|
@ -299,10 +299,10 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp
|
|||
}
|
||||
|
||||
// Set InitializedCondition.LastTransitionTime.
|
||||
if _, initCondition := v1.GetPodCondition(&status, v1.PodInitialized); initCondition != nil {
|
||||
if _, initCondition := podutil.GetPodCondition(&status, v1.PodInitialized); initCondition != nil {
|
||||
// Need to set LastTransitionTime.
|
||||
lastTransitionTime := metav1.Now()
|
||||
_, oldInitCondition := v1.GetPodCondition(&oldStatus, v1.PodInitialized)
|
||||
_, oldInitCondition := podutil.GetPodCondition(&oldStatus, v1.PodInitialized)
|
||||
if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status {
|
||||
lastTransitionTime = oldInitCondition.LastTransitionTime
|
||||
}
|
||||
|
|
|
@ -33,6 +33,7 @@ import (
|
|||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
|
@ -184,7 +185,7 @@ func TestNewStatusSetsReadyTransitionTime(t *testing.T) {
|
|||
syncer.SetPodStatus(pod, podStatus)
|
||||
verifyUpdates(t, syncer, 1)
|
||||
status := expectPodStatus(t, syncer, pod)
|
||||
readyCondition := v1.GetPodReadyCondition(status)
|
||||
readyCondition := podutil.GetPodReadyCondition(status)
|
||||
if readyCondition.LastTransitionTime.IsZero() {
|
||||
t.Errorf("Unexpected: last transition time not set")
|
||||
}
|
||||
|
@ -237,8 +238,8 @@ func TestChangedStatusUpdatesLastTransitionTime(t *testing.T) {
|
|||
verifyUpdates(t, syncer, 1)
|
||||
newStatus := expectPodStatus(t, syncer, pod)
|
||||
|
||||
oldReadyCondition := v1.GetPodReadyCondition(oldStatus)
|
||||
newReadyCondition := v1.GetPodReadyCondition(newStatus)
|
||||
oldReadyCondition := podutil.GetPodReadyCondition(oldStatus)
|
||||
newReadyCondition := podutil.GetPodReadyCondition(newStatus)
|
||||
if newReadyCondition.LastTransitionTime.IsZero() {
|
||||
t.Errorf("Unexpected: last transition time not set")
|
||||
}
|
||||
|
@ -276,8 +277,8 @@ func TestUnchangedStatusPreservesLastTransitionTime(t *testing.T) {
|
|||
verifyUpdates(t, syncer, 0)
|
||||
newStatus := expectPodStatus(t, syncer, pod)
|
||||
|
||||
oldReadyCondition := v1.GetPodReadyCondition(oldStatus)
|
||||
newReadyCondition := v1.GetPodReadyCondition(newStatus)
|
||||
oldReadyCondition := podutil.GetPodReadyCondition(oldStatus)
|
||||
newReadyCondition := podutil.GetPodReadyCondition(newStatus)
|
||||
if newReadyCondition.LastTransitionTime.IsZero() {
|
||||
t.Errorf("Unexpected: last transition time not set")
|
||||
}
|
||||
|
|
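Usage note (not part of the diff): the status manager reads and patches conditions through podutil.GetPodCondition / GetPodReadyCondition; a sketch with an illustrative status.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

func main() {
	status := v1.PodStatus{
		Conditions: []v1.PodCondition{{
			Type:               v1.PodReady,
			Status:             v1.ConditionTrue,
			LastTransitionTime: metav1.Now(),
		}},
	}
	// GetPodCondition returns the index and a pointer into the slice, so callers
	// can patch fields such as LastTransitionTime in place, as updateStatusInternal does.
	if i, cond := podutil.GetPodCondition(&status, v1.PodReady); cond != nil {
		fmt.Printf("condition %d transitioned at %v\n", i, cond.LastTransitionTime)
	}
	// GetPodReadyCondition is shorthand for the PodReady lookup on a status value.
	fmt.Println(podutil.GetPodReadyCondition(status).Status) // True
}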
|
@ -14,6 +14,7 @@ go_library(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/resource:go_default_library",
|
||||
"//pkg/fieldpath:go_default_library",
|
||||
"//pkg/util/strings:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1/resource"
|
||||
"k8s.io/kubernetes/pkg/fieldpath"
|
||||
utilstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
|
@ -244,7 +245,7 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu
|
|||
nodeAllocatable, err := host.GetNodeAllocatable()
|
||||
if err != nil {
|
||||
errlist = append(errlist, err)
|
||||
} else if values, err := v1.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil {
|
||||
} else if values, err := resource.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil {
|
||||
glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error())
|
||||
errlist = append(errlist, err)
|
||||
} else {
|
||||
|
|
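Usage note (not part of the diff): the downward API volume resolves resourceFieldRef entries through resource.ExtractContainerResourceValue; a sketch with an illustrative container and divisor (the resourcehelper alias is mine).

package main

import (
	"fmt"

	apiresource "k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
)

func main() {
	container := &v1.Container{
		Name: "app",
		Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				v1.ResourceMemory: apiresource.MustParse("128Mi"),
			},
		},
	}
	// The divisor rescales the raw value; here the memory limit is reported in Mi.
	fs := &v1.ResourceFieldSelector{
		Resource: "limits.memory",
		Divisor:  apiresource.MustParse("1Mi"),
	}
	value, err := resourcehelper.ExtractContainerResourceValue(fs, container)
	if err != nil {
		panic(err)
	}
	fmt.Println(value) // 128
}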
|
@ -17,6 +17,7 @@ go_library(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions/apps/v1beta1:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library",
|
||||
|
|
|
@ -32,6 +32,7 @@ import (
|
|||
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
appsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1"
|
||||
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1"
|
||||
|
@ -595,7 +596,7 @@ type podConditionUpdater struct {
|
|||
|
||||
func (p *podConditionUpdater) Update(pod *v1.Pod, condition *v1.PodCondition) error {
|
||||
glog.V(2).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status)
|
||||
if v1.UpdatePodCondition(&pod.Status, condition) {
|
||||
if podutil.UpdatePodCondition(&pod.Status, condition) {
|
||||
_, err := p.Client.Core().Pods(pod.Namespace).UpdateStatus(pod)
|
||||
return err
|
||||
}
|
||||
|
|
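Usage note (not part of the diff): podConditionUpdater above only calls UpdateStatus when podutil.UpdatePodCondition reports a change; a sketch with an illustrative condition.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

func main() {
	pod := &v1.Pod{}
	cond := v1.PodCondition{
		Type:    v1.PodScheduled,
		Status:  v1.ConditionFalse,
		Reason:  "Unschedulable",
		Message: "no nodes available",
	}
	// UpdatePodCondition reports whether the status actually changed, which is what
	// lets the scheduler skip the UpdateStatus round trip when nothing is new.
	if podutil.UpdatePodCondition(&pod.Status, &cond) {
		fmt.Println("condition changed, would call Pods(...).UpdateStatus(pod)")
	}
	if !podutil.UpdatePodCondition(&pod.Status, &cond) {
		fmt.Println("no change the second time, update skipped")
	}
}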
|
@@ -116,6 +116,8 @@ find "${MAIN_REPO}/pkg/version" -maxdepth 1 -type f | xargs -I{} cp {} "${CLIENT
mkcp "pkg/client/clientset_generated/${CLIENTSET}" "pkg/client/clientset_generated"
mkcp "pkg/client/informers/informers_generated/externalversions" "pkg/client/informers/informers_generated"
mkcp "pkg/api/helper" "pkg/api"
+mkcp "pkg/api/v1/resource" "pkg/api/v1"
+mkcp "pkg/api/v1/node" "pkg/api/v1"

pushd "${CLIENT_REPO_TEMP}" > /dev/null
echo "generating vendor/"
|
||||
|
|
|
@ -47,6 +47,7 @@ go_library(
|
|||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/pkg/api:go_default_library",
|
||||
"//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/pkg/api/v1/ref:go_default_library",
|
||||
"//vendor/k8s.io/client-go/pkg/apis/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
],
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/client-go/pkg/api/v1/ref"
|
||||
)
|
||||
|
||||
// The EventExpansion interface allows manually adding extra methods to the EventInterface.
|
||||
|
@ -99,7 +100,7 @@ func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte)
|
|||
// object must match this event's client namespace unless the event client
|
||||
// was made with the "" namespace.
|
||||
func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) {
|
||||
ref, err := v1.GetReference(scheme, objOrRef)
|
||||
ref, err := ref.GetReference(scheme, objOrRef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -17,9 +17,9 @@ go_library(
|
|||
"generate.go",
|
||||
"generated.pb.go",
|
||||
"meta.go",
|
||||
"ref.go",
|
||||
"objectreference.go",
|
||||
"register.go",
|
||||
"resource_helpers.go",
|
||||
"resource.go",
|
||||
"taint.go",
|
||||
"toleration.go",
|
||||
"types.generated.go",
|
||||
|
@ -34,7 +34,6 @@ go_library(
|
|||
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
|
||||
"//vendor/github.com/gogo/protobuf/sortkeys:go_default_library",
|
||||
"//vendor/github.com/ugorji/go/codec:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
|
||||
|
|
|
@ -0,0 +1,15 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["util.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = ["//vendor/k8s.io/client-go/pkg/api/v1:go_default_library"],
|
||||
)
|
|
@@ -0,0 +1,47 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// TODO: merge with pkg/util/node
+
+package node
+
+import (
+	"k8s.io/client-go/pkg/api/v1"
+)
+
+// GetNodeCondition extracts the provided condition from the given status and returns that.
+// Returns nil and -1 if the condition is not present, and the index of the located condition.
+func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) {
+	if status == nil {
+		return -1, nil
+	}
+	for i := range status.Conditions {
+		if status.Conditions[i].Type == conditionType {
+			return i, &status.Conditions[i]
+		}
+	}
+	return -1, nil
+}
+
+// IsNodeReady returns true if a node is ready; false otherwise.
+func IsNodeReady(node *v1.Node) bool {
+	for _, c := range node.Status.Conditions {
+		if c.Type == v1.NodeReady {
+			return c.Status == v1.ConditionTrue
+		}
+	}
+	return false
+}
|
|
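A minimal usage sketch for the relocated node helpers (the nodeutil alias and the surrounding functions are illustrative, not part of the commit):

package example

import (
    "fmt"

    "k8s.io/client-go/pkg/api/v1"
    nodeutil "k8s.io/client-go/pkg/api/v1/node"
)

// countReadyNodes tallies the nodes whose NodeReady condition is true,
// using the helper that now lives in the node subpackage.
func countReadyNodes(nodes *v1.NodeList) int {
    ready := 0
    for i := range nodes.Items {
        if nodeutil.IsNodeReady(&nodes.Items[i]) {
            ready++
        }
    }
    return ready
}

// describeReadyCondition shows GetNodeCondition returning both the index and the condition.
func describeReadyCondition(node *v1.Node) string {
    idx, cond := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
    if cond == nil {
        return "NodeReady condition not reported"
    }
    return fmt.Sprintf("conditions[%d]: %s=%s", idx, cond.Type, cond.Status)
}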
@@ -0,0 +1,33 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that
// intend only to get a reference to that object. This simplifies the event recording interface.
func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
    obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}

func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
    return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}

func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
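Because GetObjectKind returns the reference itself, an ObjectReference can stand in wherever schema.ObjectKind is expected. A small illustrative round trip (the demo function and its values are made up):

package example

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/pkg/api/v1"
)

func demoObjectKind() {
    ref := &v1.ObjectReference{Kind: "Pod", APIVersion: "v1", Name: "web-0"}

    // GetObjectKind returns the reference itself, so SetGroupVersionKind
    // mutates APIVersion and Kind in place.
    ref.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{
        Group: "apps", Version: "v1beta1", Kind: "StatefulSet",
    })

    gvk := ref.GroupVersionKind()
    fmt.Println(gvk.Group, gvk.Version, gvk.Kind) // apps v1beta1 StatefulSet
}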
@@ -0,0 +1,19 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["ref.go"],
    tags = ["automanaged"],
    deps = [
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
    ],
)
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package v1
package ref

import (
    "errors"

@@ -22,10 +22,9 @@ import (
    "net/url"
    "strings"

    "k8s.io/apimachinery/pkg/runtime/schema"

    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/pkg/api/v1"
)

var (

@@ -38,11 +37,11 @@ var (
// object, or an error if the object doesn't follow the conventions
// that would allow this.
// TODO: should take a meta.Interface see http://issue.k8s.io/7127
func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) {
func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) {
    if obj == nil {
        return nil, ErrNilObject
    }
    if ref, ok := obj.(*ObjectReference); ok {
    if ref, ok := obj.(*v1.ObjectReference); ok {
        // Don't make a reference to a reference.
        return ref, nil
    }

@@ -94,14 +93,14 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference,

    // only has list metadata
    if objectMeta == nil {
        return &ObjectReference{
        return &v1.ObjectReference{
            Kind: kind,
            APIVersion: version,
            ResourceVersion: listMeta.GetResourceVersion(),
        }, nil
    }

    return &ObjectReference{
    return &v1.ObjectReference{
        Kind: kind,
        APIVersion: version,
        Name: objectMeta.GetName(),

@@ -112,7 +111,7 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference,
}

// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath.
func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) {
func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*v1.ObjectReference, error) {
    ref, err := GetReference(scheme, obj)
    if err != nil {
        return nil, err

@@ -120,14 +119,3 @@ func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath s
    ref.FieldPath = fieldPath
    return ref, nil
}

// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that
// intend only to get a reference to that object. This simplifies the event recording interface.
func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
    obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
    return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}

func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
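GetPartialReference adds a FieldPath on top of GetReference, which is how event sources point at a single container rather than the whole pod. A hedged sketch from the caller's side (the helper function is illustrative; the fieldPath string follows the usual container-scoped convention and is an assumption here):

package example

import (
    "fmt"

    "k8s.io/client-go/pkg/api"
    "k8s.io/client-go/pkg/api/v1"
    ref "k8s.io/client-go/pkg/api/v1/ref"
)

// containerRef builds a reference that points at a single container of a pod,
// suitable for use as an event's InvolvedObject.
func containerRef(pod *v1.Pod, containerName string) (*v1.ObjectReference, error) {
    fieldPath := fmt.Sprintf("spec.containers{%s}", containerName)
    return ref.GetPartialReference(api.Scheme, pod, fieldPath)
}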
@@ -0,0 +1,56 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
    "k8s.io/apimachinery/pkg/api/resource"
)

// Returns string version of ResourceName.
func (self ResourceName) String() string {
    return string(self)
}

// Returns the CPU limit if specified.
func (self *ResourceList) Cpu() *resource.Quantity {
    if val, ok := (*self)[ResourceCPU]; ok {
        return &val
    }
    return &resource.Quantity{Format: resource.DecimalSI}
}

// Returns the Memory limit if specified.
func (self *ResourceList) Memory() *resource.Quantity {
    if val, ok := (*self)[ResourceMemory]; ok {
        return &val
    }
    return &resource.Quantity{Format: resource.BinarySI}
}

func (self *ResourceList) Pods() *resource.Quantity {
    if val, ok := (*self)[ResourcePods]; ok {
        return &val
    }
    return &resource.Quantity{}
}

func (self *ResourceList) NvidiaGPU() *resource.Quantity {
    if val, ok := (*self)[ResourceNvidiaGPU]; ok {
        return &val
    }
    return &resource.Quantity{}
}
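A short illustrative sketch of these accessors; note that they return a pointer to a copy of the stored quantity, so mutating the result does not touch the list itself (the demo function is not part of the commit):

package example

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/client-go/pkg/api/v1"
)

func demoResourceList() {
    rl := v1.ResourceList{
        v1.ResourceCPU: resource.MustParse("500m"),
    }

    // Cpu() hands back a pointer to a copy, so adding to it leaves rl unchanged.
    cpu := rl.Cpu()
    cpu.Add(resource.MustParse("500m"))
    stored := rl[v1.ResourceCPU]
    fmt.Println(cpu.String(), stored.String()) // the copy is now 1 CPU, the list still holds 500m

    // Missing entries come back as zero quantities with a sensible default format.
    fmt.Println(rl.Memory().Format == resource.BinarySI) // true
}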
@@ -0,0 +1,32 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_test(
    name = "go_default_test",
    srcs = ["helpers_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
    ],
)

go_library(
    name = "go_default_library",
    srcs = ["helpers.go"],
    tags = ["automanaged"],
    deps = [
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/client-go/pkg/api:go_default_library",
        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
    ],
)
@@ -0,0 +1,200 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resource

import (
    "fmt"
    "math"
    "strconv"

    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/client-go/pkg/api"
    "k8s.io/client-go/pkg/api/v1"
)

// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
// containers of the pod.
func PodRequestsAndLimits(pod *v1.Pod) (reqs map[v1.ResourceName]resource.Quantity, limits map[v1.ResourceName]resource.Quantity, err error) {
    reqs, limits = map[v1.ResourceName]resource.Quantity{}, map[v1.ResourceName]resource.Quantity{}
    for _, container := range pod.Spec.Containers {
        for name, quantity := range container.Resources.Requests {
            if value, ok := reqs[name]; !ok {
                reqs[name] = *quantity.Copy()
            } else {
                value.Add(quantity)
                reqs[name] = value
            }
        }
        for name, quantity := range container.Resources.Limits {
            if value, ok := limits[name]; !ok {
                limits[name] = *quantity.Copy()
            } else {
                value.Add(quantity)
                limits[name] = value
            }
        }
    }
    // init containers define the minimum of any resource
    for _, container := range pod.Spec.InitContainers {
        for name, quantity := range container.Resources.Requests {
            value, ok := reqs[name]
            if !ok {
                reqs[name] = *quantity.Copy()
                continue
            }
            if quantity.Cmp(value) > 0 {
                reqs[name] = *quantity.Copy()
            }
        }
        for name, quantity := range container.Resources.Limits {
            value, ok := limits[name]
            if !ok {
                limits[name] = *quantity.Copy()
                continue
            }
            if quantity.Cmp(value) > 0 {
                limits[name] = *quantity.Copy()
            }
        }
    }
    return
}

// finds and returns the request for a specific resource.
func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
    if resource == v1.ResourcePods {
        return 1
    }
    totalResources := int64(0)
    for _, container := range pod.Spec.Containers {
        if rQuantity, ok := container.Resources.Requests[resource]; ok {
            if resource == v1.ResourceCPU {
                totalResources += rQuantity.MilliValue()
            } else {
                totalResources += rQuantity.Value()
            }
        }
    }
    // take max_resource(sum_pod, any_init_container)
    for _, container := range pod.Spec.InitContainers {
        if rQuantity, ok := container.Resources.Requests[resource]; ok {
            if resource == v1.ResourceCPU && rQuantity.MilliValue() > totalResources {
                totalResources = rQuantity.MilliValue()
            } else if rQuantity.Value() > totalResources {
                totalResources = rQuantity.Value()
            }
        }
    }
    return totalResources
}

// ExtractResourceValueByContainerName extracts the value of a resource
// by providing container name
func ExtractResourceValueByContainerName(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string) (string, error) {
    container, err := findContainerInPod(pod, containerName)
    if err != nil {
        return "", err
    }
    return ExtractContainerResourceValue(fs, container)
}

// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource
// by providing container name and node allocatable
func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string, nodeAllocatable v1.ResourceList) (string, error) {
    realContainer, err := findContainerInPod(pod, containerName)
    if err != nil {
        return "", err
    }

    containerCopy, err := api.Scheme.DeepCopy(realContainer)
    if err != nil {
        return "", fmt.Errorf("failed to perform a deep copy of container object: %v", err)
    }

    container, ok := containerCopy.(*v1.Container)
    if !ok {
        return "", fmt.Errorf("unexpected type returned from deep copy of container object")
    }

    MergeContainerResourceLimits(container, nodeAllocatable)

    return ExtractContainerResourceValue(fs, container)
}

// ExtractContainerResourceValue extracts the value of a resource
// in an already known container
func ExtractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.Container) (string, error) {
    divisor := resource.Quantity{}
    if divisor.Cmp(fs.Divisor) == 0 {
        divisor = resource.MustParse("1")
    } else {
        divisor = fs.Divisor
    }

    switch fs.Resource {
    case "limits.cpu":
        return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor)
    case "limits.memory":
        return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor)
    case "requests.cpu":
        return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor)
    case "requests.memory":
        return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor)
    }

    return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource)
}

// convertResourceCPUToString converts cpu value to the format of divisor and returns
// ceiling of the value.
func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) {
    c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue())))
    return strconv.FormatInt(c, 10), nil
}

// convertResourceMemoryToString converts memory value to the format of divisor and returns
// ceiling of the value.
func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) {
    m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value())))
    return strconv.FormatInt(m, 10), nil
}

// findContainerInPod finds a container by its name in the provided pod
func findContainerInPod(pod *v1.Pod, containerName string) (*v1.Container, error) {
    for _, container := range pod.Spec.Containers {
        if container.Name == containerName {
            return &container, nil
        }
    }
    return nil, fmt.Errorf("container %s not found", containerName)
}

// MergeContainerResourceLimits checks if a limit is applied for
// the container, and if not, it sets the limit to the passed resource list.
func MergeContainerResourceLimits(container *v1.Container,
    allocatable v1.ResourceList) {
    if container.Resources.Limits == nil {
        container.Resources.Limits = make(v1.ResourceList)
    }
    for _, resource := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} {
        if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() {
            if cap, exists := allocatable[resource]; exists {
                container.Resources.Limits[resource] = *cap.Copy()
            }
        }
    }
}
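An illustrative sketch tying these helpers together: summing a pod's effective requests and then reading one value the way the downward API does, rounded up against a divisor. The resourcehelper import path is an assumption about where this package sits under vendor/k8s.io/client-go, and the pod literal is made up:

package example

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/client-go/pkg/api/v1"
    resourcehelper "k8s.io/client-go/pkg/api/v1/resource" // assumed path for the package above
)

func demoPodResources() error {
    pod := &v1.Pod{
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: "app",
                    Resources: v1.ResourceRequirements{
                        Requests: v1.ResourceList{
                            v1.ResourceCPU:    resource.MustParse("250m"),
                            v1.ResourceMemory: resource.MustParse("64Mi"),
                        },
                    },
                },
            },
        },
    }

    reqs, limits, err := resourcehelper.PodRequestsAndLimits(pod)
    if err != nil {
        return err
    }
    fmt.Println(len(reqs), len(limits)) // 2 0

    // CPU requests are reported in millicores, everything else in absolute units.
    fmt.Println(resourcehelper.GetResourceRequest(pod, v1.ResourceCPU)) // 250

    // Same request exposed via a field selector, rounded up against the divisor.
    val, err := resourcehelper.ExtractResourceValueByContainerName(
        &v1.ResourceFieldSelector{Resource: "requests.cpu", Divisor: resource.MustParse("100m")},
        pod, "app")
    fmt.Println(val) // 3, i.e. ceil(250m / 100m)
    return err
}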
@@ -0,0 +1,182 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resource

import (
    "testing"

    "github.com/stretchr/testify/assert"

    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/client-go/pkg/api/v1"
)

func TestResourceHelpers(t *testing.T) {
    cpuLimit := resource.MustParse("10")
    memoryLimit := resource.MustParse("10G")
    resourceSpec := v1.ResourceRequirements{
        Limits: v1.ResourceList{
            "cpu":             cpuLimit,
            "memory":          memoryLimit,
            "kube.io/storage": memoryLimit,
        },
    }
    if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 {
        t.Errorf("expected cpulimit %v, got %v", cpuLimit, res)
    }
    if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 {
        t.Errorf("expected memorylimit %v, got %v", memoryLimit, res)
    }
    resourceSpec = v1.ResourceRequirements{
        Limits: v1.ResourceList{
            "memory":          memoryLimit,
            "kube.io/storage": memoryLimit,
        },
    }
    if res := resourceSpec.Limits.Cpu(); res.Value() != 0 {
        t.Errorf("expected cpulimit %v, got %v", 0, res)
    }
    if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 {
        t.Errorf("expected memorylimit %v, got %v", memoryLimit, res)
    }
}

func TestDefaultResourceHelpers(t *testing.T) {
    resourceList := v1.ResourceList{}
    if resourceList.Cpu().Format != resource.DecimalSI {
        t.Errorf("expected %v, actual %v", resource.DecimalSI, resourceList.Cpu().Format)
    }
    if resourceList.Memory().Format != resource.BinarySI {
        t.Errorf("expected %v, actual %v", resource.BinarySI, resourceList.Memory().Format)
    }
}

func TestExtractResourceValue(t *testing.T) {
    cases := []struct {
        fs            *v1.ResourceFieldSelector
        pod           *v1.Pod
        cName         string
        expectedValue string
        expectedError error
    }{
        {
            fs: &v1.ResourceFieldSelector{
                Resource: "limits.cpu",
            },
            cName:         "foo",
            pod:           getPod("foo", "", "9", "", ""),
            expectedValue: "9",
        },
        {
            fs: &v1.ResourceFieldSelector{
                Resource: "requests.cpu",
            },
            cName:         "foo",
            pod:           getPod("foo", "", "", "", ""),
            expectedValue: "0",
        },
        {
            fs: &v1.ResourceFieldSelector{
                Resource: "requests.cpu",
            },
            cName:         "foo",
            pod:           getPod("foo", "8", "", "", ""),
            expectedValue: "8",
        },
        {
            fs: &v1.ResourceFieldSelector{
                Resource: "requests.cpu",
            },
            cName:         "foo",
            pod:           getPod("foo", "100m", "", "", ""),
            expectedValue: "1",
        },
        {
            fs: &v1.ResourceFieldSelector{
                Resource: "requests.cpu",
                Divisor:  resource.MustParse("100m"),
            },
            cName:         "foo",
            pod:           getPod("foo", "1200m", "", "", ""),
            expectedValue: "12",
        },
        {
            fs: &v1.ResourceFieldSelector{
                Resource: "requests.memory",
            },
            cName:         "foo",
            pod:           getPod("foo", "", "", "100Mi", ""),
            expectedValue: "104857600",
        },
        {
            fs: &v1.ResourceFieldSelector{
                Resource: "requests.memory",
                Divisor:  resource.MustParse("1Mi"),
            },
            cName:         "foo",
            pod:           getPod("foo", "", "", "100Mi", "1Gi"),
            expectedValue: "100",
        },
        {
            fs: &v1.ResourceFieldSelector{
                Resource: "limits.memory",
            },
            cName:         "foo",
            pod:           getPod("foo", "", "", "10Mi", "100Mi"),
            expectedValue: "104857600",
        },
    }
    as := assert.New(t)
    for idx, tc := range cases {
        actual, err := ExtractResourceValueByContainerName(tc.fs, tc.pod, tc.cName)
        if tc.expectedError != nil {
            as.Equal(tc.expectedError, err, "expected test case [%d] to fail with error %v; got %v", idx, tc.expectedError, err)
        } else {
            as.Nil(err, "expected test case [%d] to not return an error; got %v", idx, err)
            as.Equal(tc.expectedValue, actual, "expected test case [%d] to return %q; got %q instead", idx, tc.expectedValue, actual)
        }
    }
}

func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *v1.Pod {
    resources := v1.ResourceRequirements{
        Limits:   make(v1.ResourceList),
        Requests: make(v1.ResourceList),
    }
    if cpuLimit != "" {
        resources.Limits[v1.ResourceCPU] = resource.MustParse(cpuLimit)
    }
    if memoryLimit != "" {
        resources.Limits[v1.ResourceMemory] = resource.MustParse(memoryLimit)
    }
    if cpuRequest != "" {
        resources.Requests[v1.ResourceCPU] = resource.MustParse(cpuRequest)
    }
    if memoryRequest != "" {
        resources.Requests[v1.ResourceMemory] = resource.MustParse(memoryRequest)
    }
    return &v1.Pod{
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:      cname,
                    Resources: resources,
                },
            },
        },
    }
}
@@ -1,358 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
    "fmt"
    "math"
    "strconv"
    "time"

    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/pkg/api"
)

// Returns string version of ResourceName.
func (self ResourceName) String() string {
    return string(self)
}

// Returns the CPU limit if specified.
func (self *ResourceList) Cpu() *resource.Quantity {
    if val, ok := (*self)[ResourceCPU]; ok {
        return &val
    }
    return &resource.Quantity{Format: resource.DecimalSI}
}

// Returns the Memory limit if specified.
func (self *ResourceList) Memory() *resource.Quantity {
    if val, ok := (*self)[ResourceMemory]; ok {
        return &val
    }
    return &resource.Quantity{Format: resource.BinarySI}
}

func (self *ResourceList) Pods() *resource.Quantity {
    if val, ok := (*self)[ResourcePods]; ok {
        return &val
    }
    return &resource.Quantity{}
}

func (self *ResourceList) NvidiaGPU() *resource.Quantity {
    if val, ok := (*self)[ResourceNvidiaGPU]; ok {
        return &val
    }
    return &resource.Quantity{}
}

func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) {
    for i := range statuses {
        if statuses[i].Name == name {
            return statuses[i], true
        }
    }
    return ContainerStatus{}, false
}

func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus {
    for i := range statuses {
        if statuses[i].Name == name {
            return statuses[i]
        }
    }
    return ContainerStatus{}
}

// IsPodAvailable returns true if a pod is available; false otherwise.
// Precondition for an available pod is that it must be ready. On top
// of that, there are two cases when a pod can be considered available:
// 1. minReadySeconds == 0, or
// 2. LastTransitionTime (is set) + minReadySeconds < current time
func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool {
    if !IsPodReady(pod) {
        return false
    }

    c := GetPodReadyCondition(pod.Status)
    minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
    if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) {
        return true
    }
    return false
}

// IsPodReady returns true if a pod is ready; false otherwise.
func IsPodReady(pod *Pod) bool {
    return IsPodReadyConditionTrue(pod.Status)
}

// IsPodReady retruns true if a pod is ready; false otherwise.
func IsPodReadyConditionTrue(status PodStatus) bool {
    condition := GetPodReadyCondition(status)
    return condition != nil && condition.Status == ConditionTrue
}

// Extracts the pod ready condition from the given status and returns that.
// Returns nil if the condition is not present.
func GetPodReadyCondition(status PodStatus) *PodCondition {
    _, condition := GetPodCondition(&status, PodReady)
    return condition
}

// GetPodCondition extracts the provided condition from the given status and returns that.
// Returns nil and -1 if the condition is not present, and the index of the located condition.
func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) {
    if status == nil {
        return -1, nil
    }
    for i := range status.Conditions {
        if status.Conditions[i].Type == conditionType {
            return i, &status.Conditions[i]
        }
    }
    return -1, nil
}

// GetNodeCondition extracts the provided condition from the given status and returns that.
// Returns nil and -1 if the condition is not present, and the index of the located condition.
func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) {
    if status == nil {
        return -1, nil
    }
    for i := range status.Conditions {
        if status.Conditions[i].Type == conditionType {
            return i, &status.Conditions[i]
        }
    }
    return -1, nil
}

// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the
// status has changed.
// Returns true if pod condition has changed or has been added.
func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool {
    condition.LastTransitionTime = metav1.Now()
    // Try to find this pod condition.
    conditionIndex, oldCondition := GetPodCondition(status, condition.Type)

    if oldCondition == nil {
        // We are adding new pod condition.
        status.Conditions = append(status.Conditions, *condition)
        return true
    } else {
        // We are updating an existing condition, so we need to check if it has changed.
        if condition.Status == oldCondition.Status {
            condition.LastTransitionTime = oldCondition.LastTransitionTime
        }

        isEqual := condition.Status == oldCondition.Status &&
            condition.Reason == oldCondition.Reason &&
            condition.Message == oldCondition.Message &&
            condition.LastProbeTime.Equal(oldCondition.LastProbeTime) &&
            condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime)

        status.Conditions[conditionIndex] = *condition
        // Return true if one of the fields have changed.
        return !isEqual
    }
}

// IsNodeReady returns true if a node is ready; false otherwise.
func IsNodeReady(node *Node) bool {
    for _, c := range node.Status.Conditions {
        if c.Type == NodeReady {
            return c.Status == ConditionTrue
        }
    }
    return false
}

// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
// containers of the pod.
func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) {
    reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{}
    for _, container := range pod.Spec.Containers {
        for name, quantity := range container.Resources.Requests {
            if value, ok := reqs[name]; !ok {
                reqs[name] = *quantity.Copy()
            } else {
                value.Add(quantity)
                reqs[name] = value
            }
        }
        for name, quantity := range container.Resources.Limits {
            if value, ok := limits[name]; !ok {
                limits[name] = *quantity.Copy()
            } else {
                value.Add(quantity)
                limits[name] = value
            }
        }
    }
    // init containers define the minimum of any resource
    for _, container := range pod.Spec.InitContainers {
        for name, quantity := range container.Resources.Requests {
            value, ok := reqs[name]
            if !ok {
                reqs[name] = *quantity.Copy()
                continue
            }
            if quantity.Cmp(value) > 0 {
                reqs[name] = *quantity.Copy()
            }
        }
        for name, quantity := range container.Resources.Limits {
            value, ok := limits[name]
            if !ok {
                limits[name] = *quantity.Copy()
                continue
            }
            if quantity.Cmp(value) > 0 {
                limits[name] = *quantity.Copy()
            }
        }
    }
    return
}

// finds and returns the request for a specific resource.
func GetResourceRequest(pod *Pod, resource ResourceName) int64 {
    if resource == ResourcePods {
        return 1
    }
    totalResources := int64(0)
    for _, container := range pod.Spec.Containers {
        if rQuantity, ok := container.Resources.Requests[resource]; ok {
            if resource == ResourceCPU {
                totalResources += rQuantity.MilliValue()
            } else {
                totalResources += rQuantity.Value()
            }
        }
    }
    // take max_resource(sum_pod, any_init_container)
    for _, container := range pod.Spec.InitContainers {
        if rQuantity, ok := container.Resources.Requests[resource]; ok {
            if resource == ResourceCPU && rQuantity.MilliValue() > totalResources {
                totalResources = rQuantity.MilliValue()
            } else if rQuantity.Value() > totalResources {
                totalResources = rQuantity.Value()
            }
        }
    }
    return totalResources
}

// ExtractResourceValueByContainerName extracts the value of a resource
// by providing container name
func ExtractResourceValueByContainerName(fs *ResourceFieldSelector, pod *Pod, containerName string) (string, error) {
    container, err := findContainerInPod(pod, containerName)
    if err != nil {
        return "", err
    }
    return ExtractContainerResourceValue(fs, container)
}

// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource
// by providing container name and node allocatable
func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *ResourceFieldSelector, pod *Pod, containerName string, nodeAllocatable ResourceList) (string, error) {
    realContainer, err := findContainerInPod(pod, containerName)
    if err != nil {
        return "", err
    }

    containerCopy, err := api.Scheme.DeepCopy(realContainer)
    if err != nil {
        return "", fmt.Errorf("failed to perform a deep copy of container object: %v", err)
    }

    container, ok := containerCopy.(*Container)
    if !ok {
        return "", fmt.Errorf("unexpected type returned from deep copy of container object")
    }

    MergeContainerResourceLimits(container, nodeAllocatable)

    return ExtractContainerResourceValue(fs, container)
}

// ExtractContainerResourceValue extracts the value of a resource
// in an already known container
func ExtractContainerResourceValue(fs *ResourceFieldSelector, container *Container) (string, error) {
    divisor := resource.Quantity{}
    if divisor.Cmp(fs.Divisor) == 0 {
        divisor = resource.MustParse("1")
    } else {
        divisor = fs.Divisor
    }

    switch fs.Resource {
    case "limits.cpu":
        return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor)
    case "limits.memory":
        return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor)
    case "requests.cpu":
        return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor)
    case "requests.memory":
        return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor)
    }

    return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource)
}

// convertResourceCPUToString converts cpu value to the format of divisor and returns
// ceiling of the value.
func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) {
    c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue())))
    return strconv.FormatInt(c, 10), nil
}

// convertResourceMemoryToString converts memory value to the format of divisor and returns
// ceiling of the value.
func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) {
    m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value())))
    return strconv.FormatInt(m, 10), nil
}

// findContainerInPod finds a container by its name in the provided pod
func findContainerInPod(pod *Pod, containerName string) (*Container, error) {
    for _, container := range pod.Spec.Containers {
        if container.Name == containerName {
            return &container, nil
        }
    }
    return nil, fmt.Errorf("container %s not found", containerName)
}

// MergeContainerResourceLimits checks if a limit is applied for
// the container, and if not, it sets the limit to the passed resource list.
func MergeContainerResourceLimits(container *Container,
    allocatable ResourceList) {
    if container.Resources.Limits == nil {
        container.Resources.Limits = make(ResourceList)
    }
    for _, resource := range []ResourceName{ResourceCPU, ResourceMemory} {
        if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() {
            if cap, exists := allocatable[resource]; exists {
                container.Resources.Limits[resource] = *cap.Copy()
            }
        }
    }
}
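For code that previously imported these helpers straight from v1, a hedged before/after sketch. The toy podFitsOnNode function and the resourcehelper import path are assumptions; only the helper names and the node subpackage path come from this diff:

package example

import (
    "k8s.io/client-go/pkg/api/v1"
    nodeutil "k8s.io/client-go/pkg/api/v1/node"
    resourcehelper "k8s.io/client-go/pkg/api/v1/resource" // assumed path for the resource helpers
)

// podFitsOnNode strings the relocated helpers together: the readiness check
// now comes from the node subpackage and the request accounting from the
// resource subpackage, instead of v1 itself.
func podFitsOnNode(pod *v1.Pod, node *v1.Node) bool {
    if !nodeutil.IsNodeReady(node) { // was: v1.IsNodeReady(node)
        return false
    }
    // was: v1.GetResourceRequest(pod, v1.ResourceCPU)
    cpuMillis := resourcehelper.GetResourceRequest(pod, v1.ResourceCPU)
    allocatable := node.Status.Allocatable
    return cpuMillis <= allocatable.Cpu().MilliValue()
}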