mirror of https://github.com/k3s-io/k3s
kubectl: copy deployment util dependency into kubectl
parent 4d66eeb922
commit ffd4ea963d
@@ -62,10 +62,10 @@ go_library(
     deps = [
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/core/v1:go_default_library",
-        "//pkg/controller/deployment/util:go_default_library",
         "//pkg/kubectl/apps:go_default_library",
         "//pkg/kubectl/scheme:go_default_library",
         "//pkg/kubectl/util:go_default_library",
+        "//pkg/kubectl/util/deployment:go_default_library",
         "//pkg/kubectl/util/podutils:go_default_library",
         "//pkg/kubectl/util/slice:go_default_library",
         "//pkg/printers:go_default_library",
@@ -23,7 +23,7 @@ import (
 	"text/tabwriter"

 	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"

 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,8 +36,8 @@ import (
 	clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	apiv1 "k8s.io/kubernetes/pkg/apis/core/v1"
-	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	kapps "k8s.io/kubernetes/pkg/kubectl/apps"
+	deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
 	sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice"
 	printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
 )
@@ -116,7 +116,7 @@ func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision i
 		allRSs = append(allRSs, newRS)
 	}

-	historyInfo := make(map[int64]*v1.PodTemplateSpec)
+	historyInfo := make(map[int64]*corev1.PodTemplateSpec)
 	for _, rs := range allRSs {
 		v, err := deploymentutil.Revision(rs)
 		if err != nil {
@@ -166,7 +166,7 @@ func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision i
 	})
 }

-func printTemplate(template *v1.PodTemplateSpec) (string, error) {
+func printTemplate(template *corev1.PodTemplateSpec) (string, error) {
 	buf := bytes.NewBuffer([]byte{})
 	internalTemplate := &api.PodTemplateSpec{}
 	if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(template, internalTemplate, nil); err != nil {
@@ -38,10 +38,8 @@ import (
 	"k8s.io/client-go/kubernetes"
 	kapps "k8s.io/kubernetes/pkg/kubectl/apps"
 	"k8s.io/kubernetes/pkg/kubectl/scheme"
+	deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
 	sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice"
-
-	// kubectl should not be taking dependencies on logic in the controllers
-	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 )

 const (
@@ -35,8 +35,8 @@ import (
 	scaleclient "k8s.io/client-go/scale"
 	"k8s.io/client-go/util/integer"
 	"k8s.io/client-go/util/retry"
-	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/pkg/kubectl/util"
+	deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
 	"k8s.io/kubernetes/pkg/kubectl/util/podutils"
 )

@@ -23,8 +23,8 @@ import (
 	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/pkg/kubectl/scheme"
+	deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
 )

 // StatusViewer provides an interface for resources that have rollout status.
@@ -65,7 +65,7 @@ func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64
 	}

 	if revision > 0 {
-		deploymentRev, err := util.Revision(deployment)
+		deploymentRev, err := deploymentutil.Revision(deployment)
 		if err != nil {
 			return "", false, fmt.Errorf("cannot get the revision of deployment %q: %v", deployment.Name, err)
 		}
@@ -74,8 +74,8 @@ func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64
 		}
 	}
 	if deployment.Generation <= deployment.Status.ObservedGeneration {
-		cond := util.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
-		if cond != nil && cond.Reason == util.TimedOutReason {
+		cond := deploymentutil.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
+		if cond != nil && cond.Reason == deploymentutil.TimedOutReason {
 			return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", deployment.Name)
 		}
 		if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
@@ -66,6 +66,7 @@ filegroup(
     name = "all-srcs",
     srcs = [
         ":package-srcs",
+        "//pkg/kubectl/util/deployment:all-srcs",
         "//pkg/kubectl/util/hash:all-srcs",
        "//pkg/kubectl/util/i18n:all-srcs",
         "//pkg/kubectl/util/logs:all-srcs",
@@ -0,0 +1,32 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["deployment.go"],
+    importpath = "k8s.io/kubernetes/pkg/kubectl/util/deployment",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
@@ -0,0 +1,220 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deployment
+
+import (
+	"sort"
+	"strconv"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
+	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
+)
+
+const (
+	// RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence
+	RevisionAnnotation = "deployment.kubernetes.io/revision"
+	// RollbackRevisionNotFound is not found rollback event reason
+	RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound"
+	// RollbackTemplateUnchanged is the template unchanged rollback event reason
+	RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged"
+	// RollbackDone is the done rollback event reason
+	RollbackDone = "DeploymentRollback"
+	// TimedOutReason is added in a deployment when its newest replica set fails to show any progress
+	// within the given deadline (progressDeadlineSeconds).
+	TimedOutReason = "ProgressDeadlineExceeded"
+)
+
+// GetDeploymentCondition returns the condition with the provided type.
+func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition {
+	for i := range status.Conditions {
+		c := status.Conditions[i]
+		if c.Type == condType {
+			return &c
+		}
+	}
+	return nil
+}
+
+// Revision returns the revision number of the input object.
+func Revision(obj runtime.Object) (int64, error) {
+	acc, err := meta.Accessor(obj)
+	if err != nil {
+		return 0, err
+	}
+	v, ok := acc.GetAnnotations()[RevisionAnnotation]
+	if !ok {
+		return 0, nil
+	}
+	return strconv.ParseInt(v, 10, 64)
+}
+
+// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and
+// ReplicaSetList from client interface. Note that the first set of old replica sets doesn't include the ones
+// with no pods, and the second set of old replica sets include all old replica sets. The third returned value
+// is the new replica set, and it may be nil if it doesn't exist yet.
+func GetAllReplicaSets(deployment *appsv1.Deployment, c appsclient.AppsV1Interface) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet, *appsv1.ReplicaSet, error) {
+	rsList, err := listReplicaSets(deployment, rsListFromClient(c))
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	oldRSes, allOldRSes := findOldReplicaSets(deployment, rsList)
+	newRS := findNewReplicaSet(deployment, rsList)
+	return oldRSes, allOldRSes, newRS, nil
+}
+
+// RsListFromClient returns an rsListFunc that wraps the given client.
+func rsListFromClient(c appsclient.AppsV1Interface) rsListFunc {
+	return func(namespace string, options metav1.ListOptions) ([]*appsv1.ReplicaSet, error) {
+		rsList, err := c.ReplicaSets(namespace).List(options)
+		if err != nil {
+			return nil, err
+		}
+		var ret []*appsv1.ReplicaSet
+		for i := range rsList.Items {
+			ret = append(ret, &rsList.Items[i])
+		}
+		return ret, err
+	}
+}
+
+// TODO: switch this to full namespacers
+type rsListFunc func(string, metav1.ListOptions) ([]*appsv1.ReplicaSet, error)
+type podListFunc func(string, metav1.ListOptions) (*corev1.PodList, error)
+
+// listReplicaSets returns a slice of RSes the given deployment targets.
+// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
+// because only the controller itself should do that.
+// However, it does filter out anything whose ControllerRef doesn't match.
+func listReplicaSets(deployment *appsv1.Deployment, getRSList rsListFunc) ([]*appsv1.ReplicaSet, error) {
+	// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
+	// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
+	namespace := deployment.Namespace
+	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
+	if err != nil {
+		return nil, err
+	}
+	options := metav1.ListOptions{LabelSelector: selector.String()}
+	all, err := getRSList(namespace, options)
+	if err != nil {
+		return nil, err
+	}
+	// Only include those whose ControllerRef matches the Deployment.
+	owned := make([]*appsv1.ReplicaSet, 0, len(all))
+	for _, rs := range all {
+		if metav1.IsControlledBy(rs, deployment) {
+			owned = append(owned, rs)
+		}
+	}
+	return owned, nil
+}
+
+// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
+// We ignore pod-template-hash because:
+// 1. The hash result would be different upon podTemplateSpec API changes
+//    (e.g. the addition of a new field will cause the hash code to change)
+// 2. The deployment template won't have hash labels
+func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool {
+	t1Copy := template1.DeepCopy()
+	t2Copy := template2.DeepCopy()
+	// Remove hash labels from template.Labels before comparing
+	delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
+	delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
+	return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
+}
+
+// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
+func findNewReplicaSet(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) *appsv1.ReplicaSet {
+	sort.Sort(replicaSetsByCreationTimestamp(rsList))
+	for i := range rsList {
+		if equalIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) {
+			// In rare cases, such as after cluster upgrades, Deployment may end up with
+			// having more than one new ReplicaSets that have the same template as its template,
+			// see https://github.com/kubernetes/kubernetes/issues/40415
+			// We deterministically choose the oldest new ReplicaSet.
+			return rsList[i]
+		}
+	}
+	// new ReplicaSet does not exist.
+	return nil
+}
+
+// replicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker.
+type replicaSetsByCreationTimestamp []*appsv1.ReplicaSet
+
+func (o replicaSetsByCreationTimestamp) Len() int      { return len(o) }
+func (o replicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
+func (o replicaSetsByCreationTimestamp) Less(i, j int) bool {
+	if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
+		return o[i].Name < o[j].Name
+	}
+	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
+}
+
+// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes.
+// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
+func findOldReplicaSets(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet) {
+	var requiredRSs []*appsv1.ReplicaSet
+	var allRSs []*appsv1.ReplicaSet
+	newRS := findNewReplicaSet(deployment, rsList)
+	for _, rs := range rsList {
+		// Filter out new replica set
+		if newRS != nil && rs.UID == newRS.UID {
+			continue
+		}
+		allRSs = append(allRSs, rs)
+		if *(rs.Spec.Replicas) != 0 {
+			requiredRSs = append(requiredRSs, rs)
+		}
+	}
+	return requiredRSs, allRSs
+}
+
+// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
+// step. For example:
+//
+// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
+// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
+// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
+// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
+func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
+	surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true)
+	if err != nil {
+		return 0, 0, err
+	}
+	unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	if surge == 0 && unavailable == 0 {
+		// Validation should never allow the user to explicitly use zero values for both maxSurge
+		// maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
+		// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
+		// theory that surge might not work due to quota.
+		unavailable = 1
+	}

+	return int32(surge), int32(unavailable), nil
+}
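For context, here is a minimal, hypothetical sketch (not part of the commit) of how a caller could use the helpers copied into the new k8s.io/kubernetes/pkg/kubectl/util/deployment package; the synthetic Deployment object and the printed values are illustrative assumptions, while the function names, constants, and import path come from the diff above.

// Hypothetical usage sketch of the new kubectl deployment util package.
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	intstrutil "k8s.io/apimachinery/pkg/util/intstr"

	deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
)

func main() {
	// ResolveFenceposts: maxSurge rounds up, maxUnavailable rounds down.
	maxSurge := intstrutil.FromString("25%")
	maxUnavailable := intstrutil.FromString("25%")
	surge, unavailable, err := deploymentutil.ResolveFenceposts(&maxSurge, &maxUnavailable, 10)
	if err != nil {
		panic(err)
	}
	fmt.Println(surge, unavailable) // expected: 3 2 for 10 desired replicas

	// Revision reads the deployment.kubernetes.io/revision annotation.
	d := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{deploymentutil.RevisionAnnotation: "4"},
		},
	}
	rev, _ := deploymentutil.Revision(d)
	fmt.Println(rev) // expected: 4

	// GetDeploymentCondition looks up a condition by type, e.g. Progressing,
	// which rollout status uses together with TimedOutReason.
	cond := deploymentutil.GetDeploymentCondition(d.Status, appsv1.DeploymentProgressing)
	fmt.Println(cond == nil) // expected: true, no conditions on this synthetic object
}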