mirror of https://github.com/k3s-io/k3s
Merge pull request #70039 from liggitt/deployment-rollback
Switch kubectl rollout to directly rolling back deployments
commit 975e6c0a73

@@ -31,6 +31,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",

@@ -74,6 +75,7 @@ go_library(
         "//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

@@ -19,14 +19,11 @@ package kubectl
 import (
 	"bytes"
 	"fmt"
-	"os"
-	"os/signal"
 	"sort"
-	"syscall"

 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
-	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"

@@ -34,12 +31,10 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/json"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
-	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes"
 	kapps "k8s.io/kubernetes/pkg/kubectl/apps"
 	"k8s.io/kubernetes/pkg/kubectl/scheme"
 	deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
-	sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice"
 )

 const (

@@ -119,124 +114,140 @@ func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations m
 		return "", fmt.Errorf("failed to retrieve Deployment %s: %v", name, err)
 	}
+
+	rsForRevision, err := deploymentRevision(deployment, r.c, toRevision)
+	if err != nil {
+		return "", err
+	}
 	if dryRun {
-		return simpleDryRun(deployment, r.c, toRevision)
+		return printTemplate(&rsForRevision.Spec.Template)
 	}
 	if deployment.Spec.Paused {
 		return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", name)
 	}
-	deploymentRollback := &extensionsv1beta1.DeploymentRollback{
-		Name:               name,
-		UpdatedAnnotations: updatedAnnotations,
-		RollbackTo: extensionsv1beta1.RollbackConfig{
-			Revision: toRevision,
+
+	// Skip if the revision already matches current Deployment
+	if equalIgnoreHash(&rsForRevision.Spec.Template, &deployment.Spec.Template) {
+		return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil
+	}
+
+	// remove hash label before patching back into the deployment
+	delete(rsForRevision.Spec.Template.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
+
+	// compute deployment annotations
+	annotations := map[string]string{}
+	for k := range annotationsToSkip {
+		if v, ok := deployment.Annotations[k]; ok {
+			annotations[k] = v
+		}
+	}
+	for k, v := range rsForRevision.Annotations {
+		if !annotationsToSkip[k] {
+			annotations[k] = v
+		}
+	}
+
+	// make patch to restore
+	patchType, patch, err := getDeploymentPatch(&rsForRevision.Spec.Template, annotations)
+	if err != nil {
+		return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err)
+	}
+
+	// Restore revision
+	if _, err = r.c.AppsV1().Deployments(namespace).Patch(name, patchType, patch); err != nil {
+		return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err)
+	}
+	return rollbackSuccess, nil
+}
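
For orientation: callers reach this code through the package's Rollbacker abstraction rather than constructing a DeploymentRollbacker directly. A minimal sketch of driving a rollback from outside the package — the clientset and deployment object are assumed setup, and the variable names are illustrative, not part of this diff:

    // Hypothetical caller; RollbackerFor and Rollback are this package's API,
    // everything else (clientset, deployment) is assumed to exist already.
    rollbacker, err := kubectl.RollbackerFor(schema.GroupKind{Group: "apps", Kind: "Deployment"}, clientset)
    if err != nil {
        return err
    }
    // toRevision == 0 means "the previous revision"; dryRun == true only
    // prints the pod template that would be restored, without patching.
    result, err := rollbacker.Rollback(deployment, nil, 0, false)
    if err != nil {
        return err
    }
    fmt.Println(result) // e.g. "rolled back"
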
+
+// equalIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
+// We ignore pod-template-hash because:
+// 1. The hash result would be different upon podTemplateSpec API changes
+//    (e.g. the addition of a new field will cause the hash code to change)
+// 2. The deployment template won't have hash labels
+func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool {
+	t1Copy := template1.DeepCopy()
+	t2Copy := template2.DeepCopy()
+	// Remove hash labels from template.Labels before comparing
+	delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
+	delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
+	return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
+}
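
equalIgnoreHash is what makes the skip check above safe: a ReplicaSet's saved template always carries the generated pod-template-hash label, while the Deployment's live template does not, so a plain DeepEqual would never match. A small self-contained illustration (the label values are invented):

    a := &corev1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{
        Labels: map[string]string{"app": "nginx", appsv1.DefaultDeploymentUniqueLabelKey: "5d59d67564"},
    }}
    b := &corev1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{
        Labels: map[string]string{"app": "nginx"},
    }}
    equalIgnoreHash(a, b)                // true: only the hash label differs
    apiequality.Semantic.DeepEqual(a, b) // false: the raw templates differ
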
+
+// annotationsToSkip lists the annotations that should be preserved from the deployment and not
+// copied from the replicaset when rolling a deployment back
+var annotationsToSkip = map[string]bool{
+	corev1.LastAppliedConfigAnnotation:       true,
+	deploymentutil.RevisionAnnotation:        true,
+	deploymentutil.RevisionHistoryAnnotation: true,
+	deploymentutil.DesiredReplicasAnnotation: true,
+	deploymentutil.MaxReplicasAnnotation:     true,
+	appsv1.DeprecatedRollbackTo:              true,
+}
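
Because the patch built below replaces /metadata/annotations wholesale, this map decides what survives a rollback: bookkeeping annotations keep their current Deployment values, everything else is copied from the ReplicaSet, and any other annotation currently on the Deployment is dropped. A toy walk-through of the merge loops in Rollback above (the maps are invented):

    current := map[string]string{deploymentutil.RevisionAnnotation: "3", "team": "infra"}
    fromRS := map[string]string{deploymentutil.RevisionAnnotation: "1", "kubernetes.io/change-cause": "v1"}
    annotations := map[string]string{}
    for k := range annotationsToSkip {
        if v, ok := current[k]; ok {
            annotations[k] = v
        }
    }
    for k, v := range fromRS {
        if !annotationsToSkip[k] {
            annotations[k] = v
        }
    }
    // Result: the revision stays "3" (kept from the Deployment), change-cause
    // becomes "v1" (copied from the ReplicaSet), and "team" is gone
    // (not in annotationsToSkip and not present on the ReplicaSet).
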
+
+// getDeploymentPatch returns a patch that can be applied to restore a Deployment to a
+// previous version. If the returned error is nil the patch is valid.
+func getDeploymentPatch(podTemplate *corev1.PodTemplateSpec, annotations map[string]string) (types.PatchType, []byte, error) {
+	// Create a patch of the Deployment that replaces spec.template
+	patch, err := json.Marshal([]interface{}{
+		map[string]interface{}{
+			"op":    "replace",
+			"path":  "/spec/template",
+			"value": podTemplate,
 		},
-	}
-	result := ""
-
-	// Get current events
-	events, err := r.c.CoreV1().Events(namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return result, err
-	}
-	// Do the rollback
-	// TODO: This is DEPRECATED. It should be updated. DaemonSets and StatefulSets implement rollback by
-	// patching using history (ControllerRevision data). Deployments should probably also implement
-	// rollback using a patch.
-	if err := r.c.ExtensionsV1beta1().Deployments(namespace).Rollback(deploymentRollback); err != nil {
-		return result, err
-	}
-	// Watch for the changes of events
-	watch, err := r.c.CoreV1().Events(namespace).Watch(metav1.ListOptions{Watch: true, ResourceVersion: events.ResourceVersion})
-	if err != nil {
-		return result, err
-	}
-	result = watchRollbackEvent(watch)
-	return result, err
+		map[string]interface{}{
+			"op":    "replace",
+			"path":  "/metadata/annotations",
+			"value": annotations,
+		},
+	})
+	return types.JSONPatchType, patch, err
 }
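
The returned patch is a two-operation RFC 6902 JSON Patch, which is why types.JSONPatchType is returned alongside it; the server applies both operations atomically. A sketch of the call-site pattern, with clientset, namespace, name, rs, and annotations assumed (the Patch signature shown matches the context-free client-go used in this tree):

    patchType, patch, err := getDeploymentPatch(&rs.Spec.Template, annotations)
    if err != nil {
        return err
    }
    // patch is a byte slice like:
    // [{"op":"replace","path":"/spec/template","value":{...}},
    //  {"op":"replace","path":"/metadata/annotations","value":{...}}]
    if _, err := clientset.AppsV1().Deployments(namespace).Patch(name, patchType, patch); err != nil {
        return err
    }
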
 
-// watchRollbackEvent watches for rollback events and returns rollback result
-func watchRollbackEvent(w watch.Interface) string {
-	signals := make(chan os.Signal, 1)
-	signal.Notify(signals, os.Interrupt, os.Kill, syscall.SIGTERM)
-	for {
-		select {
-		case event, ok := <-w.ResultChan():
-			if !ok {
-				return ""
-			}
-			obj, ok := event.Object.(*corev1.Event)
-			if !ok {
-				w.Stop()
-				return ""
-			}
-			isRollback, result := isRollbackEvent(obj)
-			if isRollback {
-				w.Stop()
-				return result
-			}
-		case <-signals:
-			w.Stop()
-		}
-	}
-}
-
-// isRollbackEvent checks if the input event is about rollback, and returns true and
-// related result string back if it is.
-func isRollbackEvent(e *corev1.Event) (bool, string) {
-	rollbackEventReasons := []string{deploymentutil.RollbackRevisionNotFound, deploymentutil.RollbackTemplateUnchanged, deploymentutil.RollbackDone}
-	for _, reason := range rollbackEventReasons {
-		if e.Reason == reason {
-			if reason == deploymentutil.RollbackDone {
-				return true, rollbackSuccess
-			}
-			return true, fmt.Sprintf("%s (%s: %s)", rollbackSkipped, e.Reason, e.Message)
-		}
-	}
-	return false, ""
-}
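
These two deleted helpers are the whole reason the old path needed the events API: the extensions/v1beta1 DeploymentRollback subresource reported its outcome only through Events, so kubectl had to list events, trigger the rollback, then watch for a Done/NotFound/Unchanged reason. For reference, the generic shape of consuming a watch.Interface looks like this (a sketch, not the removed code; clientset and namespace are assumed):

    w, err := clientset.CoreV1().Events(namespace).Watch(metav1.ListOptions{Watch: true})
    if err != nil {
        return err
    }
    defer w.Stop()
    for event := range w.ResultChan() {
        e, ok := event.Object.(*corev1.Event)
        if !ok {
            break // non-Event object on the channel, e.g. a watch error
        }
        if e.Reason == deploymentutil.RollbackDone {
            break // the old code mapped this reason to rollbackSuccess
        }
    }
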
-
-func simpleDryRun(deployment *appsv1.Deployment, c kubernetes.Interface, toRevision int64) (string, error) {
+func deploymentRevision(deployment *appsv1.Deployment, c kubernetes.Interface, toRevision int64) (revision *appsv1.ReplicaSet, err error) {
+
 	_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
 	if err != nil {
-		return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err)
+		return nil, fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err)
 	}
 	allRSs := allOldRSs
 	if newRS != nil {
 		allRSs = append(allRSs, newRS)
 	}

-	revisionToSpec := make(map[int64]*corev1.PodTemplateSpec)
+	var (
+		latestReplicaSet   *appsv1.ReplicaSet
+		latestRevision     = int64(-1)
+		previousReplicaSet *appsv1.ReplicaSet
+		previousRevision   = int64(-1)
+	)
 	for _, rs := range allRSs {
-		v, err := deploymentutil.Revision(rs)
-		if err != nil {
-			continue
+		if v, err := deploymentutil.Revision(rs); err == nil {
+			if toRevision == 0 {
+				if latestRevision < v {
+					// newest one we've seen so far
+					previousRevision = latestRevision
+					previousReplicaSet = latestReplicaSet
+					latestRevision = v
+					latestReplicaSet = rs
+				} else if previousRevision < v {
+					// second newest one we've seen so far
+					previousRevision = v
+					previousReplicaSet = rs
+				}
+			} else if toRevision == v {
+				return rs, nil
+			}
 		}
-		revisionToSpec[v] = &rs.Spec.Template
 	}

-	if len(revisionToSpec) < 2 {
-		return "", fmt.Errorf("no rollout history found for deployment %q", deployment.Name)
-	}
-
 	if toRevision > 0 {
-		template, ok := revisionToSpec[toRevision]
-		if !ok {
-			return "", revisionNotFoundErr(toRevision)
-		}
-		return printTemplate(template)
+		return nil, revisionNotFoundErr(toRevision)
 	}

-	// Sort the revisionToSpec map by revision
-	revisions := make([]int64, 0, len(revisionToSpec))
-	for r := range revisionToSpec {
-		revisions = append(revisions, r)
+	if previousReplicaSet == nil {
+		return nil, fmt.Errorf("no rollout history found for deployment %q", deployment.Name)
 	}
-	sliceutil.SortInts64(revisions)
-
-	template, _ := revisionToSpec[revisions[len(revisions)-2]]
-	return printTemplate(template)
+	return previousReplicaSet, nil
 }
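
deploymentRevision replaces simpleDryRun's collect-then-sort approach with a single pass that tracks the newest and second-newest revision numbers; toRevision == 0 then resolves to the second-newest ReplicaSet. The same selection logic in isolation, over plain ints (inputs invented):

    latest, previous := int64(-1), int64(-1)
    for _, v := range []int64{3, 1, 4, 2} {
        if latest < v {
            previous, latest = latest, v // v is the newest seen so far
        } else if previous < v {
            previous = v // v is the second newest seen so far
        }
    }
    // latest == 4, previous == 3; a rollback with --to-revision=0 targets 3.
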
 
 type DaemonSetRollbacker struct {

@@ -20,7 +20,9 @@ import (
 	"reflect"
 	"testing"

+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes/fake"
 )

@@ -44,3 +46,22 @@ func TestRollbackerFor(t *testing.T) {
 		}
 	}
 }
+
+func TestGetDeploymentPatch(t *testing.T) {
+	patchType, patchBytes, err := getDeploymentPatch(&corev1.PodTemplateSpec{
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{{Image: "foo"}}}}, map[string]string{"a": "true"})
+	if err != nil {
+		t.Error(err)
+	}
+	if patchType != types.JSONPatchType {
+		t.Errorf("expected JSON patch, got %v", patchType)
+	}
+	expectedPatch := `[` +
+		`{"op":"replace","path":"/spec/template","value":{"metadata":{"creationTimestamp":null},"spec":{"containers":[{"name":"","image":"foo","resources":{}}]}}},` +
+		`{"op":"replace","path":"/metadata/annotations","value":{"a":"true"}}` +
+		`]`
+	if string(patchBytes) != expectedPatch {
+		t.Errorf("expected:\n%s\ngot\n%s", expectedPatch, string(patchBytes))
+	}
+}

@@ -33,6 +33,16 @@ import (
 const (
 	// RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence
 	RevisionAnnotation = "deployment.kubernetes.io/revision"
+	// RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment.
+	RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history"
+	// DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation
+	// in its replica sets. Helps in separating scaling events from the rollout process and for
+	// determining if the new replica set for a deployment is really saturated.
+	DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas"
+	// MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which
+	// is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their
+	// proportions in case the deployment has surge replicas.
+	MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas"
 	// RollbackRevisionNotFound is not found rollback event reason
 	RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound"
 	// RollbackTemplateUnchanged is the template unchanged rollback event reason
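
These constants are plain string annotations on a Deployment's ReplicaSets; deploymentutil.Revision, used throughout the diff above, is essentially a strconv.ParseInt over RevisionAnnotation. A minimal sketch (the ReplicaSet literal is invented):

    rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{
        Annotations: map[string]string{RevisionAnnotation: "7"},
    }}
    v, err := strconv.ParseInt(rs.Annotations[RevisionAnnotation], 10, 64)
    if err != nil {
        // annotation missing or malformed
    }
    _ = v // 7
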
@@ -299,7 +299,7 @@ run_deployment_tests() {
   sleep 1
   kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
   # Rollback to revision 1000000 - should be no-op
-  kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
+  ! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
   kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
   # Rollback to last revision
   kubectl rollout undo deployment nginx "${kube_flags[@]}"

@@ -222,7 +222,7 @@ EOF
   kube::test::if_has_string "${output_message}" 'deploy:'

   # check that "rollout undo" supports --template output
-  output_message=$(kubectl "${kube_flags[@]}" rollout undo deploy/deploy --template="{{ .metadata.name }}:")
+  output_message=$(kubectl "${kube_flags[@]}" rollout undo deploy/deploy --to-revision=1 --template="{{ .metadata.name }}:")
   kube::test::if_has_string "${output_message}" 'deploy:'

   # check that "config view" command supports --template output