diff --git a/pkg/kubectl/cmd/rollout/rollout_restart.go b/pkg/kubectl/cmd/rollout/rollout_restart.go
index b273f3336b..b8bb92d074 100644
--- a/pkg/kubectl/cmd/rollout/rollout_restart.go
+++ b/pkg/kubectl/cmd/rollout/rollout_restart.go
@@ -54,11 +54,14 @@ var (
 	restartLong = templates.LongDesc(`
 		Restart a resource.
 
-		A deployment with the "RolloutStrategy" will be rolling restarted.`)
+		Resource will be rollout restarted.`)
 
 	restartExample = templates.Examples(`
 		# Restart a deployment
-		kubectl rollout restart deployment/nginx`)
+		kubectl rollout restart deployment/nginx
+
+		# Restart a daemonset
+		kubectl rollout restart daemonset/abc`)
 )
 
 // NewRolloutRestartOptions returns an initialized RestartOptions instance
@@ -73,7 +76,7 @@ func NewRolloutRestartOptions(streams genericclioptions.IOStreams) *RestartOptio
 func NewCmdRolloutRestart(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
 	o := NewRolloutRestartOptions(streams)
 
-	validArgs := []string{"deployment"}
+	validArgs := []string{"deployment", "daemonset", "statefulset"}
 
 	cmd := &cobra.Command{
 		Use: "restart RESOURCE",
diff --git a/pkg/kubectl/polymorphichelpers/objectrestarter.go b/pkg/kubectl/polymorphichelpers/objectrestarter.go
index 1110beb7f3..891762c38d 100644
--- a/pkg/kubectl/polymorphichelpers/objectrestarter.go
+++ b/pkg/kubectl/polymorphichelpers/objectrestarter.go
@@ -71,6 +71,48 @@ func defaultObjectRestarter(obj runtime.Object) ([]byte, error) {
 		obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339)
 		return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta1.SchemeGroupVersion), obj)
 
+	case *extensionsv1beta1.DaemonSet:
+		if obj.Spec.Template.ObjectMeta.Annotations == nil {
+			obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		}
+		obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339)
+		return runtime.Encode(scheme.Codecs.LegacyCodec(extensionsv1beta1.SchemeGroupVersion), obj)
+
+	case *appsv1.DaemonSet:
+		if obj.Spec.Template.ObjectMeta.Annotations == nil {
+			obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		}
+		obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339)
+		return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), obj)
+
+	case *appsv1beta2.DaemonSet:
+		if obj.Spec.Template.ObjectMeta.Annotations == nil {
+			obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		}
+		obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339)
+		return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta2.SchemeGroupVersion), obj)
+
+	case *appsv1.StatefulSet:
+		if obj.Spec.Template.ObjectMeta.Annotations == nil {
+			obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		}
+		obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339)
+		return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), obj)
+
+	case *appsv1beta1.StatefulSet:
+		if obj.Spec.Template.ObjectMeta.Annotations == nil {
+			obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		}
+		obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339)
+		return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta1.SchemeGroupVersion), obj)
+
+	case *appsv1beta2.StatefulSet:
+		if obj.Spec.Template.ObjectMeta.Annotations == nil {
+			obj.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		}
+		obj.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339)
+		return runtime.Encode(scheme.Codecs.LegacyCodec(appsv1beta2.SchemeGroupVersion), obj)
+
 	default:
 		return nil, fmt.Errorf("restarting is not supported")
 	}
diff --git a/test/cmd/apps.sh b/test/cmd/apps.sh
index 4390a3947c..3e221fde6a 100755
--- a/test/cmd/apps.sh
+++ b/test/cmd/apps.sh
@@ -43,6 +43,10 @@ run_daemonset_tests() {
   kubectl set resources daemonsets/bind "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
   kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '4'
 
+  # Rollout restart should change generation
+  kubectl rollout restart daemonset/bind "${kube_flags[@]}"
+  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '5'
+
   # Clean up
   kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
 
@@ -488,6 +492,10 @@ run_stateful_set_tests() {
   # TODO: test robust scaling in an e2e.
   wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"
 
+  # Rollout restart should change generation
+  kubectl rollout restart statefulset nginx "${kube_flags[@]}"
+  kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '3'
+
   ### Clean up
   kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]:?}"
   # Post-condition: no pods from statefulset controller