Revert "Gracefully delete pods from the Kubelet"

pull/6/head
Filip Grzadkowski 2015-06-02 23:40:05 +02:00
parent 52e5df7ebb
commit 98115facfd
66 changed files with 233 additions and 860 deletions
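The revert touches pod deletion at every layer: API defaults, the REST delete path, kubectl, the replication manager, the namespace controller, and the Kubelet's Docker runtime. The common thread in the test and e2e hunks below is a switch from graceful deletes (an explicit DeleteOptions grace period) back to plain deletes (nil options). A minimal sketch of the two client call styles, assuming the client package as it exists at this commit; the helper name is hypothetical:

package example

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
)

// deleteBothWays is a hypothetical helper, not part of the commit; it shows the
// two deletion styles the revert swaps between throughout the diffs below.
func deleteBothWays(c *client.Client) error {
	pods := c.Pods(api.NamespaceDefault)

	// Graceful delete: an explicit grace period is sent via DeleteOptions, so
	// the pod lingers in Terminating while the Kubelet shuts containers down.
	if err := pods.Delete("my-pod", api.NewDeleteOptions(30)); err != nil {
		return err
	}

	// Plain delete: nil options; after the revert this removes the pod right
	// away instead of starting a graceful shutdown.
	return pods.Delete("other-pod", nil)
}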

View File

@ -17,7 +17,6 @@ spec:
mountPath: /varlog
- name: containers
mountPath: /var/lib/docker/containers
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:

View File

@ -17,7 +17,6 @@ spec:
mountPath: /varlog
- name: containers
mountPath: /var/lib/docker/containers
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:

View File

@ -878,25 +878,19 @@ func runSchedulerNoPhantomPodsTest(client *client.Client) {
// Delete a pod to free up room.
glog.Infof("Deleting pod %v", bar.Name)
err = client.Pods(api.NamespaceDefault).Delete(bar.Name, api.NewDeleteOptions(1))
err = client.Pods(api.NamespaceDefault).Delete(bar.Name, nil)
if err != nil {
glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
}
time.Sleep(2 * time.Second)
pod.ObjectMeta.Name = "phantom.baz"
baz, err := client.Pods(api.NamespaceDefault).Create(pod)
if err != nil {
glog.Fatalf("Failed to create pod: %v, %v", pod, err)
}
if err := wait.Poll(time.Second, time.Second*60, podRunning(client, baz.Namespace, baz.Name)); err != nil {
if pod, perr := client.Pods(api.NamespaceDefault).Get("phantom.bar"); perr == nil {
glog.Fatalf("FAILED: 'phantom.bar' was never deleted: %#v", pod)
} else {
glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err)
}
}
glog.Info("Scheduler doesn't make phantom pods: test passed.")
}

View File

@ -1,28 +0,0 @@
{
"kind": "Pod",
"apiVersion": "v1beta3",
"metadata": {
"name": "slow-pod",
"labels": {
"name": "nettest"
}
},
"spec": {
"containers": [
{
"name": "webserver",
"image": "gcr.io/google_containers/nettest:1.5",
"args": [
"-service=nettest",
"-delay-shutdown=10"
],
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
]
}
]
}
}

View File

@ -1,42 +0,0 @@
{
"kind": "ReplicationController",
"apiVersion": "v1beta3",
"metadata": {
"name": "slow-rc",
"labels": {
"name": "nettest"
}
},
"spec": {
"replicas": 8,
"selector": {
"name": "nettest"
},
"template": {
"metadata": {
"labels": {
"name": "nettest"
}
},
"spec": {
"terminationGracePeriodSeconds": 5,
"containers": [
{
"name": "webserver",
"image": "gcr.io/google_containers/nettest:1.5",
"args": [
"-service=nettest",
"-delay-shutdown=10"
],
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
]
}
]
}
}
}
}

View File

@ -40,9 +40,7 @@ import (
"net"
"net/http"
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
@ -54,7 +52,6 @@ var (
peerCount = flag.Int("peers", 8, "Must find at least this many peers for the test to pass.")
service = flag.String("service", "nettest", "Service to find other network test pods in.")
namespace = flag.String("namespace", "default", "Namespace of this pod. TODO: kubernetes should make this discoverable.")
delayShutdown = flag.Int("delay-shutdown", 0, "Number of seconds to delay shutdown when receiving SIGTERM.")
)
// State tracks the internal state of our little http server.
@ -182,17 +179,6 @@ func main() {
log.Fatalf("Error getting hostname: %v", err)
}
if *delayShutdown > 0 {
termCh := make(chan os.Signal)
signal.Notify(termCh, syscall.SIGTERM)
go func() {
<-termCh
log.Printf("Sleeping %d seconds before exit ...", *delayShutdown)
time.Sleep(time.Duration(*delayShutdown) * time.Second)
os.Exit(0)
}()
}
state := State{
Hostname: hostname,
StillContactingPeers: true,

View File

@ -23,7 +23,7 @@ readonly red=$(tput setaf 1)
readonly green=$(tput setaf 2)
kube::test::clear_all() {
kubectl delete "${kube_flags[@]}" rc,pods --all --grace-period=0
kubectl delete "${kube_flags[@]}" rc,pods --all
}
kube::test::get_object_assert() {

View File

@ -179,11 +179,6 @@ for version in "${kube_api_versions[@]}"; do
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}"
# Post-condition: pod is still there, in terminating
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
[[ "$(kubectl get pods "${kube_flags[@]}" | grep Terminating)" ]]
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@ -199,7 +194,7 @@ for version in "${kube_api_versions[@]}"; do
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete -f examples/limitrange/valid-pod.json "${kube_flags[@]}" --grace-period=0
kubectl delete -f examples/limitrange/valid-pod.json "${kube_flags[@]}"
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@ -215,7 +210,7 @@ for version in "${kube_api_versions[@]}"; do
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0
kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}"
# Post-condition: no POD is running
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
@ -247,7 +242,7 @@ for version in "${kube_api_versions[@]}"; do
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 # --all removes all the pods
kubectl delete --all pods "${kube_flags[@]}" # --all removes all the pods
# Post-condition: no POD is running
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
@ -264,7 +259,7 @@ for version in "${kube_api_versions[@]}"; do
# Pre-condition: valid-pod and redis-proxy PODs are running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
# Command
kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # delete multiple pods at once
kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" # delete multiple pods at once
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@ -281,7 +276,7 @@ for version in "${kube_api_versions[@]}"; do
# Pre-condition: valid-pod and redis-proxy PODs are running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
# Command
kubectl stop pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # stop multiple pods at once
kubectl stop pods valid-pod redis-proxy "${kube_flags[@]}" # stop multiple pods at once
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@ -305,7 +300,7 @@ for version in "${kube_api_versions[@]}"; do
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 "${kube_flags[@]}"
kubectl delete pods -lnew-name=new-valid-pod "${kube_flags[@]}"
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@ -337,7 +332,7 @@ for version in "${kube_api_versions[@]}"; do
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 "${kube_flags[@]}"
kubectl delete pods -l'name in (valid-pod-super-sayan)' "${kube_flags[@]}"
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@ -358,7 +353,7 @@ for version in "${kube_api_versions[@]}"; do
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0
kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod
# Post-condition: no POD is running
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''

View File

@ -1002,12 +1002,6 @@ func deepCopy_api_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.Clone
} else {
out.DeletionTimestamp = nil
}
if in.DeletionGracePeriodSeconds != nil {
out.DeletionGracePeriodSeconds = new(int64)
*out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds
} else {
out.DeletionGracePeriodSeconds = nil
}
if in.Labels != nil {
out.Labels = make(map[string]string)
for key, val := range in.Labels {

View File

@ -59,8 +59,6 @@ func BeforeCreate(strategy RESTCreateStrategy, ctx api.Context, obj runtime.Obje
} else {
objectMeta.Namespace = api.NamespaceNone
}
objectMeta.DeletionTimestamp = nil
objectMeta.DeletionGracePeriodSeconds = nil
strategy.PrepareForCreate(obj)
api.FillObjectMetaSystemFields(ctx, objectMeta)
api.GenerateName(strategy, objectMeta)

View File

@ -40,37 +40,12 @@ func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Obje
if strategy == nil {
return false, false, nil
}
objectMeta, _, kerr := objectMetaAndKind(strategy, obj)
_, _, kerr := objectMetaAndKind(strategy, obj)
if kerr != nil {
return false, false, kerr
}
// if the object is already being deleted
if objectMeta.DeletionTimestamp != nil {
// if we are already being deleted, we may only shorten the deletion grace period
// this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
// so we force deletion immediately
if objectMeta.DeletionGracePeriodSeconds == nil {
return false, false, nil
}
// only a shorter grace period may be provided by a user
if options.GracePeriodSeconds != nil {
period := int64(*options.GracePeriodSeconds)
if period > *objectMeta.DeletionGracePeriodSeconds {
return false, true, nil
}
objectMeta.DeletionGracePeriodSeconds = &period
options.GracePeriodSeconds = &period
return true, false, nil
}
// graceful deletion is pending, do nothing
options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds
return false, true, nil
}
if !strategy.CheckGracefulDelete(obj, options) {
return false, false, nil
}
objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds
return true, false, nil
}
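The removed block above let a follow-up delete request only shorten an already-pending grace period; longer requests were ignored and graceful deletion stayed pending. A standalone sketch of that rule, using a hypothetical helper and plain int64 pointers rather than the apiserver types:

package main

import "fmt"

// shortenGracePeriod is a hypothetical standalone helper restating the rule
// the removed code enforced: once deletionGracePeriodSeconds is recorded, a
// later delete request may only shorten the pending period, never extend it.
func shortenGracePeriod(current *int64, requested int64) (applied int64, changed bool) {
	if current == nil {
		// First graceful delete: the requested period is recorded as-is.
		return requested, true
	}
	if requested <= *current {
		// A shorter (or equal) period replaces the stored value.
		return requested, true
	}
	// Longer requests are ignored; the pending period stands.
	return *current, false
}

func main() {
	pending := int64(30)
	applied, changed := shortenGracePeriod(&pending, 10)
	fmt.Println(applied, changed) // 10 true: the pending 30s period is shortened
	applied, changed = shortenGracePeriod(&applied, 60)
	fmt.Println(applied, changed) // 10 false: extending the period is rejected
}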

View File

@ -329,9 +329,7 @@ func (t *Tester) TestDeleteNonExist(createFn func() runtime.Object) {
func (t *Tester) TestDeleteGraceful(createFn func() runtime.Object, expectedGrace int64, wasGracefulFn func() bool) {
t.TestDeleteGracefulHasDefault(createFn(), expectedGrace, wasGracefulFn)
t.TestDeleteGracefulWithValue(createFn(), expectedGrace, wasGracefulFn)
t.TestDeleteGracefulUsesZeroOnNil(createFn(), 0)
t.TestDeleteGracefulExtend(createFn(), expectedGrace, wasGracefulFn)
}
func (t *Tester) TestDeleteNoGraceful(createFn func() runtime.Object, wasGracefulFn func() bool) {
@ -364,99 +362,12 @@ func (t *Tester) TestDeleteGracefulHasDefault(existing runtime.Object, expectedG
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !wasGracefulFn() {
t.Errorf("did not gracefully delete resource")
return
}
object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name)
if err != nil {
if _, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name); err != nil {
t.Errorf("unexpected error, object should exist: %v", err)
return
}
objectMeta, err = api.ObjectMetaFor(object)
if err != nil {
t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, object)
}
if objectMeta.DeletionTimestamp == nil {
t.Errorf("did not set deletion timestamp")
}
if objectMeta.DeletionGracePeriodSeconds == nil {
t.Fatalf("did not set deletion grace period seconds")
}
if *objectMeta.DeletionGracePeriodSeconds != expectedGrace {
t.Errorf("actual grace period does not match expected: %d", *objectMeta.DeletionGracePeriodSeconds)
}
}
func (t *Tester) TestDeleteGracefulWithValue(existing runtime.Object, expectedGrace int64, wasGracefulFn func() bool) {
objectMeta, err := api.ObjectMetaFor(existing)
if err != nil {
t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, existing)
}
ctx := api.WithNamespace(t.TestContext(), objectMeta.Namespace)
_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace+2))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !wasGracefulFn() {
t.Errorf("did not gracefully delete resource")
}
object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name)
if err != nil {
t.Errorf("unexpected error, object should exist: %v", err)
}
objectMeta, err = api.ObjectMetaFor(object)
if err != nil {
t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, object)
}
if objectMeta.DeletionTimestamp == nil {
t.Errorf("did not set deletion timestamp")
}
if objectMeta.DeletionGracePeriodSeconds == nil {
t.Fatalf("did not set deletion grace period seconds")
}
if *objectMeta.DeletionGracePeriodSeconds != expectedGrace+2 {
t.Errorf("actual grace period does not match expected: %d", *objectMeta.DeletionGracePeriodSeconds)
}
}
func (t *Tester) TestDeleteGracefulExtend(existing runtime.Object, expectedGrace int64, wasGracefulFn func() bool) {
objectMeta, err := api.ObjectMetaFor(existing)
if err != nil {
t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, existing)
}
ctx := api.WithNamespace(t.TestContext(), objectMeta.Namespace)
_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !wasGracefulFn() {
t.Errorf("did not gracefully delete resource")
}
// second delete duration is ignored
_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace+2))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name)
if err != nil {
t.Errorf("unexpected error, object should exist: %v", err)
}
objectMeta, err = api.ObjectMetaFor(object)
if err != nil {
t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, object)
}
if objectMeta.DeletionTimestamp == nil {
t.Errorf("did not set deletion timestamp")
}
if objectMeta.DeletionGracePeriodSeconds == nil {
t.Fatalf("did not set deletion grace period seconds")
}
if *objectMeta.DeletionGracePeriodSeconds != expectedGrace {
t.Errorf("actual grace period does not match expected: %d", *objectMeta.DeletionGracePeriodSeconds)
}
}
func (t *Tester) TestDeleteGracefulUsesZeroOnNil(existing runtime.Object, expectedGrace int64) {
@ -471,6 +382,6 @@ func (t *Tester) TestDeleteGracefulUsesZeroOnNil(existing runtime.Object, expect
t.Errorf("unexpected error: %v", err)
}
if _, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name); !errors.IsNotFound(err) {
t.Errorf("unexpected error, object should not exist: %v", err)
t.Errorf("unexpected error, object should exist: %v", err)
}
}

View File

@ -155,7 +155,6 @@ func TestRoundTripTypes(t *testing.T) {
}
func TestEncode_Ptr(t *testing.T) {
grace := int64(30)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": "foo"},
@ -163,8 +162,6 @@ func TestEncode_Ptr(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
}
obj := runtime.Object(pod)

View File

@ -88,15 +88,6 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
j.LabelSelector, _ = labels.Parse("a=b")
j.FieldSelector, _ = fields.ParseSelector("a=b")
},
func(j *api.PodSpec, c fuzz.Continue) {
c.FuzzNoCustom(j)
// has a default value
ttl := int64(30)
if c.RandBool() {
ttl = int64(c.Uint32())
}
j.TerminationGracePeriodSeconds = &ttl
},
func(j *api.PodPhase, c fuzz.Continue) {
statuses := []api.PodPhase{api.PodPending, api.PodRunning, api.PodFailed, api.PodUnknown}
*j = statuses[c.Rand.Intn(len(statuses))]

View File

@ -133,10 +133,6 @@ type ObjectMeta struct {
// will send a hard termination signal to the container.
DeletionTimestamp *util.Time `json:"deletionTimestamp,omitempty"`
// DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion
// was requested. Represents the most recent grace period, and may only be shortened once set.
DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"`
// Labels are key value pairs that may be used to scope and select individual resources.
// Label keys are of the form:
// label-key ::= prefixed-name | name

View File

@ -1087,12 +1087,6 @@ func convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta
} else {
out.DeletionTimestamp = nil
}
if in.DeletionGracePeriodSeconds != nil {
out.DeletionGracePeriodSeconds = new(int64)
*out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds
} else {
out.DeletionGracePeriodSeconds = nil
}
if in.Labels != nil {
out.Labels = make(map[string]string)
for key, val := range in.Labels {
@ -3368,12 +3362,6 @@ func convert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta
} else {
out.DeletionTimestamp = nil
}
if in.DeletionGracePeriodSeconds != nil {
out.DeletionGracePeriodSeconds = new(int64)
*out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds
} else {
out.DeletionGracePeriodSeconds = nil
}
if in.Labels != nil {
out.Labels = make(map[string]string)
for key, val := range in.Labels {

View File

@ -933,12 +933,6 @@ func deepCopy_v1_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.Cloner
} else {
out.DeletionTimestamp = nil
}
if in.DeletionGracePeriodSeconds != nil {
out.DeletionGracePeriodSeconds = new(int64)
*out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds
} else {
out.DeletionGracePeriodSeconds = nil
}
if in.Labels != nil {
out.Labels = make(map[string]string)
for key, val := range in.Labels {

View File

@ -98,10 +98,6 @@ func addDefaultingFuncs() {
if obj.HostNetwork {
defaultHostNetworkPorts(&obj.Containers)
}
if obj.TerminationGracePeriodSeconds == nil {
period := int64(DefaultTerminationGracePeriodSeconds)
obj.TerminationGracePeriodSeconds = &period
}
},
func(obj *Probe) {
if obj.TimeoutSeconds == 0 {

View File

@ -131,10 +131,6 @@ type ObjectMeta struct {
// will send a hard termination signal to the container.
DeletionTimestamp *util.Time `json:"deletionTimestamp,omitempty" description:"RFC 3339 date and time at which the object will be deleted; populated by the system when a graceful deletion is requested, read-only; if not set, graceful deletion of the object has not been requested"`
// DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion
// was requested. Represents the most recent grace period, and may only be shortened once set.
DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" description:"number of seconds allowed for this object to gracefully terminate before it will be removed from the system; only set when deletionTimestamp is also set, read-only; may only be shortened"`
// Labels are key value pairs that may be used to scope and select individual resources.
// TODO: replace map[string]string with labels.LabelSet type
Labels map[string]string `json:"labels,omitempty" description:"map of string keys and values that can be used to organize and categorize objects; may match selectors of replication controllers and services"`
@ -842,8 +838,6 @@ const (
// DNSDefault indicates that the pod should use the default (as
// determined by kubelet) DNS settings.
DNSDefault DNSPolicy = "Default"
DefaultTerminationGracePeriodSeconds = 30
)
// PodSpec is a description of a pod
@ -858,7 +852,7 @@ type PodSpec struct {
// The grace period is the duration in seconds after the processes running in the pod are sent
// a termination signal and the time when the processes are forcibly halted with a kill signal.
// Set this value longer than the expected cleanup time for your process.
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" description:"optional duration in seconds the pod needs to terminate gracefully; may be decreased in delete request; value must be non-negative integer; the value zero indicates delete immediately; if this value is not set, the default grace period will be used instead; the grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal; set this value longer than the expected cleanup time for your process; defaults to 30 seconds"`
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" description:"optional duration in seconds the pod needs to terminate gracefully; may be decreased in delete request; value must be non-negative integer; the value zero indicates delete immediately; if this value is not set, the default grace period will be used instead; the grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal; set this value longer than the expected cleanup time for your process"`
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" description:"optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers; value must be a positive integer`
// Optional: Set DNS policy. Defaults to "ClusterFirst"
DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" description:"DNS policy for containers within the pod; one of 'ClusterFirst' or 'Default'"`
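TerminationGracePeriodSeconds is a pointer so that "unset" can be distinguished from an explicit zero; the defaulting removed in this commit filled in DefaultTerminationGracePeriodSeconds (30) when the field was nil. A minimal sketch of a pod carrying an explicit grace period, mirroring the test fixtures stripped elsewhere in the diff:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
)

func main() {
	// An explicit per-pod grace period, as the fixtures in this commit set it;
	// leaving the pointer nil would (before the revert) trigger the 30s default.
	grace := int64(30)
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "example"},
		Spec: api.PodSpec{
			RestartPolicy:                 api.RestartPolicyAlways,
			DNSPolicy:                     api.DNSClusterFirst,
			TerminationGracePeriodSeconds: &grace,
		},
	}
	fmt.Printf("grace period: %ds\n", *pod.Spec.TerminationGracePeriodSeconds)
}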

View File

@ -945,12 +945,6 @@ func convert_api_ObjectMeta_To_v1beta3_ObjectMeta(in *api.ObjectMeta, out *Objec
} else {
out.DeletionTimestamp = nil
}
if in.DeletionGracePeriodSeconds != nil {
out.DeletionGracePeriodSeconds = new(int64)
*out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds
} else {
out.DeletionGracePeriodSeconds = nil
}
if in.Labels != nil {
out.Labels = make(map[string]string)
for key, val := range in.Labels {
@ -3040,12 +3034,6 @@ func convert_v1beta3_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.Objec
} else {
out.DeletionTimestamp = nil
}
if in.DeletionGracePeriodSeconds != nil {
out.DeletionGracePeriodSeconds = new(int64)
*out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds
} else {
out.DeletionGracePeriodSeconds = nil
}
if in.Labels != nil {
out.Labels = make(map[string]string)
for key, val := range in.Labels {

View File

@ -937,12 +937,6 @@ func deepCopy_v1beta3_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.C
} else {
out.DeletionTimestamp = nil
}
if in.DeletionGracePeriodSeconds != nil {
out.DeletionGracePeriodSeconds = new(int64)
*out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds
} else {
out.DeletionGracePeriodSeconds = nil
}
if in.Labels != nil {
out.Labels = make(map[string]string)
for key, val := range in.Labels {

View File

@ -102,10 +102,6 @@ func addDefaultingFuncs() {
if obj.HostNetwork {
defaultHostNetworkPorts(&obj.Containers)
}
if obj.TerminationGracePeriodSeconds == nil {
period := int64(DefaultTerminationGracePeriodSeconds)
obj.TerminationGracePeriodSeconds = &period
}
},
func(obj *Probe) {
if obj.TimeoutSeconds == 0 {

View File

@ -131,10 +131,6 @@ type ObjectMeta struct {
// will send a hard termination signal to the container.
DeletionTimestamp *util.Time `json:"deletionTimestamp,omitempty" description:"RFC 3339 date and time at which the object will be deleted; populated by the system when a graceful deletion is requested, read-only; if not set, graceful deletion of the object has not been requested"`
// DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion
// was requested. Represents the most recent grace period, and may only be shortened once set.
DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" description:"number of seconds allowed for this object to gracefully terminate before it will be removed from the system; only set when deletionTimestamp is also set, read-only; may only be shortened"`
// Labels are key value pairs that may be used to scope and select individual resources.
// TODO: replace map[string]string with labels.LabelSet type
Labels map[string]string `json:"labels,omitempty" description:"map of string keys and values that can be used to organize and categorize objects; may match selectors of replication controllers and services"`
@ -846,8 +842,6 @@ const (
// DNSDefault indicates that the pod should use the default (as
// determined by kubelet) DNS settings.
DNSDefault DNSPolicy = "Default"
DefaultTerminationGracePeriodSeconds = 30
)
// PodSpec is a description of a pod
@ -862,7 +856,7 @@ type PodSpec struct {
// The grace period is the duration in seconds after the processes running in the pod are sent
// a termination signal and the time when the processes are forcibly halted with a kill signal.
// Set this value longer than the expected cleanup time for your process.
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" description:"optional duration in seconds the pod needs to terminate gracefully; may be decreased in delete request; value must be non-negative integer; the value zero indicates delete immediately; if this value is not set, the default grace period will be used instead; the grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal; set this value longer than the expected cleanup time for your process; defaults to 30 seconds"`
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" description:"optional duration in seconds the pod needs to terminate gracefully; may be decreased in delete request; value must be non-negative integer; the value zero indicates delete immediately; if this value is not set, the default grace period will be used instead; the grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal; set this value longer than the expected cleanup time for your process"`
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" description:"optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers; value must be a positive integer`
// Optional: Set DNS policy. Defaults to "ClusterFirst"
DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" description:"DNS policy for containers within the pod; one of 'ClusterFirst' or 'Default'"`

View File

@ -252,16 +252,6 @@ func ValidateObjectMetaUpdate(old, meta *api.ObjectMeta) errs.ValidationErrorLis
} else {
meta.CreationTimestamp = old.CreationTimestamp
}
// an object can never remove a deletion timestamp or clear/change grace period seconds
if !old.DeletionTimestamp.IsZero() {
meta.DeletionTimestamp = old.DeletionTimestamp
}
if old.DeletionGracePeriodSeconds != nil && meta.DeletionGracePeriodSeconds == nil {
meta.DeletionGracePeriodSeconds = old.DeletionGracePeriodSeconds
}
if meta.DeletionGracePeriodSeconds != nil && *meta.DeletionGracePeriodSeconds != *old.DeletionGracePeriodSeconds {
allErrs = append(allErrs, errs.NewFieldInvalid("deletionGracePeriodSeconds", meta.DeletionGracePeriodSeconds, "field is immutable; may only be changed via deletion"))
}
// Reject updates that don't specify a resource version
if meta.ResourceVersion == "" {

View File

@ -294,8 +294,7 @@ func filterActivePods(pods []api.Pod) []*api.Pod {
var result []*api.Pod
for i := range pods {
if api.PodSucceeded != pods[i].Status.Phase &&
api.PodFailed != pods[i].Status.Phase &&
pods[i].DeletionTimestamp == nil {
api.PodFailed != pods[i].Status.Phase {
result = append(result, &pods[i])
}
}

View File

@ -204,12 +204,6 @@ func (rm *ReplicationManager) getPodControllers(pod *api.Pod) *api.ReplicationCo
// When a pod is created, enqueue the controller that manages it and update its expectations.
func (rm *ReplicationManager) addPod(obj interface{}) {
pod := obj.(*api.Pod)
if pod.DeletionTimestamp != nil {
// on a restart of the controller manager, it's possible a new pod shows up in a state that
// is already pending deletion. Prevent the pod from being a creation observation.
rm.deletePod(pod)
return
}
if rc := rm.getPodControllers(pod); rc != nil {
rm.expectations.CreationObserved(rc)
rm.enqueueController(rc)
@ -226,15 +220,6 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) {
}
// TODO: Write a unittest for this case
curPod := cur.(*api.Pod)
if curPod.DeletionTimestamp != nil {
// when a pod is deleted gracefully its deletion timestamp is first modified to reflect a grace period,
// and after such time has passed, the kubelet actually deletes it from the store. We receive an update
// for modification of the deletion timestamp and expect an rc to create more replicas asap, not wait
// until the kubelet actually deletes the pod. This is different from the Phase of a pod changing, because
// an rc never initiates a phase change, and so is never asleep waiting for the same.
rm.deletePod(curPod)
return
}
if rc := rm.getPodControllers(curPod); rc != nil {
rm.enqueueController(rc)
}

View File

@ -38,7 +38,6 @@ import (
)
func testData() (*api.PodList, *api.ServiceList, *api.ReplicationControllerList) {
grace := int64(30)
pods := &api.PodList{
ListMeta: api.ListMeta{
ResourceVersion: "15",
@ -49,7 +48,6 @@ func testData() (*api.PodList, *api.ServiceList, *api.ReplicationControllerList)
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
{
@ -57,7 +55,6 @@ func testData() (*api.PodList, *api.ServiceList, *api.ReplicationControllerList)
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
},
@ -506,7 +503,6 @@ func TestGetMultipleTypeObjectsWithDirectReference(t *testing.T) {
}
}
func watchTestData() ([]api.Pod, []watch.Event) {
grace := int64(30)
pods := []api.Pod{
{
ObjectMeta: api.ObjectMeta{
@ -517,7 +513,6 @@ func watchTestData() ([]api.Pod, []watch.Event) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
}
@ -533,7 +528,6 @@ func watchTestData() ([]api.Pod, []watch.Event) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
},
@ -548,7 +542,6 @@ func watchTestData() ([]api.Pod, []watch.Event) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
},

View File

@ -29,7 +29,6 @@ import (
)
func TestMerge(t *testing.T) {
grace := int64(30)
tests := []struct {
obj runtime.Object
fragment string
@ -52,7 +51,6 @@ func TestMerge(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
},
@ -121,7 +119,6 @@ func TestMerge(t *testing.T) {
},
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
},

View File

@ -271,12 +271,7 @@ func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.Even
fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&pod.Spec))
fmt.Fprintf(out, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP)
fmt.Fprintf(out, "Labels:\t%s\n", formatLabels(pod.Labels))
if pod.DeletionTimestamp != nil {
fmt.Fprintf(out, "Status:\tTerminating (expires %s)\n", pod.DeletionTimestamp.Time.Format(time.RFC1123Z))
fmt.Fprintf(out, "Termination Grace Period:\t%ss\n", pod.DeletionGracePeriodSeconds)
} else {
fmt.Fprintf(out, "Status:\t%s\n", string(pod.Status.Phase))
}
fmt.Fprintf(out, "Replication Controllers:\t%s\n", printReplicationControllersByLabels(rcs))
fmt.Fprintf(out, "Containers:\n")
describeContainers(pod.Status.ContainerStatuses, out)

View File

@ -83,7 +83,6 @@ func fakeClientWith(testName string, t *testing.T, data map[string]string) Clien
}
func testData() (*api.PodList, *api.ServiceList) {
grace := int64(30)
pods := &api.PodList{
ListMeta: api.ListMeta{
ResourceVersion: "15",
@ -94,7 +93,6 @@ func testData() (*api.PodList, *api.ServiceList) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
{
@ -102,7 +100,6 @@ func testData() (*api.PodList, *api.ServiceList) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
},

View File

@ -128,7 +128,6 @@ func TestHelperCreate(t *testing.T) {
return true
}
grace := int64(30)
tests := []struct {
Resp *http.Response
RespFunc client.HTTPClientFunc
@ -175,7 +174,6 @@ func TestHelperCreate(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
Resp: &http.Response{StatusCode: http.StatusOK, Body: objBody(&api.Status{Status: api.StatusSuccess})},
@ -383,7 +381,6 @@ func TestHelperUpdate(t *testing.T) {
return true
}
grace := int64(30)
tests := []struct {
Resp *http.Response
RespFunc client.HTTPClientFunc
@ -423,7 +420,6 @@ func TestHelperUpdate(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
Overwrite: true,

View File

@ -404,17 +404,13 @@ func printPod(pod *api.Pod, w io.Writer, withNamespace bool) error {
name = pod.Name
}
phase := string(pod.Status.Phase)
if pod.DeletionTimestamp != nil {
phase = "Terminating"
}
_, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
name,
pod.Status.PodIP,
"", "",
podHostString(pod.Spec.NodeName, pod.Status.HostIP),
formatLabels(pod.Labels),
phase,
pod.Status.Phase,
translateTimestamp(pod.CreationTimestamp),
pod.Status.Message,
)

View File

@ -628,7 +628,6 @@ func TestUpdateExistingReplicationController(t *testing.T) {
func TestUpdateWithRetries(t *testing.T) {
codec := testapi.Codec()
grace := int64(30)
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{Name: "rc",
Labels: map[string]string{
@ -648,7 +647,6 @@ func TestUpdateWithRetries(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
},

View File

@ -30,7 +30,6 @@ import (
func noDefault(*api.Pod) error { return nil }
func TestDecodeSinglePod(t *testing.T) {
grace := int64(30)
pod := &api.Pod{
TypeMeta: api.TypeMeta{
APIVersion: "",
@ -43,7 +42,6 @@ func TestDecodeSinglePod(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{{
Name: "image",
Image: "test/image",
@ -95,7 +93,6 @@ func TestDecodeSinglePod(t *testing.T) {
}
func TestDecodePodList(t *testing.T) {
grace := int64(30)
pod := &api.Pod{
TypeMeta: api.TypeMeta{
APIVersion: "",
@ -108,7 +105,6 @@ func TestDecodePodList(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{{
Name: "image",
Image: "test/image",

View File

@ -209,8 +209,9 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
for _, ref := range filtered {
name := kubecontainer.GetPodFullName(ref)
if existing, found := pods[name]; found {
if checkAndUpdatePod(existing, ref) {
if !reflect.DeepEqual(existing.Spec, ref.Spec) {
// this is an update
existing.Spec = ref.Spec
updates.Pods = append(updates.Pods, existing)
continue
}
@ -251,8 +252,9 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
name := kubecontainer.GetPodFullName(ref)
if existing, found := oldPods[name]; found {
pods[name] = existing
if checkAndUpdatePod(existing, ref) {
if !reflect.DeepEqual(existing.Spec, ref.Spec) {
// this is an update
existing.Spec = ref.Spec
updates.Pods = append(updates.Pods, existing)
continue
}
@ -323,23 +325,6 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco
return
}
// checkAndUpdatePod updates existing if ref makes a meaningful change and returns true, or
// returns false if there was no update.
func checkAndUpdatePod(existing, ref *api.Pod) bool {
// TODO: it would be better to update the whole object and only preserve certain things
// like the source annotation or the UID (to ensure safety)
if reflect.DeepEqual(existing.Spec, ref.Spec) &&
reflect.DeepEqual(existing.DeletionTimestamp, ref.DeletionTimestamp) &&
reflect.DeepEqual(existing.DeletionGracePeriodSeconds, ref.DeletionGracePeriodSeconds) {
return false
}
// this is an update
existing.Spec = ref.Spec
existing.DeletionTimestamp = ref.DeletionTimestamp
existing.DeletionGracePeriodSeconds = ref.DeletionGracePeriodSeconds
return true
}
// Sync sends a copy of the current state through the update channel.
func (s *podStorage) Sync() {
s.updateLock.Lock()

View File

@ -163,7 +163,6 @@ func TestReadContainerManifestFromFile(t *testing.T) {
func TestReadPodsFromFile(t *testing.T) {
hostname := "random-test-hostname"
grace := int64(30)
var testCases = []struct {
desc string
pod runtime.Object
@ -196,7 +195,6 @@ func TestReadPodsFromFile(t *testing.T) {
NodeName: hostname,
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{{
Name: "image",
Image: "test/image",
@ -232,7 +230,6 @@ func TestReadPodsFromFile(t *testing.T) {
NodeName: hostname,
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{{
Name: "image",
Image: "test/image",

View File

@ -120,7 +120,6 @@ func TestExtractInvalidManifest(t *testing.T) {
func TestExtractPodsFromHTTP(t *testing.T) {
hostname := "different-value"
grace := int64(30)
var testCases = []struct {
desc string
pods runtime.Object
@ -157,8 +156,6 @@ func TestExtractPodsFromHTTP(t *testing.T) {
NodeName: hostname,
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{{
Name: "1",
Image: "foo",
@ -212,8 +209,6 @@ func TestExtractPodsFromHTTP(t *testing.T) {
NodeName: hostname,
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{{
Name: "1",
Image: "foo",
@ -234,8 +229,6 @@ func TestExtractPodsFromHTTP(t *testing.T) {
NodeName: hostname,
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{{
Name: "2",
Image: "bar",

View File

@ -163,13 +163,13 @@ func (f *FakeRuntime) SyncPod(pod *api.Pod, _ Pod, _ api.PodStatus, _ []api.Secr
return f.Err
}
func (f *FakeRuntime) KillPod(pod *api.Pod, runningPod Pod) error {
func (f *FakeRuntime) KillPod(pod Pod) error {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "KillPod")
f.KilledPods = append(f.KilledPods, string(runningPod.ID))
for _, c := range runningPod.Containers {
f.KilledPods = append(f.KilledPods, string(pod.ID))
for _, c := range pod.Containers {
f.KilledContainers = append(f.KilledContainers, c.Name)
}
return f.Err

View File

@ -53,8 +53,8 @@ type Runtime interface {
GetPods(all bool) ([]*Pod, error)
// Syncs the running pod into the desired pod.
SyncPod(pod *api.Pod, runningPod Pod, podStatus api.PodStatus, pullSecrets []api.Secret) error
// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
KillPod(pod *api.Pod, runningPod Pod) error
// KillPod kills all the containers of a pod.
KillPod(pod Pod) error
// GetPodStatus retrieves the status of the pod, including the information of
// all containers in the pod.
GetPodStatus(*api.Pod) (*api.PodStatus, error)

View File

@ -54,16 +54,7 @@ const (
maxReasonCacheEntries = 200
// In order to avoid unnecessary SIGKILLs, give every container a minimum grace
// period after SIGTERM. Docker will guarantee the termination, but SIGTERM is
// potentially dangerous.
// TODO: evaluate whether there are scenarios in which SIGKILL is preferable to
// SIGTERM for certain process types, which may justify setting this to 0.
minimumGracePeriodInSeconds = 2
kubernetesNameLabel = "io.kubernetes.pod.name"
kubernetesPodLabel = "io.kubernetes.pod.data"
kubernetesTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
kubernetesContainerLabel = "io.kubernetes.container.name"
)
@ -573,19 +564,12 @@ func (dm *DockerManager) runContainer(
if len(containerHostname) > hostnameMaxLen {
containerHostname = containerHostname[:hostnameMaxLen]
}
// Pod information is recorded on the container as labels to preserve it in the event the pod is deleted
// while the Kubelet is down and there is no information available to recover the pod. This includes
// termination information like the termination grace period and the pre stop hooks.
// TODO: keep these labels up to date if the pod changes
namespacedName := types.NamespacedName{pod.Namespace, pod.Name}
labels := map[string]string{
kubernetesNameLabel: namespacedName.String(),
}
if pod.Spec.TerminationGracePeriodSeconds != nil {
labels[kubernetesTerminationGracePeriodLabel] = strconv.FormatInt(*pod.Spec.TerminationGracePeriodSeconds, 10)
"io.kubernetes.pod.name": namespacedName.String(),
}
if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
glog.V(1).Infof("Setting preStop hook")
// TODO: This is kind of hacky, we should really just encode the bits we need.
data, err := latest.Codec.Encode(pod)
if err != nil {
@ -1048,12 +1032,12 @@ func (dm *DockerManager) PortForward(pod *kubecontainer.Pod, port uint16, stream
}
// Kills all containers in the specified pod
func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) error {
func (dm *DockerManager) KillPod(pod kubecontainer.Pod) error {
// Send the kills in parallel since they may take a long time. Len + 1 since there
// can be Len errors + the networkPlugin teardown error.
errs := make(chan error, len(runningPod.Containers)+1)
errs := make(chan error, len(pod.Containers)+1)
wg := sync.WaitGroup{}
for _, container := range runningPod.Containers {
for _, container := range pod.Containers {
wg.Add(1)
go func(container *kubecontainer.Container) {
defer util.HandleCrash()
@ -1061,24 +1045,15 @@ func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) err
// TODO: Handle this without signaling the pod infra container to
// adapt to the generic container runtime.
if container.Name == PodInfraContainerName {
err := dm.networkPlugin.TearDownPod(runningPod.Namespace, runningPod.Name, kubeletTypes.DockerID(container.ID))
err := dm.networkPlugin.TearDownPod(pod.Namespace, pod.Name, kubeletTypes.DockerID(container.ID))
if err != nil {
glog.Errorf("Failed tearing down the infra container: %v", err)
errs <- err
}
}
var containerSpec *api.Container
if pod != nil {
for i, c := range pod.Spec.Containers {
if c.Name == container.Name {
containerSpec = &pod.Spec.Containers[i]
break
}
}
}
err := dm.killContainer(container.ID, containerSpec, pod)
err := dm.killContainer(container.ID)
if err != nil {
glog.Errorf("Failed to delete container: %v; Skipping pod %q", err, runningPod.ID)
glog.Errorf("Failed to delete container: %v; Skipping pod %q", err, pod.ID)
errs <- err
}
wg.Done()
@ -1096,11 +1071,8 @@ func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) err
return nil
}
// KillContainerInPod kills a container in the pod. It must be passed either a container ID or a container and pod,
// and will attempt to lookup the other information if missing.
func (dm *DockerManager) KillContainerInPod(containerID types.UID, container *api.Container, pod *api.Pod) error {
switch {
case len(containerID) == 0:
// KillContainerInPod kills a container in the pod.
func (dm *DockerManager) KillContainerInPod(container api.Container, pod *api.Pod) error {
// Locate the container.
pods, err := dm.GetPods(false)
if err != nil {
@ -1111,113 +1083,63 @@ func (dm *DockerManager) KillContainerInPod(containerID types.UID, container *ap
if targetContainer == nil {
return fmt.Errorf("unable to find container %q in pod %q", container.Name, targetPod.Name)
}
containerID = targetContainer.ID
return dm.killContainer(targetContainer.ID)
}
case container == nil || pod == nil:
// Read information about the container from labels
inspect, err := dm.client.InspectContainer(string(containerID))
// TODO(vmarmol): Unexport this as it is no longer used externally.
// KillContainer kills a container identified by containerID.
// Internally, it invokes docker's StopContainer API with a timeout of 10s.
// TODO: Deprecate this function in favor of KillContainerInPod.
func (dm *DockerManager) KillContainer(containerID types.UID) error {
return dm.killContainer(containerID)
}
func (dm *DockerManager) killContainer(containerID types.UID) error {
ID := string(containerID)
glog.V(2).Infof("Killing container with id %q", ID)
inspect, err := dm.client.InspectContainer(ID)
if err != nil {
return err
}
storedPod, storedContainer, cerr := containerAndPodFromLabels(inspect)
if cerr != nil {
glog.Errorf("unable to access pod data from container: %v", err)
var found bool
var preStop string
if inspect != nil && inspect.Config != nil && inspect.Config.Labels != nil {
preStop, found = inspect.Config.Labels[kubernetesPodLabel]
}
if container == nil {
container = storedContainer
}
if pod == nil {
pod = storedPod
}
}
return dm.killContainer(containerID, container, pod)
}
// killContainer accepts a containerID and an optional container or pod containing shutdown policies. Invoke
// KillContainerInPod if information must be retrieved first.
func (dm *DockerManager) killContainer(containerID types.UID, container *api.Container, pod *api.Pod) error {
ID := string(containerID)
name := ID
if container != nil {
name = fmt.Sprintf("%s %s", name, container.Name)
}
if pod != nil {
name = fmt.Sprintf("%s %s/%s", name, pod.Namespace, pod.Name)
}
gracePeriod := int64(minimumGracePeriodInSeconds)
if pod != nil && pod.DeletionGracePeriodSeconds != nil {
gracePeriod = *pod.DeletionGracePeriodSeconds
}
glog.V(2).Infof("Killing container %q with %d second grace period", name, gracePeriod)
if pod != nil && container != nil && container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
glog.V(4).Infof("Running preStop hook for container %q", name)
start := util.Now()
// TODO: timebox PreStop execution to at most gracePeriod
if err := dm.runner.Run(ID, pod, container, container.Lifecycle.PreStop); err != nil {
glog.Errorf("preStop hook for container %q failed: %v", name, err)
}
gracePeriod -= int64(util.Now().Sub(start.Time).Seconds())
}
dm.readinessManager.RemoveReadiness(ID)
// always give containers a minimal shutdown window to avoid unnecessary SIGKILLs
if gracePeriod < minimumGracePeriodInSeconds {
gracePeriod = minimumGracePeriodInSeconds
}
err := dm.client.StopContainer(ID, uint(gracePeriod))
ref, ok := dm.containerRefManager.GetRef(ID)
if !ok {
glog.Warningf("No ref for pod '%q'", name)
if found {
var pod api.Pod
err := latest.Codec.DecodeInto([]byte(preStop), &pod)
if err != nil {
glog.Errorf("Failed to decode prestop: %s, %s", preStop, ID)
} else {
// TODO: pass reason down here, and state, or move this call up the stack.
dm.recorder.Eventf(ref, "killing", "Killing %v", ID)
}
return err
}
var errNoPodOnContainer = fmt.Errorf("no pod information labels on Docker container")
// containerAndPodFromLabels tries to load the appropriate container info off of a Docker container's labels
func containerAndPodFromLabels(inspect *docker.Container) (pod *api.Pod, container *api.Container, err error) {
if inspect == nil && inspect.Config == nil && inspect.Config.Labels == nil {
return nil, nil, errNoPodOnContainer
}
labels := inspect.Config.Labels
// the pod data may not be set
if body, found := labels[kubernetesPodLabel]; found {
pod = &api.Pod{}
if err = latest.Codec.DecodeInto([]byte(body), pod); err == nil {
name := labels[kubernetesContainerLabel]
name := inspect.Config.Labels[kubernetesContainerLabel]
var container *api.Container
for ix := range pod.Spec.Containers {
if pod.Spec.Containers[ix].Name == name {
container = &pod.Spec.Containers[ix]
break
}
}
if container == nil {
err = fmt.Errorf("unable to find container %s in pod %v", name, pod)
if container != nil {
glog.V(1).Infof("Running preStop hook")
if err := dm.runner.Run(ID, &pod, container, container.Lifecycle.PreStop); err != nil {
glog.Errorf("failed to run preStop hook: %v", err)
}
} else {
pod = nil
}
}
// attempt to find the default grace period if we didn't commit a pod, but set the generic metadata
// field (the one used by kill)
if pod == nil {
if period, ok := labels[kubernetesTerminationGracePeriodLabel]; ok {
if seconds, err := strconv.ParseInt(period, 10, 64); err == nil {
pod = &api.Pod{}
pod.DeletionGracePeriodSeconds = &seconds
glog.Errorf("unable to find container %v, %s", pod, name)
}
}
}
return
dm.readinessManager.RemoveReadiness(ID)
err = dm.client.StopContainer(ID, 10)
ref, ok := dm.containerRefManager.GetRef(ID)
if !ok {
glog.Warningf("No ref for pod '%v'", ID)
} else {
// TODO: pass reason down here, and state, or move this call up the stack.
dm.recorder.Eventf(ref, "killing", "Killing %v", ID)
}
return err
}
// Run a single container from a pod. Returns the docker container ID
@ -1245,7 +1167,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
handlerErr := dm.runner.Run(id, pod, container, container.Lifecycle.PostStart)
if handlerErr != nil {
dm.killContainer(types.UID(id), container, pod)
dm.killContainer(types.UID(id))
return kubeletTypes.DockerID(""), fmt.Errorf("failed to call event handler: %v", handlerErr)
}
}
@ -1358,11 +1280,6 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
containersToKeep := make(map[kubeletTypes.DockerID]int)
createPodInfraContainer := false
if pod.DeletionTimestamp != nil {
glog.V(4).Infof("Pod is terminating %q", podFullName)
return PodContainerChangesSpec{}, nil
}
var err error
var podInfraContainerID kubeletTypes.DockerID
var changed bool
@ -1516,7 +1433,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
}
// Killing phase: if we want to start new infra container, or nothing is running kill everything (including infra container)
err = dm.KillPod(pod, runningPod)
err = dm.KillPod(runningPod)
if err != nil {
return err
}
@ -1526,15 +1443,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
_, keep := containerChanges.ContainersToKeep[kubeletTypes.DockerID(container.ID)]
if !keep {
glog.V(3).Infof("Killing unwanted container %+v", container)
// attempt to find the appropriate container policy
var podContainer *api.Container
for i, c := range pod.Spec.Containers {
if c.Name == container.Name {
podContainer = &pod.Spec.Containers[i]
break
}
}
err = dm.KillContainerInPod(container.ID, podContainer, pod)
err = dm.KillContainer(container.ID)
if err != nil {
glog.Errorf("Error killing container: %v", err)
}
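The removed killContainer derived a per-container stop timeout from the pod's recorded deletion grace period, spent part of it running the preStop hook, and never went below a 2-second floor before calling Docker's StopContainer. A hypothetical standalone condensation of that arithmetic:

package main

import (
	"fmt"
	"time"
)

// effectiveStopTimeout is a hypothetical helper, not part of the commit; it
// restates the timing rule in the removed killContainer: start from the pod's
// deletionGracePeriodSeconds (or the 2s floor when none is set), subtract the
// time the preStop hook took, clamp back to the floor, and use the result as
// the StopContainer timeout.
func effectiveStopTimeout(deletionGracePeriodSeconds *int64, preStopDuration time.Duration) uint {
	const minimumGracePeriodInSeconds = 2

	gracePeriod := int64(minimumGracePeriodInSeconds)
	if deletionGracePeriodSeconds != nil {
		gracePeriod = *deletionGracePeriodSeconds
	}
	gracePeriod -= int64(preStopDuration.Seconds())
	if gracePeriod < minimumGracePeriodInSeconds {
		gracePeriod = minimumGracePeriodInSeconds
	}
	return uint(gracePeriod)
}

func main() {
	grace := int64(30)
	// A preStop hook that ran for 5s leaves 25s for the container to stop.
	fmt.Println(effectiveStopTimeout(&grace, 5*time.Second))
	// With no grace period recorded on the pod, the 2s floor applies.
	fmt.Println(effectiveStopTimeout(nil, 0))
}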

View File

@ -417,7 +417,7 @@ func TestKillContainerInPod(t *testing.T) {
manager.readinessManager.SetReadiness(c.ID, true)
}
if err := manager.KillContainerInPod("", &pod.Spec.Containers[0], pod); err != nil {
if err := manager.KillContainerInPod(pod.Spec.Containers[0], pod); err != nil {
t.Errorf("unexpected error: %v", err)
}
// Assert the container has been stopped.
@ -490,14 +490,14 @@ func TestKillContainerInPodWithPreStop(t *testing.T) {
manager.readinessManager.SetReadiness(c.ID, true)
}
if err := manager.KillContainerInPod("", &pod.Spec.Containers[0], pod); err != nil {
if err := manager.KillContainerInPod(pod.Spec.Containers[0], pod); err != nil {
t.Errorf("unexpected error: %v", err)
}
// Assert the container has been stopped.
if err := fakeDocker.AssertStopped([]string{containerToKill.ID}); err != nil {
t.Errorf("container was not stopped correctly: %v", err)
}
verifyCalls(t, fakeDocker, []string{"list", "create_exec", "start_exec", "stop"})
verifyCalls(t, fakeDocker, []string{"list", "inspect_container", "create_exec", "start_exec", "stop"})
if !reflect.DeepEqual(expectedCmd, fakeDocker.execCmd) {
t.Errorf("expected: %v, got %v", expectedCmd, fakeDocker.execCmd)
}
@ -534,7 +534,7 @@ func TestKillContainerInPodWithError(t *testing.T) {
manager.readinessManager.SetReadiness(c.ID, true)
}
if err := manager.KillContainerInPod("", &pod.Spec.Containers[0], pod); err == nil {
if err := manager.KillContainerInPod(pod.Spec.Containers[0], pod); err == nil {
t.Errorf("expected error, found nil")
}
@ -1030,7 +1030,7 @@ func TestSyncPodDeletesWithNoPodInfraContainer(t *testing.T) {
verifyCalls(t, fakeDocker, []string{
// Kill the container since pod infra container is not running.
"stop",
"inspect_container", "stop",
// Create pod infra container.
"create", "start", "inspect_container",
// Create container.
@ -1105,7 +1105,7 @@ func TestSyncPodDeletesDuplicate(t *testing.T) {
// Check the pod infra container.
"inspect_container",
// Kill the duplicated container.
"stop",
"inspect_container", "stop",
})
// Expect one of the duplicates to be killed.
if len(fakeDocker.Stopped) != 1 || (fakeDocker.Stopped[0] != "1234" && fakeDocker.Stopped[0] != "4567") {
@ -1159,7 +1159,7 @@ func TestSyncPodBadHash(t *testing.T) {
// Check the pod infra container.
"inspect_container",
// Kill and restart the bad hash container.
"stop", "create", "start",
"inspect_container", "stop", "create", "start",
})
if err := fakeDocker.AssertStopped([]string{"1234"}); err != nil {
@ -1217,7 +1217,7 @@ func TestSyncPodsUnhealthy(t *testing.T) {
// Check the pod infra container.
"inspect_container",
// Kill the unhealthy container.
"stop",
"inspect_container", "stop",
// Restart the unhealthy container.
"create", "start",
})
@ -1426,7 +1426,7 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
// Check the pod infra container.
"inspect_container",
// Stop the last pod infra container.
"stop",
"inspect_container", "stop",
},
[]string{},
[]string{"9876"},

View File

@ -1056,8 +1056,8 @@ func parseResolvConf(reader io.Reader) (nameservers []string, searches []string,
}
// Kill all running containers in a pod (includes the pod infra container).
func (kl *Kubelet) killPod(pod *api.Pod, runningPod kubecontainer.Pod) error {
return kl.containerRuntime.KillPod(pod, runningPod)
func (kl *Kubelet) killPod(pod kubecontainer.Pod) error {
return kl.containerRuntime.KillPod(pod)
}
type empty struct{}
@ -1103,7 +1103,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
// Kill pods we can't run.
err := canRunPod(pod)
if err != nil {
kl.killPod(pod, runningPod)
kl.killPod(runningPod)
return err
}
@ -1418,7 +1418,7 @@ func (kl *Kubelet) killUnwantedPods(desiredPods map[types.UID]empty,
}()
glog.V(1).Infof("Killing unwanted pod %q", pod.Name)
// Stop the containers.
err = kl.killPod(nil, *pod)
err = kl.killPod(*pod)
if err != nil {
glog.Errorf("Failed killing the pod %q: %v", pod.Name, err)
return

View File

@ -1155,7 +1155,7 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
// Create the container.
"create", "start",
// Kill the container since event handler fails.
"stop",
"inspect_container", "stop",
// Get pod status.
"list", "inspect_container", "inspect_container",
// Get pods for deleting orphaned volumes.

View File

@ -671,11 +671,11 @@ func (r *runtime) GetPods(all bool) ([]*kubecontainer.Pod, error) {
}
// KillPod invokes 'systemctl kill' to kill the unit that runs the pod.
func (r *runtime) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) error {
glog.V(4).Infof("Rkt is killing pod: name %q.", runningPod.Name)
func (r *runtime) KillPod(pod kubecontainer.Pod) error {
glog.V(4).Infof("Rkt is killing pod: name %q.", pod.Name)
// TODO(yifan): More graceful stop. Replace with StopUnit and wait for a timeout.
r.systemd.KillUnit(makePodServiceFileName(runningPod.ID), int32(syscall.SIGKILL))
r.systemd.KillUnit(makePodServiceFileName(pod.ID), int32(syscall.SIGKILL))
return r.systemd.Reload()
}
@ -880,7 +880,7 @@ func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus
if restartPod {
// TODO(yifan): Handle network plugin.
if err := r.KillPod(pod, runningPod); err != nil {
if err := r.KillPod(runningPod); err != nil {
return err
}
if err := r.RunPod(pod); err != nil {

View File

@ -22,7 +22,6 @@ import (
"sync"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
@ -136,25 +135,14 @@ func (s *statusManager) syncBatch() error {
}
// TODO: make me easier to express from client code
statusPod, err = s.kubeClient.Pods(statusPod.Namespace).Get(statusPod.Name)
if errors.IsNotFound(err) {
glog.V(3).Infof("Pod %q was deleted on the server", pod.Name)
return nil
}
if err == nil {
statusPod.Status = status
_, err = s.kubeClient.Pods(pod.Namespace).UpdateStatus(statusPod)
// TODO: handle conflict as a retry, make that easier too.
statusPod, err = s.kubeClient.Pods(pod.Namespace).UpdateStatus(statusPod)
if err == nil {
glog.V(3).Infof("Status for pod %q updated successfully", pod.Name)
if statusPod.DeletionTimestamp == nil || !allTerminated(statusPod.Status.ContainerStatuses) {
return nil
}
if err := s.kubeClient.Pods(statusPod.Namespace).Delete(statusPod.Name, api.NewDeleteOptions(0)); err == nil {
glog.V(3).Infof("Pod %q fully terminated and removed from etcd", statusPod.Name)
return nil
}
}
}
// We failed to update status. In order to make sure we retry next time
@ -163,14 +151,3 @@ func (s *statusManager) syncBatch() error {
s.DeletePodStatus(podFullName)
return fmt.Errorf("error updating status for pod %q: %v", pod.Name, err)
}
// allTerminated returns true if every status is terminated, or the status list
// is empty.
func allTerminated(statuses []api.ContainerStatus) bool {
for _, status := range statuses {
if status.State.Terminated == nil {
return false
}
}
return true
}
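After the revert, the status sync path reduces to: fetch the pod, copy the locally computed status onto it, and call UpdateStatus; on any failure, drop the cached status so the next sync retries. A compressed sketch of that flow, assuming a minimal pod client interface — the PodClient and statusManager types below are illustrative, not the real pkg/kubelet types:

package sketch

import "fmt"

type Pod struct {
	Namespace string
	Name      string
	Status    string
}

// PodClient is an assumed, minimal slice of the real client interface.
type PodClient interface {
	Get(namespace, name string) (*Pod, error)
	UpdateStatus(pod *Pod) (*Pod, error)
}

type statusManager struct {
	kubeClient  PodClient
	podStatuses map[string]string
}

// syncOne mirrors the reverted update path: Get, overwrite the status,
// UpdateStatus; any error clears the cached status so it is retried.
func (s *statusManager) syncOne(pod *Pod, status string) error {
	statusPod, err := s.kubeClient.Get(pod.Namespace, pod.Name)
	if err == nil {
		statusPod.Status = status
		_, err = s.kubeClient.UpdateStatus(statusPod)
		if err == nil {
			return nil
		}
	}
	delete(s.podStatuses, pod.Namespace+"/"+pod.Name)
	return fmt.Errorf("error updating status for pod %q: %v", pod.Name, err)
}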

View File

@ -17,7 +17,6 @@ limitations under the License.
package namespace
import (
"fmt"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@ -120,7 +119,7 @@ func deleteAllContent(kubeClient client.Interface, namespace string) (err error)
if err != nil {
return err
}
estimate, err := deletePods(kubeClient, namespace)
err = deletePods(kubeClient, namespace)
if err != nil {
return err
}
@ -144,10 +143,6 @@ func deleteAllContent(kubeClient client.Interface, namespace string) (err error)
if err != nil {
return err
}
if estimate > 0 {
return fmt.Errorf("some resources are being gracefully deleted, estimate %d seconds", estimate)
}
return nil
}
@ -268,25 +263,18 @@ func deleteReplicationControllers(kubeClient client.Interface, ns string) error
return nil
}
func deletePods(kubeClient client.Interface, ns string) (int64, error) {
func deletePods(kubeClient client.Interface, ns string) error {
items, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())
if err != nil {
return 0, err
return err
}
estimate := int64(0)
for i := range items.Items {
if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
grace := *items.Items[i].Spec.TerminationGracePeriodSeconds
if grace > estimate {
estimate = grace
}
}
err := kubeClient.Pods(ns).Delete(items.Items[i].Name, nil)
if err != nil {
return 0, err
return err
}
}
return estimate, nil
return nil
}
func deleteEvents(kubeClient client.Interface, ns string) error {
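The signature change above is the substance of this hunk: the graceful version returned the longest TerminationGracePeriodSeconds it saw, so the namespace controller could report how long deletion might take, while the reverted version simply deletes and returns an error. A stand-alone sketch of the estimate logic being removed, with Pod as a stand-in for api.Pod:

package sketch

// Pod is a stand-in for api.Pod carrying only the field this logic reads.
type Pod struct {
	TerminationGracePeriodSeconds *int64
}

// maxGracePeriod mirrors the estimate computed by the graceful deletePods:
// the largest declared grace period across all pods, or 0 if none set one.
func maxGracePeriod(pods []Pod) int64 {
	estimate := int64(0)
	for _, p := range pods {
		if p.TerminationGracePeriodSeconds != nil && *p.TerminationGracePeriodSeconds > estimate {
			estimate = *p.TerminationGracePeriodSeconds
		}
	}
	return estimate
}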

View File

@ -722,7 +722,7 @@ func TestDelete(t *testing.T) {
// If the controller is still around after trying to delete, either the delete
// failed or we're deleting it gracefully.

if fakeClient.Data[key].R.Node != nil {
return fakeClient.Data[key].R.Node.TTL != 0
return true
}
return false
}

View File

@ -37,7 +37,6 @@ var testTTL uint64 = 60
func NewTestEventEtcdRegistry(t *testing.T) (*tools.FakeEtcdClient, generic.Registry) {
f := tools.NewFakeEtcdClient(t)
f.HideExpires = true
f.TestIndex = true
h := tools.NewEtcdHelper(f, testapi.Codec(), etcdtest.PathPrefix())

View File

@ -324,7 +324,7 @@ func TestEtcdUpdateStatus(t *testing.T) {
key, _ := storage.KeyFunc(ctx, "foo")
key = etcdtest.AddPrefix(key)
pvStart := validNewPersistentVolume("foo")
fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, pvStart), 0)
fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, pvStart), 1)
pvIn := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{

View File

@ -325,7 +325,7 @@ func TestEtcdUpdateStatus(t *testing.T) {
key, _ := storage.KeyFunc(ctx, "foo")
key = etcdtest.AddPrefix(key)
pvcStart := validNewPersistentVolumeClaim("foo", api.NamespaceDefault)
fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, pvcStart), 0)
fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, pvcStart), 1)
pvc := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{

View File

@ -54,7 +54,6 @@ func newStorage(t *testing.T) (*REST, *BindingREST, *StatusREST, *tools.FakeEtcd
}
func validNewPod() *api.Pod {
grace := int64(30)
return &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "foo",
@ -63,8 +62,6 @@ func validNewPod() *api.Pod {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{
{
Name: "foo",
@ -135,9 +132,9 @@ func TestDelete(t *testing.T) {
if fakeEtcdClient.Data[key].R.Node == nil {
return false
}
return fakeEtcdClient.Data[key].R.Node.TTL != 0
return fakeEtcdClient.Data[key].R.Node.TTL == 30
}
test.TestDeleteGraceful(createFn, 30, gracefulSetFn)
test.TestDelete(createFn, gracefulSetFn)
}
func expectPod(t *testing.T, out runtime.Object) (*api.Pod, bool) {
@ -1121,7 +1118,6 @@ func TestEtcdUpdateScheduled(t *testing.T) {
},
}), 1)
grace := int64(30)
podIn := api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "foo",
@ -1143,8 +1139,6 @@ func TestEtcdUpdateScheduled(t *testing.T) {
},
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
}
_, _, err := registry.Update(ctx, &podIn)
@ -1185,7 +1179,7 @@ func TestEtcdUpdateStatus(t *testing.T) {
},
},
}
fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, &podStart), 0)
fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, &podStart), 1)
podIn := api.Pod{
ObjectMeta: api.ObjectMeta{
@ -1214,8 +1208,6 @@ func TestEtcdUpdateStatus(t *testing.T) {
expected := podStart
expected.ResourceVersion = "2"
grace := int64(30)
expected.Spec.TerminationGracePeriodSeconds = &grace
expected.Spec.RestartPolicy = api.RestartPolicyAlways
expected.Spec.DNSPolicy = api.DNSClusterFirst
expected.Spec.Containers[0].ImagePullPolicy = api.PullIfNotPresent

View File

@ -81,27 +81,10 @@ func (podStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) fiel
return append(errorList, validation.ValidatePodUpdate(obj.(*api.Pod), old.(*api.Pod))...)
}
// CheckGracefulDelete allows a pod to be gracefully deleted. It updates the DeleteOptions to
// reflect the desired grace value.
// CheckGracefulDelete allows a pod to be gracefully deleted.
func (podStrategy) CheckGracefulDelete(obj runtime.Object, options *api.DeleteOptions) bool {
if options == nil {
return false
}
period := int64(0)
// user has specified a value
if options.GracePeriodSeconds != nil {
period = *options.GracePeriodSeconds
} else {
// use the default value if set, or deletes the pod immediately (0)
pod := obj.(*api.Pod)
if pod.Spec.TerminationGracePeriodSeconds != nil {
period = *pod.Spec.TerminationGracePeriodSeconds
}
}
// ensure the options and the pod are in sync
options.GracePeriodSeconds = &period
return true
}
type podStatusStrategy struct {
podStrategy
@ -113,7 +96,6 @@ func (podStatusStrategy) PrepareForUpdate(obj, old runtime.Object) {
newPod := obj.(*api.Pod)
oldPod := old.(*api.Pod)
newPod.Spec = oldPod.Spec
newPod.DeletionTimestamp = nil
}
func (podStatusStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) fielderrors.ValidationErrorList {
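The block removed above is where graceful deletion resolved the effective grace period: an explicit GracePeriodSeconds on the DeleteOptions wins, otherwise the pod's own TerminationGracePeriodSeconds applies, otherwise 0 (delete immediately), and the chosen value is written back onto the options so storage and the kubelet agree. A stand-alone sketch of that resolution, using stand-in types rather than the real api package:

package sketch

// DeleteOptions and PodSpec are stand-ins for the api types; only the
// fields the removed logic touches are included.
type DeleteOptions struct {
	GracePeriodSeconds *int64
}

type PodSpec struct {
	TerminationGracePeriodSeconds *int64
}

// resolveGracePeriod mirrors the reverted CheckGracefulDelete: prefer the
// caller's grace period, fall back to the pod's default, and write the
// result back so the options and the pod stay in sync.
func resolveGracePeriod(spec PodSpec, options *DeleteOptions) bool {
	if options == nil {
		return false
	}
	period := int64(0)
	if options.GracePeriodSeconds != nil {
		period = *options.GracePeriodSeconds
	} else if spec.TerminationGracePeriodSeconds != nil {
		period = *spec.TerminationGracePeriodSeconds
	}
	options.GracePeriodSeconds = &period
	return true
}

After the revert, CheckGracefulDelete unconditionally returns false, so pods are never held open for a grace period by the registry.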

View File

@ -477,7 +477,7 @@ func TestEtcdUpdateStatus(t *testing.T) {
key, _ := registry.KeyFunc(ctx, "foo")
key = etcdtest.AddPrefix(key)
resourcequotaStart := validNewResourceQuota()
fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, resourcequotaStart), 0)
fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, resourcequotaStart), 1)
resourcequotaIn := &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{

View File

@ -308,11 +308,7 @@ func (e *EndpointController) syncService(key string) {
continue
}
if len(pod.Status.PodIP) == 0 {
glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
continue
}
if pod.DeletionTimestamp != nil {
glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
glog.V(4).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
continue
}
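This hunk is the filter the endpoints controller applies while building endpoint addresses: pods without an IP are skipped, and the version being reverted also skipped pods already marked for deletion (non-nil DeletionTimestamp). A tiny stand-alone sketch of that filter, with Pod as a stand-in for api.Pod:

package sketch

import "time"

// Pod is a stand-in for api.Pod with just the fields the filter reads.
type Pod struct {
	PodIP             string
	DeletionTimestamp *time.Time
}

// includeInEndpoints reports whether a pod should receive traffic.
// Set requireNotTerminating for the pre-revert behaviour that also
// excluded pods being gracefully deleted.
func includeInEndpoints(pod Pod, requireNotTerminating bool) bool {
	if len(pod.PodIP) == 0 {
		return false
	}
	if requireNotTerminating && pod.DeletionTimestamp != nil {
		return false
	}
	return true
}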

View File

@ -556,21 +556,14 @@ func (h *EtcdHelper) GuaranteedUpdate(key string, ptrToType runtime.Object, igno
ttl := uint64(0)
if node != nil {
index = node.ModifiedIndex
if node.TTL != 0 {
if node.TTL > 0 {
ttl = uint64(node.TTL)
}
if node.Expiration != nil && ttl == 0 {
ttl = 1
}
} else if res != nil {
index = res.EtcdIndex
}
if newTTL != nil {
if ttl != 0 && *newTTL == 0 {
// TODO: remove this after we have verified this is no longer an issue
glog.V(4).Infof("GuaranteedUpdate is clearing TTL for %q, may not be intentional", key)
}
ttl = *newTTL
}
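The lines removed here are the TTL bookkeeping graceful deletion leaned on: GuaranteedUpdate carried an existing node's TTL forward, treated a node with an Expiration but a zero TTL as having at least one second left, and logged when an update was about to clear a TTL. A stand-alone sketch of that decision, with the caller's optional override passed as a pointer the way the tryUpdate callback returns it (node is a stand-in for the etcd node fields this logic reads):

package sketch

import "time"

// node is a stand-in for the etcd node fields this logic reads.
type node struct {
	TTL        int64
	Expiration *time.Time
}

// effectiveTTL mirrors the pre-revert behaviour: preserve the existing
// TTL, round a node that is about to expire up to 1s, and let a non-nil
// newTTL from the update callback override both.
func effectiveTTL(existing *node, newTTL *uint64) uint64 {
	ttl := uint64(0)
	if existing != nil {
		if existing.TTL != 0 {
			ttl = uint64(existing.TTL)
		}
		if existing.Expiration != nil && ttl == 0 {
			ttl = 1
		}
	}
	if newTTL != nil {
		ttl = *newTTL
	}
	return ttl
}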

View File

@ -117,7 +117,6 @@ func TestExtractToList(t *testing.T) {
},
},
}
grace := int64(30)
expect := api.PodList{
ListMeta: api.ListMeta{ResourceVersion: "10"},
Items: []api.Pod{
@ -126,7 +125,6 @@ func TestExtractToList(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
{
@ -134,7 +132,6 @@ func TestExtractToList(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
{
@ -142,7 +139,6 @@ func TestExtractToList(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
},
@ -204,7 +200,6 @@ func TestExtractToListAcrossDirectories(t *testing.T) {
},
},
}
grace := int64(30)
expect := api.PodList{
ListMeta: api.ListMeta{ResourceVersion: "10"},
Items: []api.Pod{
@ -214,7 +209,6 @@ func TestExtractToListAcrossDirectories(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
{
@ -222,7 +216,6 @@ func TestExtractToListAcrossDirectories(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
{
@ -230,7 +223,6 @@ func TestExtractToListAcrossDirectories(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
},
@ -280,7 +272,6 @@ func TestExtractToListExcludesDirectories(t *testing.T) {
},
},
}
grace := int64(30)
expect := api.PodList{
ListMeta: api.ListMeta{ResourceVersion: "10"},
Items: []api.Pod{
@ -289,7 +280,6 @@ func TestExtractToListExcludesDirectories(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
{
@ -297,7 +287,6 @@ func TestExtractToListExcludesDirectories(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
{
@ -305,7 +294,6 @@ func TestExtractToListExcludesDirectories(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
},
},
@ -325,13 +313,11 @@ func TestExtractObj(t *testing.T) {
fakeClient := NewFakeEtcdClient(t)
helper := NewEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix())
key := etcdtest.AddPrefix("/some/key")
grace := int64(30)
expect := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "foo"},
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
}
fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &expect), 0)

View File

@ -39,7 +39,6 @@ const (
EtcdSet = "set"
EtcdCAS = "compareAndSwap"
EtcdDelete = "delete"
EtcdExpire = "expire"
)
// FilterFunc is a predicate which takes an API object and returns true
@ -406,7 +405,7 @@ func (w *etcdWatcher) sendResult(res *etcd.Response) {
w.sendAdd(res)
case EtcdSet, EtcdCAS:
w.sendModify(res)
case EtcdDelete, EtcdExpire:
case EtcdDelete:
w.sendDelete(res)
default:
glog.Errorf("unknown action: %v", res.Action)

View File

@ -21,7 +21,6 @@ import (
"fmt"
"sort"
"sync"
"time"
"github.com/coreos/go-etcd/etcd"
)
@ -54,8 +53,6 @@ type FakeEtcdClient struct {
TestIndex bool
ChangeIndex uint64
LastSetTTL uint64
// Will avoid setting the expires header on objects to make comparison easier
HideExpires bool
Machines []string
// Will become valid after Watch is called; tester may write to it. Tester may
@ -187,11 +184,6 @@ func (f *FakeEtcdClient) setLocked(key, value string, ttl uint64) (*etcd.Respons
prevResult := f.Data[key]
createdIndex := prevResult.R.Node.CreatedIndex
f.t.Logf("updating %v, index %v -> %v (ttl: %d)", key, createdIndex, i, ttl)
var expires *time.Time
if !f.HideExpires && ttl > 0 {
now := time.Now()
expires = &now
}
result := EtcdResponseWithError{
R: &etcd.Response{
Node: &etcd.Node{
@ -199,7 +191,6 @@ func (f *FakeEtcdClient) setLocked(key, value string, ttl uint64) (*etcd.Respons
CreatedIndex: createdIndex,
ModifiedIndex: i,
TTL: int64(ttl),
Expiration: expires,
},
},
}

View File

@ -132,13 +132,11 @@ func PriorityTwo(pod *api.Pod, podLister algorithm.PodLister, minionLister algor
}
func TestDefaultErrorFunc(t *testing.T) {
grace := int64(30)
testPod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"},
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
},
}
handler := util.FakeHandler{

View File

@ -205,7 +205,7 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error
if strings.Contains(data.Image, jpgExpected) {
return nil
} else {
return errors.New(fmt.Sprintf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected))
return errors.New(fmt.Sprintf("data served up in container is innaccurate, %s didn't contain %s", data, jpgExpected))
}
}
}

View File

@ -82,8 +82,8 @@ var _ = Describe("PD", func() {
By("cleaning up PD-RW test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0))
podClient.Delete(host0Pod.Name, nil)
podClient.Delete(host1Pod.Name, nil)
detachPD(host0Name, diskName)
detachPD(host1Name, diskName)
deletePD(diskName)
@ -96,7 +96,7 @@ var _ = Describe("PD", func() {
expectNoError(waitForPodRunning(c, host0Pod.Name))
By("deleting host0Pod")
expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
expectNoError(podClient.Delete(host0Pod.Name, nil), "Failed to delete host0Pod")
By("submitting host1Pod to kubernetes")
_, err = podClient.Create(host1Pod)
@ -105,7 +105,7 @@ var _ = Describe("PD", func() {
expectNoError(waitForPodRunning(c, host1Pod.Name))
By("deleting host1Pod")
expectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod")
expectNoError(podClient.Delete(host1Pod.Name, nil), "Failed to delete host1Pod")
By(fmt.Sprintf("deleting PD %q", diskName))
for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
@ -142,9 +142,9 @@ var _ = Describe("PD", func() {
By("cleaning up PD-RO test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
podClient.Delete(rwPod.Name, api.NewDeleteOptions(0))
podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0))
podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0))
podClient.Delete(rwPod.Name, nil)
podClient.Delete(host0ROPod.Name, nil)
podClient.Delete(host1ROPod.Name, nil)
detachPD(host0Name, diskName)
detachPD(host1Name, diskName)
@ -155,7 +155,7 @@ var _ = Describe("PD", func() {
_, err = podClient.Create(rwPod)
expectNoError(err, "Failed to create rwPod")
expectNoError(waitForPodRunning(c, rwPod.Name))
expectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
expectNoError(podClient.Delete(rwPod.Name, nil), "Failed to delete host0Pod")
By("submitting host0ROPod to kubernetes")
_, err = podClient.Create(host0ROPod)
@ -170,10 +170,10 @@ var _ = Describe("PD", func() {
expectNoError(waitForPodRunning(c, host1ROPod.Name))
By("deleting host0ROPod")
expectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod")
expectNoError(podClient.Delete(host0ROPod.Name, nil), "Failed to delete host0ROPod")
By("deleting host1ROPod")
expectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod")
expectNoError(podClient.Delete(host1ROPod.Name, nil), "Failed to delete host1ROPod")
By(fmt.Sprintf("deleting PD %q", diskName))
for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
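Throughout these e2e teardowns the revert swaps api.NewDeleteOptions(0) for a nil options argument. In this era of the client, NewDeleteOptions(n) is assumed to simply build a DeleteOptions whose GracePeriodSeconds points at n, so 0 means "delete immediately" while nil leaves the grace decision to server-side defaults. A hedged, stand-alone sketch of that helper and the two call forms this diff toggles between (DeleteOptions and deletePod below are illustrative stand-ins):

package main

import "fmt"

// DeleteOptions is a stand-in for api.DeleteOptions.
type DeleteOptions struct {
	GracePeriodSeconds *int64
}

// NewDeleteOptions mirrors what api.NewDeleteOptions is assumed to do:
// wrap an explicit grace period in a DeleteOptions value.
func NewDeleteOptions(grace int64) *DeleteOptions {
	return &DeleteOptions{GracePeriodSeconds: &grace}
}

// deletePod shows the two call shapes the hunks above switch between.
func deletePod(name string, options *DeleteOptions) {
	if options == nil || options.GracePeriodSeconds == nil {
		fmt.Printf("delete %s with server-side default grace period\n", name)
		return
	}
	fmt.Printf("delete %s with %ds grace period\n", name, *options.GracePeriodSeconds)
}

func main() {
	deletePod("host0pod", NewDeleteOptions(0)) // immediate deletion
	deletePod("host0pod", nil)                 // reverted form: defaults
}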

View File

@ -55,7 +55,7 @@ func runLivenessTest(c *client.Client, podDescr *api.Pod, expectRestart bool) {
// At the end of the test, clean up by removing the pod.
defer func() {
By("deleting the pod")
c.Pods(ns).Delete(podDescr.Name, api.NewDeleteOptions(0))
c.Pods(ns).Delete(podDescr.Name, nil)
}()
// Wait until the pod is not pending. (Here we need to check for something other than
@ -101,7 +101,7 @@ func testHostIP(c *client.Client, pod *api.Pod) {
podClient := c.Pods(ns)
By("creating pod")
defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
defer podClient.Delete(pod.Name, nil)
_, err = podClient.Create(pod)
if err != nil {
Fail(fmt.Sprintf("Failed to create pod: %v", err))
@ -205,7 +205,7 @@ var _ = Describe("Pods", func() {
// We call defer here in case there is a problem with
// the test so we can ensure that we clean up after
// ourselves
defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
defer podClient.Delete(pod.Name, nil)
_, err = podClient.Create(pod)
if err != nil {
Fail(fmt.Sprintf("Failed to create pod: %v", err))
@ -218,7 +218,7 @@ var _ = Describe("Pods", func() {
}
Expect(len(pods.Items)).To(Equal(1))
By("verifying pod creation was observed")
By("veryfying pod creation was observed")
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
@ -228,21 +228,22 @@ var _ = Describe("Pods", func() {
Fail("Timeout while waiting for pod creation")
}
By("deleting the pod gracefully")
if err := podClient.Delete(pod.Name, nil); err != nil {
Fail(fmt.Sprintf("Failed to observe pod deletion: %v", err))
By("deleting the pod")
podClient.Delete(pod.Name, nil)
pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
if err != nil {
Fail(fmt.Sprintf("Failed to delete pod: %v", err))
}
Expect(len(pods.Items)).To(Equal(0))
By("verifying pod deletion was observed")
By("veryfying pod deletion was observed")
deleted := false
timeout := false
var lastPod *api.Pod
timer := time.After(podStartTimeout)
for !deleted && !timeout {
select {
case event, _ := <-w.ResultChan():
if event.Type == watch.Deleted {
lastPod = event.Object.(*api.Pod)
deleted = true
}
case <-timer:
@ -252,14 +253,6 @@ var _ = Describe("Pods", func() {
if !deleted {
Fail("Failed to observe pod deletion")
}
Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
if err != nil {
Fail(fmt.Sprintf("Failed to delete pod: %v", err))
}
Expect(len(pods.Items)).To(Equal(0))
})
It("should be updated", func() {
@ -299,7 +292,7 @@ var _ = Describe("Pods", func() {
By("submitting the pod to kubernetes")
defer func() {
By("deleting the pod")
podClient.Delete(pod.Name, api.NewDeleteOptions(0))
podClient.Delete(pod.Name, nil)
}()
pod, err := podClient.Create(pod)
if err != nil {
@ -363,7 +356,7 @@ var _ = Describe("Pods", func() {
},
},
}
defer c.Pods(api.NamespaceDefault).Delete(serverPod.Name, api.NewDeleteOptions(0))
defer c.Pods(api.NamespaceDefault).Delete(serverPod.Name, nil)
_, err := c.Pods(api.NamespaceDefault).Create(serverPod)
if err != nil {
Fail(fmt.Sprintf("Failed to create serverPod: %v", err))
@ -554,7 +547,7 @@ var _ = Describe("Pods", func() {
// We call defer here in case there is a problem with
// the test so we can ensure that we clean up after
// ourselves
podClient.Delete(pod.Name, api.NewDeleteOptions(0))
podClient.Delete(pod.Name, nil)
}()
By("waiting for the pod to start running")
@ -627,7 +620,7 @@ var _ = Describe("Pods", func() {
// We call defer here in case there is a problem with
// the test so we can ensure that we clean up after
// ourselves
podClient.Delete(pod.Name, api.NewDeleteOptions(0))
podClient.Delete(pod.Name, nil)
}()
By("waiting for the pod to start running")

View File

@ -495,24 +495,20 @@ func expectNoError(err error, explain ...interface{}) {
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
func cleanup(filePath string, ns string, selectors ...string) {
By("using delete to clean up resources")
By("using stop to clean up resources")
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
runKubectl("stop", "--grace-period=0", "-f", filePath, nsArg)
runKubectl("stop", "-f", filePath, nsArg)
for _, selector := range selectors {
resources := runKubectl("get", "rc,se", "-l", selector, "--no-headers", nsArg)
resources := runKubectl("get", "pods,rc,se", "-l", selector, "--no-headers", nsArg)
if resources != "" {
Failf("Resources left running after stop:\n%s", resources)
}
pods := runKubectl("get", "pods", "-l", selector, nsArg, "-t", "{{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
if pods != "" {
Failf("Pods left unterminated after stop:\n%s", pods)
}
}
}

View File

@ -240,7 +240,7 @@ var deleteNow string = `
{
"kind": "DeleteOptions",
"apiVersion": "v1beta3",
"gracePeriodSeconds": 0%s
"gracePeriodSeconds": null%s
}
`

View File

@ -88,58 +88,6 @@ func TestExtractObj(t *testing.T) {
})
}
func TestWriteTTL(t *testing.T) {
client := framework.NewEtcdClient()
helper := tools.EtcdHelper{Client: client, Codec: stringCodec{}}
framework.WithEtcdKey(func(key string) {
_, err := client.Set(key, "object", 0)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
s := fakeAPIObject("")
err = helper.GuaranteedUpdate(key, &s, false, func(obj runtime.Object, res tools.ResponseMeta) (runtime.Object, *uint64, error) {
if *(obj.(*fakeAPIObject)) != "object" {
t.Fatalf("unexpected existing object: %v", obj)
}
if res.TTL != 0 {
t.Fatalf("unexpected TTL: %#v", res)
}
ttl := uint64(10)
out := fakeAPIObject("test")
return &out, &ttl, nil
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if s != "test" {
t.Errorf("unexpected response: %#v", s)
}
if res, err := client.Get(key, false, false); err != nil || res == nil || res.Node.TTL != 10 {
t.Fatalf("unexpected get: %v %#v", err, res)
}
err = helper.GuaranteedUpdate(key, &s, false, func(obj runtime.Object, res tools.ResponseMeta) (runtime.Object, *uint64, error) {
if *(obj.(*fakeAPIObject)) != "test" {
t.Fatalf("unexpected existing object: %v", obj)
}
if res.TTL <= 1 {
t.Fatalf("unexpected TTL: %#v", res)
}
out := fakeAPIObject("test2")
return &out, nil, nil
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if s != "test2" {
t.Errorf("unexpected response: %#v", s)
}
if res, err := client.Get(key, false, false); err != nil || res == nil || res.Node.TTL <= 1 {
t.Fatalf("unexpected get: %v %#v", err, res)
}
})
}
func TestWatch(t *testing.T) {
client := framework.NewEtcdClient()
helper := tools.NewEtcdHelper(client, testapi.Codec(), etcdtest.PathPrefix())

View File

@ -277,7 +277,7 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
}
err = restClient.Pods(api.NamespaceDefault).Delete(myPod.Name, api.NewDeleteOptions(0))
err = restClient.Pods(api.NamespaceDefault).Delete(myPod.Name, nil)
if err != nil {
t.Errorf("Failed to delete pod: %v", err)
}