Merge pull request #47850 from dcbw/checkpoint-hostnetwork

Automatic merge from submit-queue (batch tested with PRs 47850, 47835, 46197, 47250, 48284)

dockershim: checkpoint HostNetwork property

To ensure the kubelet doesn't attempt network teardown on HostNetwork
containers that no longer exist but are still checkpointed, preserve
the HostNetwork property in checkpoints. If the checkpoint indicates
the container was a HostNetwork one, don't tear down the network,
since that teardown would fail anyway.
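
In practice the stop path now keys network teardown off a checkpointed flag instead of always attempting teardown. A minimal, self-contained sketch of that decision; the types here are illustrative stand-ins modeled on the diff below, not the real dockershim package:

package main

import "fmt"

// Illustrative stand-ins for the dockershim checkpoint types changed below.
type CheckpointData struct {
	HostNetwork bool `json:"host_network,omitempty"`
}

type PodSandboxCheckpoint struct {
	Namespace string
	Name      string
	Data      *CheckpointData
}

// shouldTearDownNetwork mirrors the check StopPodSandbox makes when it has to
// fall back to a checkpoint: host-network sandboxes get no CNI teardown.
func shouldTearDownNetwork(cp *PodSandboxCheckpoint) bool {
	hostNetwork := cp.Data != nil && cp.Data.HostNetwork
	return !hostNetwork
}

func main() {
	cp := &PodSandboxCheckpoint{
		Namespace: "ns1",
		Name:      "sandbox1",
		Data:      &CheckpointData{HostNetwork: true},
	}
	fmt.Println(shouldTearDownNetwork(cp)) // false: skip teardown, it would fail anyway
}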

Related: https://github.com/kubernetes/kubernetes/issues/44307#issuecomment-299548609

@freehan @kubernetes/sig-network-misc
Kubernetes Submit Queue 2017-06-29 15:16:37 -07:00 committed by GitHub
commit 1cca341b17
3 changed files with 15 additions and 8 deletions

@@ -50,6 +50,7 @@ type PortMapping struct {
 // CheckpointData contains all types of data that can be stored in the checkpoint.
 type CheckpointData struct {
 	PortMappings []*PortMapping `json:"port_mappings,omitempty"`
+	HostNetwork  bool           `json:"host_network,omitempty"`
 }
 
 // PodSandboxCheckpoint is the checkpoint structure for a sandbox
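
For illustration (only the json tags shown above are assumed here), the data section of a checkpoint for a host-network sandbox serializes roughly as

{"host_network": true}

With omitempty, an empty port_mappings slice and a false host_network are dropped entirely, so older checkpoints written before this change simply deserialize with HostNetwork == false.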

@@ -48,18 +48,22 @@ func TestPersistentCheckpointHandler(t *testing.T) {
 			&port443,
 		},
 	}
+	checkpoint1.Data.HostNetwork = true
 	checkpoints := []struct {
-		podSandboxID string
-		checkpoint   *PodSandboxCheckpoint
+		podSandboxID      string
+		checkpoint        *PodSandboxCheckpoint
+		expectHostNetwork bool
 	}{
 		{
 			"id1",
 			checkpoint1,
+			true,
 		},
 		{
 			"id2",
 			NewPodSandboxCheckpoint("ns2", "sandbox2"),
+			false,
 		},
 	}
@@ -72,6 +76,7 @@ func TestPersistentCheckpointHandler(t *testing.T) {
 		checkpoint, err := handler.GetCheckpoint(tc.podSandboxID)
 		assert.NoError(t, err)
 		assert.Equal(t, *checkpoint, *tc.checkpoint)
+		assert.Equal(t, checkpoint.Data.HostNetwork, tc.expectHostNetwork)
 	}
 	// Test ListCheckpoints
 	keys, err := handler.ListCheckpoints()

@@ -171,14 +171,14 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (id
 // after us?
 func (ds *dockerService) StopPodSandbox(podSandboxID string) error {
 	var namespace, name string
+	var hostNetwork bool
 	var checkpointErr, statusErr error
-	needNetworkTearDown := false
 
 	// Try to retrieve sandbox information from docker daemon or sandbox checkpoint
 	status, statusErr := ds.PodSandboxStatus(podSandboxID)
 	if statusErr == nil {
 		nsOpts := status.GetLinux().GetNamespaces().GetOptions()
-		needNetworkTearDown = nsOpts != nil && !nsOpts.HostNetwork
+		hostNetwork = nsOpts != nil && nsOpts.HostNetwork
 		m := status.GetMetadata()
 		namespace = m.Namespace
 		name = m.Name
@@ -211,10 +211,8 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error {
 		} else {
 			namespace = checkpoint.Namespace
 			name = checkpoint.Name
+			hostNetwork = checkpoint.Data != nil && checkpoint.Data.HostNetwork
 		}
-		// Always trigger network plugin to tear down
-		needNetworkTearDown = true
 	}
 	// WARNING: The following operations made the following assumption:
@@ -226,7 +224,7 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error {
 	// since it is stopped. With empty network namespcae, CNI bridge plugin will conduct best
 	// effort clean up and will not return error.
 	errList := []error{}
-	if needNetworkTearDown {
+	if !hostNetwork {
 		cID := kubecontainer.BuildContainerID(runtimeName, podSandboxID)
 		err := ds.network.TearDownPod(namespace, name, cID)
 		if err == nil {
@@ -642,6 +640,9 @@ func constructPodSandboxCheckpoint(config *runtimeapi.PodSandboxConfig) *PodSand
 			Protocol: &proto,
 		})
 	}
+	if nsOptions := config.GetLinux().GetSecurityContext().GetNamespaceOptions(); nsOptions != nil {
+		checkpoint.Data.HostNetwork = nsOptions.HostNetwork
+	}
 	return checkpoint
 }
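
For completeness, the creation side copies the pod's HostNetwork namespace option into the checkpoint when the sandbox is created, so the flag survives even after the sandbox container itself is gone. A rough, self-contained sketch of that half; the types are illustrative stand-ins, whereas the real code goes through the generated runtimeapi getters shown in the hunk above:

package main

import "fmt"

// Illustrative stand-ins for the CRI sandbox config fields read above; the real
// code uses config.GetLinux().GetSecurityContext().GetNamespaceOptions().
type NamespaceOption struct{ HostNetwork bool }
type LinuxSandboxSecurityContext struct{ NamespaceOptions *NamespaceOption }
type LinuxPodSandboxConfig struct{ SecurityContext *LinuxSandboxSecurityContext }
type PodSandboxConfig struct{ Linux *LinuxPodSandboxConfig }

type CheckpointData struct{ HostNetwork bool }
type PodSandboxCheckpoint struct{ Data *CheckpointData }

// buildCheckpoint mirrors the lines added to constructPodSandboxCheckpoint:
// record whether the pod runs in the host network namespace.
func buildCheckpoint(config *PodSandboxConfig) *PodSandboxCheckpoint {
	cp := &PodSandboxCheckpoint{Data: &CheckpointData{}}
	if config.Linux != nil && config.Linux.SecurityContext != nil &&
		config.Linux.SecurityContext.NamespaceOptions != nil {
		cp.Data.HostNetwork = config.Linux.SecurityContext.NamespaceOptions.HostNetwork
	}
	return cp
}

func main() {
	cfg := &PodSandboxConfig{
		Linux: &LinuxPodSandboxConfig{
			SecurityContext: &LinuxSandboxSecurityContext{
				NamespaceOptions: &NamespaceOption{HostNetwork: true},
			},
		},
	}
	fmt.Println(buildCheckpoint(cfg).Data.HostNetwork) // true: StopPodSandbox will skip teardown later
}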