Merge remote-tracking branch 'origin/master' into release-1.14

pull/564/head
Hannes Hoerl 2019-03-15 09:35:50 +00:00
commit 846a82fecc
15 changed files with 2804 additions and 47 deletions

.gitignore

@@ -111,6 +111,8 @@ kubernetes.tar.gz
# TODO(thockin): uncomment this when we stop committing the generated files.
#zz_generated.*
zz_generated.openapi.go
# TODO(roycaihw): remove this when we stop committing the generated definition
!staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi/zz_generated.openapi.go
zz_generated_*_test.go
# make-related metadata


@@ -24,7 +24,7 @@ source "${KUBE_ROOT}/hack/lib/util.sh"
CLEAN_PATTERNS=(
"_tmp"
"doc_tmp"
".*/zz_generated.openapi.go"
"((?!staging\/src\/k8s\.io\/apiextensions-apiserver\/pkg\/generated\/openapi).)*/zz_generated.openapi.go"
"test/e2e/generated/bindata.go"
)

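The new CLEAN_PATTERNS entry is meant to delete every generated zz_generated.openapi.go except the one that now stays committed under staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi (matching the .gitignore change above). For illustration only, a minimal Go sketch of the equivalent path filter; shouldClean and keptFile are hypothetical helpers, not part of the actual clean script:

// Illustrative stand-in for the new CLEAN_PATTERNS entry: remove every
// generated zz_generated.openapi.go except the committed apiextensions one.
package main

import (
	"fmt"
	"strings"
)

const keptFile = "staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi/zz_generated.openapi.go"

// shouldClean reports whether a repo-relative path would be deleted.
func shouldClean(path string) bool {
	return strings.HasSuffix(path, "/zz_generated.openapi.go") && path != keptFile
}

func main() {
	fmt.Println(shouldClean("pkg/generated/openapi/zz_generated.openapi.go")) // true
	fmt.Println(shouldClean(keptFile))                                        // false
}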

@@ -101,7 +101,7 @@ pushd "${KUBE_ROOT}" > /dev/null 2>&1
ret=1
fi
if ! _out="$(diff -Naupr -x "BUILD" -x "zz_generated.openapi.go" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_kubetmp}/vendor")"; then
if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_kubetmp}/vendor")"; then
echo "Your vendored results are different:" >&2
echo "${_out}" >&2
echo "Godeps Verify failed." >&2


@@ -4212,18 +4212,39 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin
// Note that this is annoying: the load balancer disappears from the API immediately, but it is still
// deleting in the background. We get a DependencyViolation until the load balancer has deleted itself
var loadBalancerSGs = aws.StringValueSlice(lb.SecurityGroups)
describeRequest := &ec2.DescribeSecurityGroupsInput{}
filters := []*ec2.Filter{
newEc2Filter("group-id", loadBalancerSGs...),
}
describeRequest.Filters = c.tagging.addFilters(filters)
response, err := c.ec2.DescribeSecurityGroups(describeRequest)
if err != nil {
return fmt.Errorf("error querying security groups for ELB: %q", err)
}
// Collect the security groups to delete
securityGroupIDs := map[string]struct{}{}
for _, securityGroupID := range lb.SecurityGroups {
if *securityGroupID == c.cfg.Global.ElbSecurityGroup {
//We don't want to delete a security group that was defined in the Cloud Configurationn.
for _, sg := range response {
sgID := aws.StringValue(sg.GroupId)
if sgID == c.cfg.Global.ElbSecurityGroup {
//We don't want to delete a security group that was defined in the Cloud Configuration.
continue
}
if aws.StringValue(securityGroupID) == "" {
klog.Warning("Ignoring empty security group in ", service.Name)
if sgID == "" {
klog.Warningf("Ignoring empty security group in %s", service.Name)
continue
}
securityGroupIDs[*securityGroupID] = struct{}{}
if !c.tagging.hasClusterTag(sg.Tags) {
klog.Warningf("Ignoring security group with no cluster tag in %s", service.Name)
continue
}
securityGroupIDs[sgID] = struct{}{}
}
// Loop through and try to delete them

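The change above switches the deletion path from blindly collecting lb.SecurityGroups to describing them first and skipping the cloud-config ELB group, empty IDs, and any group that does not carry this cluster's tag. A self-contained Go sketch of that filtering step follows; the sg type, hasClusterTag, and the example IDs are local stand-ins for illustration, not the AWS SDK types or the provider's tagging helpers:

// Minimal sketch of the new security-group filtering, using local stand-in
// types instead of the AWS SDK and the cloud provider's awsTagging helpers.
package main

import (
	"fmt"
	"strings"
)

type sg struct {
	ID   string
	Tags map[string]string
}

// clusterTagPrefix approximates the shared cluster tag key prefix used by the
// AWS provider; treated here as an assumption for illustration.
const clusterTagPrefix = "kubernetes.io/cluster/"

func hasClusterTag(tags map[string]string) bool {
	for k := range tags {
		if strings.HasPrefix(k, clusterTagPrefix) {
			return true
		}
	}
	return false
}

// collectDeletableSGs mirrors the diff: skip the configured ELB group,
// skip empty IDs, and skip groups without this cluster's tag.
func collectDeletableSGs(groups []sg, cfgELBSecurityGroup string) map[string]struct{} {
	out := map[string]struct{}{}
	for _, g := range groups {
		if g.ID == cfgELBSecurityGroup || g.ID == "" || !hasClusterTag(g.Tags) {
			continue
		}
		out[g.ID] = struct{}{}
	}
	return out
}

func main() {
	groups := []sg{
		{ID: "sg-cfg"},
		{ID: "sg-owned", Tags: map[string]string{"kubernetes.io/cluster/example": "owned"}},
		{ID: "sg-foreign", Tags: map[string]string{"team": "other"}},
	}
	fmt.Println(collectDeletableSGs(groups, "sg-cfg")) // map[sg-owned:{}]
}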

@@ -57,6 +57,8 @@ type managerImpl struct {
config Config
// the function to invoke to kill a pod
killPodFunc KillPodFunc
// the function to get the mirror pod by a given static pod
mirrorPodFunc MirrorPodFunc
// the interface that knows how to do image gc
imageGC ImageGC
// the interface that knows how to do container gc
@@ -99,6 +101,7 @@ func NewManager(
summaryProvider stats.SummaryProvider,
config Config,
killPodFunc KillPodFunc,
mirrorPodFunc MirrorPodFunc,
imageGC ImageGC,
containerGC ContainerGC,
recorder record.EventRecorder,
@@ -108,6 +111,7 @@
manager := &managerImpl{
clock: clock,
killPodFunc: killPodFunc,
mirrorPodFunc: mirrorPodFunc,
imageGC: imageGC,
containerGC: containerGC,
config: config,
@@ -545,9 +549,19 @@ func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg
// If the pod is marked as critical and static, and support for critical pod annotations is enabled,
// do not evict such pods. Static pods are not re-admitted after evictions.
// https://github.com/kubernetes/kubernetes/issues/40573 has more details.
if kubelettypes.IsCriticalPod(pod) && kubepod.IsStaticPod(pod) {
klog.Errorf("eviction manager: cannot evict a critical static pod %s", format.Pod(pod))
return false
if kubepod.IsStaticPod(pod) {
// need mirrorPod to check its "priority" value; static pod doesn't carry it
if mirrorPod, ok := m.mirrorPodFunc(pod); ok && mirrorPod != nil {
// skip only when it's a static and critical pod
if kubelettypes.IsCriticalPod(mirrorPod) {
klog.Errorf("eviction manager: cannot evict a critical static pod %s", format.Pod(pod))
return false
}
} else {
// we should never hit this
klog.Errorf("eviction manager: cannot get mirror pod from static pod %s, so cannot evict it", format.Pod(pod))
return false
}
}
status := v1.PodStatus{
Phase: v1.PodFailed,


@@ -1164,6 +1164,11 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
activePodsFunc := func() []*v1.Pod {
return pods
}
mirrorPodFunc := func(staticPod *v1.Pod) (*v1.Pod, bool) {
mirrorPod := staticPod.DeepCopy()
mirrorPod.Annotations[kubelettypes.ConfigSourceAnnotationKey] = kubelettypes.ApiserverSource
return mirrorPod, true
}
fakeClock := clock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
@@ -1198,6 +1203,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
manager := &managerImpl{
clock: fakeClock,
killPodFunc: podKiller.killPodNow,
mirrorPodFunc: mirrorPodFunc,
imageGC: diskGC,
containerGC: diskGC,
config: config,


@@ -94,6 +94,10 @@ type ContainerGC interface {
// gracePeriodOverride - the grace period override to use instead of what is on the pod spec
type KillPodFunc func(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error
// MirrorPodFunc returns the mirror pod for the given static pod and
// whether it was known to the pod manager.
type MirrorPodFunc func(*v1.Pod) (*v1.Pod, bool)
// ActivePodsFunc returns pods bound to the kubelet that are active (i.e. non-terminal state)
type ActivePodsFunc func() []*v1.Pod

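In the kubelet wiring below, this callback is satisfied by klet.podManager.GetMirrorPodByPod. Purely for illustration, a minimal map-backed MirrorPodFunc; podFullName, newMirrorPodFunc, and the in-memory map are hypothetical stand-ins for the kubelet's pod manager, not its real implementation:

// Sketch of a map-backed MirrorPodFunc keyed by the kubelet-style pod full
// name ("name_namespace"); illustrative only.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type MirrorPodFunc func(*v1.Pod) (*v1.Pod, bool)

func podFullName(p *v1.Pod) string { return p.Name + "_" + p.Namespace }

func newMirrorPodFunc(mirrorPods map[string]*v1.Pod) MirrorPodFunc {
	return func(staticPod *v1.Pod) (*v1.Pod, bool) {
		mp, ok := mirrorPods[podFullName(staticPod)]
		return mp, ok
	}
}

func main() {
	static := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "etcd", Namespace: "kube-system"}}
	mirror := static.DeepCopy()
	fn := newMirrorPodFunc(map[string]*v1.Pod{podFullName(static): mirror})
	mp, ok := fn(static)
	fmt.Println(ok, mp.Name) // true etcd
}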

@@ -822,7 +822,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.podKillingCh = make(chan *kubecontainer.PodPair, podKillingChannelCapacity)
// setup eviction manager
evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig, killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock)
evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig, killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.podManager.GetMirrorPodByPod, klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock)
klet.evictionManager = evictionManager
klet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)


@@ -89,19 +89,7 @@ func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
// GetActivePods returns non-terminal pods
func (kl *Kubelet) GetActivePods() []*v1.Pod {
allPods, mirrorPods := kl.podManager.GetPodsAndMirrorPods()
mirrorPodSet := make(map[string]*v1.Pod)
for _, p := range mirrorPods {
mirrorPodSet[kubecontainer.GetPodFullName(p)] = p
}
for i := range allPods {
podFullName := kubecontainer.GetPodFullName(allPods[i])
// replace static pod with mirror pod as some info (e.g. spec.Priority)
// is needed to make further decisions (e.g. eviction)
if mirrorPod, ok := mirrorPodSet[podFullName]; ok {
allPods[i] = mirrorPod
}
}
allPods := kl.podManager.GetPods()
activePods := kl.filterOutTerminatedPods(allPods)
return activePods
}


@@ -305,7 +305,7 @@ func newTestKubeletWithImageList(
Namespace: "",
}
// setup eviction manager
evictionManager, evictionAdmitHandler := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{}, killPodNow(kubelet.podWorkers, fakeRecorder), kubelet.imageManager, kubelet.containerGC, fakeRecorder, nodeRef, kubelet.clock)
evictionManager, evictionAdmitHandler := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{}, killPodNow(kubelet.podWorkers, fakeRecorder), kubelet.podManager.GetMirrorPodByPod, kubelet.imageManager, kubelet.containerGC, fakeRecorder, nodeRef, kubelet.clock)
kubelet.evictionManager = evictionManager
kubelet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)


@@ -120,7 +120,8 @@ func TestRunOnce(t *testing.T) {
fakeKillPodFunc := func(pod *v1.Pod, podStatus v1.PodStatus, gracePeriodOverride *int64) error {
return nil
}
evictionManager, evictionAdmitHandler := eviction.NewManager(kb.resourceAnalyzer, eviction.Config{}, fakeKillPodFunc, nil, nil, kb.recorder, nodeRef, kb.clock)
fakeMirrodPodFunc := func(*v1.Pod) (*v1.Pod, bool) { return nil, false }
evictionManager, evictionAdmitHandler := eviction.NewManager(kb.resourceAnalyzer, eviction.Config{}, fakeKillPodFunc, fakeMirrodPodFunc, nil, nil, kb.recorder, nodeRef, kb.clock)
kb.evictionManager = evictionManager
kb.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)

File diff suppressed because it is too large.


@@ -40,6 +40,8 @@ import (
. "github.com/onsi/gomega"
)
const nobodyUser = int64(65534)
var _ = SIGDescribe("PodSecurityPolicy", func() {
f := framework.NewDefaultFramework("podsecuritypolicy")
f.SkipPrivilegedPSPBinding = true
@@ -186,6 +188,21 @@ func testPrivilegedPods(tester func(pod *v1.Pod)) {
sysadmin.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
tester(sysadmin)
})
By("Running a RunAsGroup pod", func() {
sysadmin := restrictedPod("runasgroup")
gid := int64(0)
sysadmin.Spec.Containers[0].SecurityContext.RunAsGroup = &gid
tester(sysadmin)
})
By("Running a RunAsUser pod", func() {
sysadmin := restrictedPod("runasuser")
uid := int64(0)
sysadmin.Spec.Containers[0].SecurityContext.RunAsUser = &uid
tester(sysadmin)
})
}
// createAndBindPSP creates a PSP in the policy API group.
@@ -244,7 +261,8 @@ func restrictedPod(name string) *v1.Pod {
Image: imageutils.GetPauseImageName(),
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: boolPtr(false),
RunAsUser: utilpointer.Int64Ptr(65534),
RunAsUser: utilpointer.Int64Ptr(nobodyUser),
RunAsGroup: utilpointer.Int64Ptr(nobodyUser),
},
}},
},
@@ -270,6 +288,9 @@ func privilegedPSP(name string) *policy.PodSecurityPolicy {
RunAsUser: policy.RunAsUserStrategyOptions{
Rule: policy.RunAsUserStrategyRunAsAny,
},
RunAsGroup: &policy.RunAsGroupStrategyOptions{
Rule: policy.RunAsGroupStrategyRunAsAny,
},
SELinux: policy.SELinuxStrategyOptions{
Rule: policy.SELinuxStrategyRunAsAny,
},
@@ -325,6 +346,11 @@ func restrictedPSP(name string) *policy.PodSecurityPolicy {
RunAsUser: policy.RunAsUserStrategyOptions{
Rule: policy.RunAsUserStrategyMustRunAsNonRoot,
},
RunAsGroup: &policy.RunAsGroupStrategyOptions{
Rule: policy.RunAsGroupStrategyMustRunAs,
Ranges: []policy.IDRange{
{Min: nobodyUser, Max: nobodyUser}},
},
SELinux: policy.SELinuxStrategyOptions{
Rule: policy.SELinuxStrategyRunAsAny,
},

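The restricted PSP now pins RunAsGroup to a MustRunAs rule with a single-ID range at nobodyUser (65534), while the privileged PSP keeps RunAsAny. As a rough sketch of what a MustRunAs range means for a pod's requested gid, assuming a simplified local IDRange type rather than the policy API or its real validator:

// Rough illustration of MustRunAs range matching for RunAsGroup; IDRange and
// gidAllowed are local stand-ins, not the policy API types.
package main

import "fmt"

type IDRange struct{ Min, Max int64 }

// gidAllowed reports whether the requested gid falls inside any allowed range.
func gidAllowed(gid int64, ranges []IDRange) bool {
	for _, r := range ranges {
		if gid >= r.Min && gid <= r.Max {
			return true
		}
	}
	return false
}

func main() {
	nobody := int64(65534)
	ranges := []IDRange{{Min: nobody, Max: nobody}} // the restricted PSP's single-ID range
	fmt.Println(gidAllowed(nobody, ranges)) // true: nobodyUser is accepted
	fmt.Println(gidAllowed(0, ranges))      // false: gid 0 is rejected
}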

@@ -54,6 +54,7 @@ go_library(
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/coordination/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -62,6 +63,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",


@@ -22,7 +22,10 @@ import (
coordv1beta1 "k8s.io/api/coordination/v1beta1"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
@@ -103,35 +106,45 @@ var _ = framework.KubeDescribe("NodeLease", func() {
// enough time has passed. So for here, keep checking the time diff
// between 2 NodeStatus report, until it is longer than lease duration
// (the same as nodeMonitorGracePeriod), or it doesn't change for at least leaseDuration
lastHeartbeatTime := getReadyConditionHeartbeatTime(f.ClientSet, nodeName)
lastHeartbeatTime, lastStatus := getHeartbeatTimeAndStatus(f.ClientSet, nodeName)
lastObserved := time.Now()
Eventually(func() error {
currentHeartbeatTime := getReadyConditionHeartbeatTime(f.ClientSet, nodeName)
err = wait.Poll(time.Second, 5*time.Minute, func() (bool, error) {
currentHeartbeatTime, currentStatus := getHeartbeatTimeAndStatus(f.ClientSet, nodeName)
currentObserved := time.Now()
switch {
case currentHeartbeatTime == lastHeartbeatTime:
if currentHeartbeatTime == lastHeartbeatTime {
if currentObserved.Sub(lastObserved) > 2*leaseDuration {
// heartbeat hasn't changed while watching for at least 2*leaseDuration, success!
framework.Logf("node status heartbeat is unchanged for %s, was waiting for at least %s, success!", currentObserved.Sub(lastObserved), 2*leaseDuration)
return nil
return true, nil
}
framework.Logf("node status heartbeat is unchanged for %s, waiting for %s", currentObserved.Sub(lastObserved), 2*leaseDuration)
return fmt.Errorf("node status heartbeat is unchanged for %s, waiting for %s", currentObserved.Sub(lastObserved), 2*leaseDuration)
return false, nil
}
case currentHeartbeatTime != lastHeartbeatTime:
if currentHeartbeatTime.Sub(lastHeartbeatTime) > leaseDuration {
// heartbeat time changed, but the diff was greater than leaseDuration, success!
framework.Logf("node status heartbeat changed in %s, was waiting for at least %s, success!", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
return nil
}
if currentHeartbeatTime.Sub(lastHeartbeatTime) >= leaseDuration {
// heartbeat time changed, but the diff was greater than leaseDuration, success!
framework.Logf("node status heartbeat changed in %s, was waiting for at least %s, success!", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
return true, nil
}
if !apiequality.Semantic.DeepEqual(lastStatus, currentStatus) {
// heartbeat time changed, but there were relevant changes in the status, keep waiting
framework.Logf("node status heartbeat changed in %s (with other status changes), waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
framework.Logf("%s", diff.ObjectReflectDiff(lastStatus, currentStatus))
lastHeartbeatTime = currentHeartbeatTime
lastObserved = currentObserved
framework.Logf("node status heartbeat changed in %s, waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
return fmt.Errorf("node status heartbeat changed in %s, waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
lastStatus = currentStatus
return false, nil
}
return nil
}, 5*time.Minute, time.Second).Should(BeNil())
// heartbeat time changed, with no other status changes, in less time than we expected, so fail.
return false, fmt.Errorf("node status heartbeat changed in %s (with no other status changes), was waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
})
// a timeout is acceptable, since it means we waited 5 minutes and didn't see any unwarranted node status updates
if err != nil && err != wait.ErrWaitTimeout {
Expect(err).NotTo(HaveOccurred(), "error waiting for infrequent nodestatus update")
}
By("verify node is still in ready status even though node status report is infrequent")
// This check on node status is only meaningful when this e2e test is
@@ -145,12 +158,14 @@ var _ = framework.KubeDescribe("NodeLease", func() {
})
})
func getReadyConditionHeartbeatTime(clientSet clientset.Interface, nodeName string) time.Time {
func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, corev1.NodeStatus) {
node, err := clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).To(BeNil())
_, readyCondition := testutils.GetNodeCondition(&node.Status, corev1.NodeReady)
Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue))
return readyCondition.LastHeartbeatTime.Time
heartbeatTime := readyCondition.LastHeartbeatTime.Time
readyCondition.LastHeartbeatTime = metav1.Time{}
return heartbeatTime, node.Status
}
func expectLease(lease *coordv1beta1.Lease, nodeName string) error {
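
The rewritten poll above distinguishes three outcomes: an unchanged heartbeat that has stayed put for at least 2*leaseDuration passes, a heartbeat that moved no sooner than leaseDuration passes, and an early heartbeat is tolerated only if other status fields changed. A compact sketch of that decision as a pure function follows; heartbeatCheck and its parameters are hypothetical names for illustration, not the e2e framework's API, and the real loop also resets its baselines when the status changes:

// Sketch of the poll's decision logic; within a wait.Poll-style condition,
// (true, nil) ends the poll successfully, (false, nil) keeps polling, and a
// non-nil error aborts the poll.
package main

import (
	"fmt"
	"time"
)

func heartbeatCheck(last, current time.Time, sinceLastObserved, leaseDuration time.Duration, statusChanged bool) (bool, error) {
	if current.Equal(last) {
		// unchanged heartbeat: success once we have waited at least 2*leaseDuration
		return sinceLastObserved > 2*leaseDuration, nil
	}
	if current.Sub(last) >= leaseDuration {
		// heartbeat moved, but no more often than the lease duration: success
		return true, nil
	}
	if statusChanged {
		// an early report is acceptable if the status actually changed; keep waiting
		return false, nil
	}
	// heartbeat moved early with no other status change: abort with an error
	return false, fmt.Errorf("heartbeat changed after %s with no status change", current.Sub(last))
}

func main() {
	now := time.Now()
	fmt.Println(heartbeatCheck(now, now, 90*time.Second, 40*time.Second, false))                      // true <nil>
	fmt.Println(heartbeatCheck(now, now.Add(10*time.Second), 10*time.Second, 40*time.Second, false)) // false + error
}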