Filter irrelevant pods from quota

pull/6/head
derekwaynecarr 2015-02-24 11:17:41 -05:00
parent 8b627f516f
commit 21031e37c7
4 changed files with 235 additions and 8 deletions


@@ -44,6 +44,7 @@ type Fake struct {
EventsList api.EventList
LimitRangesList api.LimitRangeList
ResourceQuotasList api.ResourceQuotaList
+ ResourceQuotaUsage api.ResourceQuotaUsage
NamespacesList api.NamespaceList
SecretList api.SecretList
Secret api.Secret


@@ -29,5 +29,6 @@ type FakeResourceQuotaUsages struct {
func (c *FakeResourceQuotaUsages) Create(resourceQuotaUsage *api.ResourceQuotaUsage) error {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "create-resourceQuotaUsage"})
+ c.Fake.ResourceQuotaUsage = *resourceQuotaUsage
return nil
}


@@ -77,6 +77,34 @@ func (rm *ResourceQuotaManager) synchronize() {
wg.Wait()
}
+ // FilterQuotaPods eliminates pods that no longer have a cost against the quota.
+ // Pods with a restart policy of always are always returned.
+ // Failed pods with a restart policy of on-failure are always returned.
+ // Pods that are neither succeeded nor failed are counted against quota.
+ func FilterQuotaPods(pods []api.Pod) []api.Pod {
+ var result []api.Pod
+ for _, value := range pods {
+ // a pod with a restart policy of always counts against usage no matter its state
+ if value.Spec.RestartPolicy.Always != nil {
+ result = append(result, value)
+ continue
+ }
+ // a failed pod with a restart policy of on-failure will run again, so it counts against usage
+ if api.PodFailed == value.Status.Phase &&
+ value.Spec.RestartPolicy.OnFailure != nil {
+ result = append(result, value)
+ continue
+ }
+ // a pod that has neither succeeded nor failed is still active, so it counts against quota
+ if api.PodSucceeded != value.Status.Phase &&
+ api.PodFailed != value.Status.Phase {
+ result = append(result, value)
+ continue
+ }
+ }
+ return result
+ }
// syncResourceQuota runs a complete sync of current status
func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err error) {
@@ -96,9 +124,15 @@ func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err
Used: api.ResourceList{},
},
}
- // populate the usage with the current observed hard/used limits
- usage.Status.Hard = quota.Spec.Hard
- usage.Status.Used = quota.Status.Used
+ // set the hard values supported on the quota
+ for k, v := range quota.Spec.Hard {
+ usage.Status.Hard[k] = *v.Copy()
+ }
+ // set any last known observed status values for usage
+ for k, v := range quota.Status.Used {
+ usage.Status.Used[k] = *v.Copy()
+ }
set := map[api.ResourceName]bool{}
for k := range usage.Status.Hard {
@@ -113,6 +147,8 @@ func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err
}
}
+ filteredPods := FilterQuotaPods(pods.Items)
// iterate over each resource, and update observation
for k := range usage.Status.Hard {
@@ -126,17 +162,17 @@ func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err
switch k {
case api.ResourcePods:
- value = resource.NewQuantity(int64(len(pods.Items)), resource.DecimalSI)
+ value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
case api.ResourceMemory:
val := int64(0)
- for i := range pods.Items {
- val = val + PodMemory(&pods.Items[i]).Value()
+ for i := range filteredPods {
+ val = val + PodMemory(&filteredPods[i]).Value()
}
value = resource.NewQuantity(int64(val), resource.DecimalSI)
case api.ResourceCPU:
val := int64(0)
- for i := range pods.Items {
- val = val + PodCPU(&pods.Items[i]).MilliValue()
+ for i := range filteredPods {
+ val = val + PodCPU(&filteredPods[i]).MilliValue()
}
value = resource.NewMilliQuantity(int64(val), resource.DecimalSI)
case api.ResourceServices:


@@ -0,0 +1,189 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcequota
import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
func getResourceRequirements(cpu, memory string) api.ResourceRequirements {
res := api.ResourceRequirements{}
res.Limits = api.ResourceList{}
if cpu != "" {
res.Limits[api.ResourceCPU] = resource.MustParse(cpu)
}
if memory != "" {
res.Limits[api.ResourceMemory] = resource.MustParse(memory)
}
return res
}
func TestFilterQuotaPods(t *testing.T) {
pods := []api.Pod{
{
ObjectMeta: api.ObjectMeta{Name: "pod-running"},
Status: api.PodStatus{Phase: api.PodRunning},
},
{
ObjectMeta: api.ObjectMeta{Name: "pod-pending"},
Status: api.PodStatus{Phase: api.PodPending},
},
{
ObjectMeta: api.ObjectMeta{Name: "pod-succeeded"},
Status: api.PodStatus{Phase: api.PodSucceeded},
},
{
ObjectMeta: api.ObjectMeta{Name: "pod-unknown"},
Status: api.PodStatus{Phase: api.PodUnknown},
},
{
ObjectMeta: api.ObjectMeta{Name: "pod-failed"},
Status: api.PodStatus{Phase: api.PodFailed},
},
{
ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-always"},
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicy{
Always: &api.RestartPolicyAlways{},
},
},
Status: api.PodStatus{Phase: api.PodFailed},
},
{
ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-on-failure"},
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicy{
OnFailure: &api.RestartPolicyOnFailure{},
},
},
Status: api.PodStatus{Phase: api.PodFailed},
},
{
ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-never"},
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicy{
Never: &api.RestartPolicyNever{},
},
},
Status: api.PodStatus{Phase: api.PodFailed},
},
}
expectedResults := util.NewStringSet("pod-running",
"pod-pending", "pod-unknown", "pod-failed-with-restart-always",
"pod-failed-with-restart-on-failure")
actualResults := util.StringSet{}
result := FilterQuotaPods(pods)
for i := range result {
actualResults.Insert(result[i].Name)
}
if len(expectedResults) != len(actualResults) || !actualResults.HasAll(expectedResults.List()...) {
t.Errorf("Expected results %v, Actual results %v", expectedResults, actualResults)
}
}
func TestSyncResourceQuota(t *testing.T) {
podList := api.PodList{
Items: []api.Pod{
{
ObjectMeta: api.ObjectMeta{Name: "pod-running"},
Status: api.PodStatus{Phase: api.PodRunning},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "pod-running-2"},
Status: api.PodStatus{Phase: api.PodRunning},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "pod-failed"},
Status: api.PodStatus{Phase: api.PodFailed},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
},
},
},
}
quota := api.ResourceQuota{
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("3"),
api.ResourceMemory: resource.MustParse("100Gi"),
api.ResourcePods: resource.MustParse("5"),
},
},
}
expectedUsage := api.ResourceQuotaUsage{
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("3"),
api.ResourceMemory: resource.MustParse("100Gi"),
api.ResourcePods: resource.MustParse("5"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("200m"),
api.ResourceMemory: resource.MustParse("2147483648"),
api.ResourcePods: resource.MustParse("2"),
},
},
}
kubeClient := &client.Fake{
PodsList: podList,
}
resourceQuotaManager := NewResourceQuotaManager(kubeClient)
err := resourceQuotaManager.syncResourceQuota(quota)
if err != nil {
t.Errorf("Unexpected error %v", err)
}
usage := kubeClient.ResourceQuotaUsage
// ensure hard and used limits are what we expected
for k, v := range expectedUsage.Status.Hard {
actual := usage.Status.Hard[k]
actualValue := actual.String()
expectedValue := v.String()
if expectedValue != actualValue {
t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
}
}
for k, v := range expectedUsage.Status.Used {
actual := usage.Status.Used[k]
actualValue := actual.String()
expectedValue := v.String()
if expectedValue != actualValue {
t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
}
}
}