mirror of https://github.com/k3s-io/k3s
Merge pull request #35122 from gmarek/clientset
Automatic merge from submit-queue. Use clientset in GetReadySchedulableNodesOrDie
commit
26aa5a9e2f
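The diff below is a mechanical migration of the e2e test helpers: functions such as GetReadySchedulableNodesOrDie, NodeSSHHosts, getNodePublicIps, and nodeHasDiskPressure now take the generated clientset (clientset.Interface) instead of the unversioned *client.Client, and their call sites switch from f.Client to f.ClientSet. As an orientation aid, here is a minimal caller-side sketch of the pattern after the change; the package clause and test names are hypothetical, and the Ginkgo/Gomega and framework imports are the ones these e2e files already use.

package e2e_sketch // hypothetical package name, for illustration only

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("Clientset migration [Illustrative]", func() {
	f := framework.NewDefaultFramework("clientset-sketch")

	It("should list ready schedulable nodes through the clientset", func() {
		// After this change the helper takes clientset.Interface, so tests pass
		// f.ClientSet; f.Client (*client.Client) is still handed to helpers that
		// have not been migrated yet.
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		Expect(len(nodes.Items)).To(BeNumerically(">", 0))
	})
})
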
@@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 c = f.Client
 framework.SkipUnlessProviderIs("gce", "gke")

-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeCount = len(nodes.Items)
 Expect(nodeCount).NotTo(BeZero())
 cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]

@@ -22,6 +22,7 @@ import (
 "strings"

 "k8s.io/kubernetes/pkg/api"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/chaosmonkey"
@@ -59,7 +60,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
 v, err := realVersion(framework.TestContext.UpgradeTarget)
 framework.ExpectNoError(err)
 framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
-framework.ExpectNoError(checkNodesVersions(f.Client, v))
+framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
 })
 cm.Register(func(sem *chaosmonkey.Semaphore) {
 // Close over f.
@@ -73,7 +74,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
 v, err := realVersion(framework.TestContext.UpgradeTarget)
 framework.ExpectNoError(err)
 framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
-framework.ExpectNoError(checkNodesVersions(f.Client, v))
+framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
 })
 cm.Register(func(sem *chaosmonkey.Semaphore) {
 // Close over f.
@@ -91,7 +92,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
 framework.ExpectNoError(framework.MasterUpgrade(v))
 framework.ExpectNoError(checkMasterVersion(f.Client, v))
 framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
-framework.ExpectNoError(checkNodesVersions(f.Client, v))
+framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
 })
 cm.Register(func(sem *chaosmonkey.Semaphore) {
 // Close over f.
@@ -107,7 +108,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
 framework.ExpectNoError(framework.MasterUpgrade(v))
 framework.ExpectNoError(checkMasterVersion(f.Client, v))
 framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
-framework.ExpectNoError(checkNodesVersions(f.Client, v))
+framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
 })
 cm.Register(func(sem *chaosmonkey.Semaphore) {
 // Close over f.
@@ -146,7 +147,7 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD
 // Setup
 serviceName := "service-test"

-jig := NewServiceTestJig(f.Client, serviceName)
+jig := NewServiceTestJig(f.Client, f.ClientSet, serviceName)
 // nodeIP := pickNodeIP(jig.Client) // for later

 By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + f.Namespace.Name)
@@ -209,8 +210,8 @@ func checkMasterVersion(c *client.Client, want string) error {
 return nil
 }

-func checkNodesVersions(c *client.Client, want string) error {
-l := framework.GetReadySchedulableNodesOrDie(c)
+func checkNodesVersions(cs clientset.Interface, want string) error {
+l := framework.GetReadySchedulableNodesOrDie(cs)
 for _, n := range l.Items {
 // We do prefix trimming and then matching because:
 // want looks like: 0.19.3-815-g50e67d4

@@ -300,7 +300,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {

 It("Kubelet should not restart containers across restart", func() {

-nodeIPs, err := getNodePublicIps(f.Client)
+nodeIPs, err := getNodePublicIps(f.ClientSet)
 framework.ExpectNoError(err)
 preRestarts, badNodes := getContainerRestarts(f.Client, ns, labelSelector)
 if preRestarts != 0 {

@@ -27,6 +27,7 @@ import (
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
 "k8s.io/kubernetes/pkg/apis/extensions"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/kubectl"
 "k8s.io/kubernetes/pkg/labels"
@@ -68,7 +69,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 } else {
 framework.Logf("unable to dump pods: %v", err)
 }
-err := clearDaemonSetNodeLabels(f.Client)
+err := clearDaemonSetNodeLabels(f.Client, f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 })

@@ -83,7 +84,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 BeforeEach(func() {
 ns = f.Namespace.Name
 c = f.Client
-err := clearDaemonSetNodeLabels(c)
+err := clearDaemonSetNodeLabels(c, f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 })

@@ -180,7 +181,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

 By("Change label of node, check that daemon pod is launched.")
-nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
 newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
 Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@@ -248,7 +249,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

 By("Change label of node, check that daemon pod is launched.")
-nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
 newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
 Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@@ -284,8 +285,8 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
 return daemonSetLabels, otherLabels
 }

-func clearDaemonSetNodeLabels(c *client.Client) error {
-nodeList := framework.GetReadySchedulableNodesOrDie(c)
+func clearDaemonSetNodeLabels(c *client.Client, cs clientset.Interface) error {
+nodeList := framework.GetReadySchedulableNodesOrDie(cs)
 for _, node := range nodeList.Items {
 _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
 if err != nil {

@@ -309,7 +309,7 @@ func makeConfigMapVolumes(configMapNames []string) (volumes []api.Volume, volume

 func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volumeMounts []api.VolumeMount, podCount int32) {
 rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
-nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
 targetNode := nodeList.Items[0]

@@ -165,7 +165,7 @@ var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {
 f := framework.NewDefaultFramework("petstore")

 It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeCount = len(nodes.Items)

 loadGenerators := nodeCount

@@ -261,7 +261,7 @@ func (f *Framework) BeforeEach() {
 f.logsSizeWaitGroup = sync.WaitGroup{}
 f.logsSizeWaitGroup.Add(1)
 f.logsSizeCloseChannel = make(chan bool)
-f.logsSizeVerifier = NewLogsVerifier(f.Client, f.logsSizeCloseChannel)
+f.logsSizeVerifier = NewLogsVerifier(f.Client, f.ClientSet, f.logsSizeCloseChannel)
 go func() {
 f.logsSizeVerifier.Run()
 f.logsSizeWaitGroup.Done()
@@ -659,7 +659,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str

 // CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
 func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n api.Node) api.PodSpec, maxCount int) map[string]string {
-nodes := GetReadySchedulableNodesOrDie(f.Client)
+nodes := GetReadySchedulableNodesOrDie(f.ClientSet)
 labels := map[string]string{
 "app": appName + "-pod",
 }

@@ -25,6 +25,7 @@ import (
 "text/tabwriter"
 "time"

+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 )

@@ -65,6 +66,7 @@ type LogSizeGatherer struct {
 // It oversees a <workersNo> workers which do the gathering.
 type LogsSizeVerifier struct {
 client *client.Client
+clientset clientset.Interface
 stopChannel chan bool
 // data stores LogSizeData groupped per IP and log_path
 data *LogsSizeData
@@ -142,8 +144,8 @@ func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int
 }

 // NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
-func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier {
-nodeAddresses, err := NodeSSHHosts(c)
+func NewLogsVerifier(c *client.Client, cs clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
+nodeAddresses, err := NodeSSHHosts(cs)
 ExpectNoError(err)
 masterAddress := GetMasterHost() + ":22"

@@ -152,6 +154,7 @@ func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier

 verifier := &LogsSizeVerifier{
 client: c,
+clientset: cs,
 stopChannel: stopChannel,
 data: prepareData(masterAddress, nodeAddresses),
 masterAddress: masterAddress,

@@ -432,7 +432,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {

 By("Getting node addresses")
 ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
-nodeList := GetReadySchedulableNodesOrDie(config.f.Client)
+nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
 config.ExternalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
 if len(config.ExternalAddrs) < 2 {
 // fall back to legacy IPs
@@ -483,7 +483,7 @@ func shuffleNodes(nodes []api.Node) []api.Node {

 func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
 ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
-nodeList := GetReadySchedulableNodesOrDie(config.f.Client)
+nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)

 // To make this test work reasonably fast in large clusters,
 // we limit the number of NetProxyPods to no more than 100 ones

@@ -2336,11 +2336,11 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
 }

 // waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
-func waitListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
+func waitListSchedulableNodesOrDie(c clientset.Interface) *api.NodeList {
 var nodes *api.NodeList
 var err error
 if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
-nodes, err = c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+nodes, err = c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
 "spec.unschedulable": "false",
 }.AsSelector()})
 return err == nil, nil
@@ -2365,7 +2365,7 @@ func isNodeSchedulable(node *api.Node) bool {
 // 1) Needs to be schedulable.
 // 2) Needs to be ready.
 // If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
-func GetReadySchedulableNodesOrDie(c *client.Client) (nodes *api.NodeList) {
+func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *api.NodeList) {
 nodes = waitListSchedulableNodesOrDie(c)
 // previous tests may have cause failures of some nodes. Let's skip
 // 'Not Ready' nodes, just in case (there is no need to fail the test).
@@ -3254,7 +3254,7 @@ func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []strin
 // NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node.
 // It returns an error if it can't find an external IP for every node, though it still returns all
 // hosts that it found in that case.
-func NodeSSHHosts(c *client.Client) ([]string, error) {
+func NodeSSHHosts(c clientset.Interface) ([]string, error) {
 nodelist := waitListSchedulableNodesOrDie(c)

 // TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).

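For context on the hunks above: waitListSchedulableNodesOrDie is the retry wrapper that GetReadySchedulableNodesOrDie and NodeSSHHosts sit on, and the only behavioural difference after this change is that the list call goes through the clientset's Core() group. A condensed, hypothetical sketch of that retry-wrapped listing follows (not the file's exact code; the poll and timeout constants stand in for the framework's own Poll and SingleCallTimeout, and the imports mirror the ones already shown in this diff).

package e2e_sketch // hypothetical; mirrors test/e2e/framework of this era

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/util/wait"
)

// Assumed stand-ins for the framework's Poll and SingleCallTimeout constants.
const (
	poll              = 2 * time.Second
	singleCallTimeout = 5 * time.Minute
)

// listSchedulableNodes is a hypothetical helper showing the pattern: retry the
// node list through the generated clientset until it succeeds or times out.
func listSchedulableNodes(c clientset.Interface) (*api.NodeList, error) {
	var nodes *api.NodeList
	var err error
	pollErr := wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
		// The clientset routes the request through the Core() API group.
		nodes, err = c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		// Treat list errors as transient and keep polling until the timeout.
		return err == nil, nil
	})
	if pollErr != nil {
		return nil, err
	}
	return nodes, nil
}
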
@@ -98,7 +98,7 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
 // label with the given node pool name.
 func nodesWithPoolLabel(f *framework.Framework, poolName string) int {
 nodeCount := 0
-nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 for _, node := range nodeList.Items {
 if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName {
 nodeCount++

@@ -137,7 +137,7 @@ var _ = framework.KubeDescribe("kubelet", func() {

 BeforeEach(func() {
 c = f.Client
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 numNodes = len(nodes.Items)
 nodeNames = sets.NewString()
 // If there are a lot of nodes, we don't want to use all of them

@@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
 if err := framework.WaitForPodsSuccess(f.Client, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
 framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adulterated", imagePrePullingLongTimeout)
 }
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeNames = sets.NewString()
 for _, node := range nodes.Items {
 nodeNames.Insert(node.Name)

@@ -107,7 +107,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c))

 ns = f.Namespace.Name
-nodes := framework.GetReadySchedulableNodesOrDie(c)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeCount = len(nodes.Items)
 Expect(nodeCount).NotTo(BeZero())

@@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("Logging soak [Performance] [Slow] [Disruptive]",
 // was produced in each and every pod at least once. The final arg is the timeout for the test to verify all the pods got logs.
 func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {

-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 totalPods := len(nodes.Items)
 Expect(totalPods).NotTo(Equal(0))

@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
 client := f.Client
 framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready")

-nodelist := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodelist := framework.GetReadySchedulableNodesOrDie(f.ClientSet)

 const ns = "static-pods"
 numpods := int32(len(nodelist.Items))

@@ -49,7 +49,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {

 It("should grab all metrics from a Kubelet.", func() {
 By("Proxying to Node through the API server")
-nodes := framework.GetReadySchedulableNodesOrDie(c)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 Expect(nodes.Items).NotTo(BeEmpty())
 response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
 framework.ExpectNoError(err)

@@ -49,7 +49,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
 maxBandwidthBits := gceBandwidthBitsEstimate

 It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 totalPods := len(nodes.Items)
 // for a single service, we expect to divide bandwidth between the network. Very crude estimate.
 expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
@@ -110,7 +110,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:

 // Calculate expected number of clients based on total nodes.
 expectedCli := func() int {
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
 }()

@@ -24,6 +24,7 @@ import (
 cadvisorapi "github.com/google/cadvisor/info/v1"
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/resource"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/util/wait"
@@ -67,13 +68,15 @@ const (
 // Flaky issue #20015. We have no clear path for how to test this functionality in a non-flaky way.
 var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
 var c *client.Client
+var cs clientset.Interface
 var unfilledNodeName, recoveredNodeName string
 f := framework.NewDefaultFramework("node-outofdisk")

 BeforeEach(func() {
 c = f.Client
+cs = f.ClientSet

-nodelist := framework.GetReadySchedulableNodesOrDie(c)
+nodelist := framework.GetReadySchedulableNodesOrDie(cs)

 // Skip this test on small clusters. No need to fail since it is not a use
 // case that any cluster of small size needs to support.
@@ -87,7 +90,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu

 AfterEach(func() {

-nodelist := framework.GetReadySchedulableNodesOrDie(c)
+nodelist := framework.GetReadySchedulableNodesOrDie(cs)
 Expect(len(nodelist.Items)).ToNot(BeZero())
 for _, node := range nodelist.Items {
 if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@@ -150,7 +153,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
 }
 })

-nodelist := framework.GetReadySchedulableNodesOrDie(c)
+nodelist := framework.GetReadySchedulableNodesOrDie(cs)
 Expect(len(nodelist.Items)).To(BeNumerically(">", 1))

 nodeToRecover := nodelist.Items[1]

@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {

 podClient = f.Client.Pods(f.Namespace.Name)
 nodeClient = f.Client.Nodes()
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)

 Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")

@@ -285,7 +285,7 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func(

 It("should recreate evicted petset", func() {
 By("looking for a node to schedule pet set and pod")
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 node := nodes.Items[0]

 By("creating pod with conflicting port in namespace " + f.Namespace.Name)

@@ -27,7 +27,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 "k8s.io/kubernetes/pkg/util/intstr"
 "k8s.io/kubernetes/pkg/util/net"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -300,9 +300,9 @@ func truncate(b []byte, maxLen int) []byte {
 return b2
 }

-func pickNode(c *client.Client) (string, error) {
+func pickNode(cs clientset.Interface) (string, error) {
 // TODO: investigate why it doesn't work on master Node.
-nodes := framework.GetReadySchedulableNodesOrDie(c)
+nodes := framework.GetReadySchedulableNodesOrDie(cs)
 if len(nodes.Items) == 0 {
 return "", fmt.Errorf("no nodes exist, can't test node proxy")
 }
@@ -310,7 +310,7 @@ func pickNode(c *client.Client) (string, error) {
 }

 func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
-node, err := pickNode(f.Client)
+node, err := pickNode(f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 // TODO: Change it to test whether all requests succeeded when requests
 // not reaching Kubelet issue is debugged.

@@ -22,6 +22,7 @@ import (
 "time"

 "k8s.io/kubernetes/pkg/api"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/labels"
@@ -89,32 +90,32 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 It("each node by ordering clean reboot and ensure they function upon restart", func() {
 // clean shutdown and restart
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &")
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &")
 })

 It("each node by ordering unclean reboot and ensure they function upon restart", func() {
 // unclean shutdown and restart
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
 })

 It("each node by triggering kernel panic and ensure they function upon restart", func() {
 // kernel panic
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
 })

 It("each node by switching off the network interface and ensure they function upon switch on", func() {
 // switch the network interface off for a while to simulate a network outage
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before network is down.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && (sudo ifdown eth0 || sudo ip link set eth0 down) && sleep 120 && (sudo ifup eth0 || sudo ip link set eth0 up)' >/dev/null 2>&1 &")
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && (sudo ifdown eth0 || sudo ip link set eth0 down) && sleep 120 && (sudo ifup eth0 || sudo ip link set eth0 up)' >/dev/null 2>&1 &")
 })

 It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() {
 // tell the firewall to drop all inbound packets for a while
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets.
 // We still accept packages send from localhost to prevent monit from restarting kubelet.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && sudo iptables -I INPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I INPUT 2 -j DROP && "+
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I INPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I INPUT 2 -j DROP && "+
 " sleep 120 && sudo iptables -D INPUT -j DROP && sudo iptables -D INPUT -s 127.0.0.1 -j ACCEPT' >/dev/null 2>&1 &")
 })

@@ -122,14 +123,14 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 // tell the firewall to drop all outbound packets for a while
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets.
 // We still accept packages send to localhost to prevent monit from restarting kubelet.
-testReboot(f.Client, "nohup sh -c 'sleep 10 && sudo iptables -I OUTPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I OUTPUT 2 -j DROP && "+
+testReboot(f.Client, f.ClientSet, "nohup sh -c 'sleep 10 && sudo iptables -I OUTPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I OUTPUT 2 -j DROP && "+
 " sleep 120 && sudo iptables -D OUTPUT -j DROP && sudo iptables -D OUTPUT -s 127.0.0.1 -j ACCEPT' >/dev/null 2>&1 &")
 })
 })

-func testReboot(c *client.Client, rebootCmd string) {
+func testReboot(c *client.Client, cs clientset.Interface, rebootCmd string) {
 // Get all nodes, and kick off the test on each.
-nodelist := framework.GetReadySchedulableNodesOrDie(c)
+nodelist := framework.GetReadySchedulableNodesOrDie(cs)
 result := make([]bool, len(nodelist.Items))
 wg := sync.WaitGroup{}
 wg.Add(len(nodelist.Items))

@@ -38,7 +38,7 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
 BeforeEach(func() {
 framework.SkipUnlessProviderIs("gce", "gke")
 ns = f.Namespace.Name
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeCount := len(nodes.Items)
 Expect(nodeCount).NotTo(BeZero())

@@ -33,7 +33,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/api/service"
-"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/controller/endpoint"
 "k8s.io/kubernetes/pkg/labels"
@@ -74,9 +74,11 @@ var _ = framework.KubeDescribe("Services", func() {
 f := framework.NewDefaultFramework("services")

 var c *client.Client
+var cs clientset.Interface

 BeforeEach(func() {
 c = f.Client
+cs = f.ClientSet
 })

 // TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
@@ -237,7 +239,7 @@ var _ = framework.KubeDescribe("Services", func() {
 ns := f.Namespace.Name

 By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
-jig := NewServiceTestJig(c, serviceName)
+jig := NewServiceTestJig(c, cs, serviceName)
 servicePort := 8080
 tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort))
 jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP)
@@ -250,7 +252,7 @@ var _ = framework.KubeDescribe("Services", func() {
 framework.Logf("sourceip-test cluster ip: %s", serviceIp)

 By("Picking multiple nodes")
-nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)

 if len(nodes.Items) == 1 {
 framework.Skipf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider)
@@ -296,7 +298,7 @@ var _ = framework.KubeDescribe("Services", func() {
 podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods)
 Expect(err).NotTo(HaveOccurred())

-hosts, err := framework.NodeSSHHosts(c)
+hosts, err := framework.NodeSSHHosts(f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 if len(hosts) == 0 {
 framework.Failf("No ssh-able nodes")
@@ -356,7 +358,7 @@ var _ = framework.KubeDescribe("Services", func() {
 framework.Failf("VIPs conflict: %v", svc1IP)
 }

-hosts, err := framework.NodeSSHHosts(c)
+hosts, err := framework.NodeSSHHosts(f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 if len(hosts) == 0 {
 framework.Failf("No ssh-able nodes")
@@ -397,7 +399,7 @@ var _ = framework.KubeDescribe("Services", func() {
 podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
 Expect(err).NotTo(HaveOccurred())

-hosts, err := framework.NodeSSHHosts(c)
+hosts, err := framework.NodeSSHHosts(f.ClientSet)
 Expect(err).NotTo(HaveOccurred())
 if len(hosts) == 0 {
 framework.Failf("No ssh-able nodes")
@@ -436,8 +438,8 @@ var _ = framework.KubeDescribe("Services", func() {
 serviceName := "nodeport-test"
 ns := f.Namespace.Name

-jig := NewServiceTestJig(c, serviceName)
-nodeIP := pickNodeIP(jig.Client) // for later
+jig := NewServiceTestJig(c, cs, serviceName)
+nodeIP := pickNodeIP(jig.ClientSet) // for later

 By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
 service := jig.CreateTCPServiceOrFail(ns, func(svc *api.Service) {
@@ -475,7 +477,7 @@ var _ = framework.KubeDescribe("Services", func() {
 }
 loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault
 largeClusterMinNodesNumber := 100
-if nodes := framework.GetReadySchedulableNodesOrDie(c); len(nodes.Items) > largeClusterMinNodesNumber {
+if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > largeClusterMinNodesNumber {
 loadBalancerCreateTimeout = loadBalancerCreateTimeoutLarge
 }

@@ -492,8 +494,8 @@ var _ = framework.KubeDescribe("Services", func() {
 ns2 := namespacePtr.Name // LB2 in ns2 on UDP
 framework.Logf("namespace for UDP test: %s", ns2)

-jig := NewServiceTestJig(c, serviceName)
-nodeIP := pickNodeIP(jig.Client) // for later
+jig := NewServiceTestJig(c, cs, serviceName)
+nodeIP := pickNodeIP(jig.ClientSet) // for later

 // Test TCP and UDP Services. Services with the same name in different
 // namespaces should get different node ports and load balancers.
@@ -1078,12 +1080,12 @@ var _ = framework.KubeDescribe("Services", func() {
 loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault

 largeClusterMinNodesNumber := 100
-if nodes := framework.GetReadySchedulableNodesOrDie(c); len(nodes.Items) > largeClusterMinNodesNumber {
+if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > largeClusterMinNodesNumber {
 loadBalancerCreateTimeout = loadBalancerCreateTimeoutLarge
 }
 namespace := f.Namespace.Name
 serviceName := "external-local"
-jig := NewServiceTestJig(c, serviceName)
+jig := NewServiceTestJig(c, cs, serviceName)
 By("creating a service " + namespace + "/" + namespace + " with type=LoadBalancer and annotation for local-traffic-only")
 svc := jig.CreateTCPServiceOrFail(namespace, func(svc *api.Service) {
 svc.Spec.Type = api.ServiceTypeLoadBalancer
@@ -1132,7 +1134,7 @@ var _ = framework.KubeDescribe("Services", func() {
 framework.Logf("Pod for service %s/%s is on node %s", namespace, serviceName, readyHostName)
 // HealthCheck responder validation - iterate over all node IPs and check their HC responses
 // Collect all node names and their public IPs - the nodes and ips slices parallel each other
-nodes := framework.GetReadySchedulableNodesOrDie(jig.Client)
+nodes := framework.GetReadySchedulableNodesOrDie(jig.ClientSet)
 ips := collectAddresses(nodes, api.NodeExternalIP)
 if len(ips) == 0 {
 ips = collectAddresses(nodes, api.NodeLegacyHostIP)
@@ -1384,8 +1386,8 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
 return ips
 }

-func getNodePublicIps(c *client.Client) ([]string, error) {
-nodes := framework.GetReadySchedulableNodesOrDie(c)
+func getNodePublicIps(cs clientset.Interface) ([]string, error) {
+nodes := framework.GetReadySchedulableNodesOrDie(cs)

 ips := collectAddresses(nodes, api.NodeExternalIP)
 if len(ips) == 0 {
@@ -1394,8 +1396,8 @@ func getNodePublicIps(c *client.Client) ([]string, error) {
 return ips, nil
 }

-func pickNodeIP(c *client.Client) string {
-publicIps, err := getNodePublicIps(c)
+func pickNodeIP(cs clientset.Interface) string {
+publicIps, err := getNodePublicIps(cs)
 Expect(err).NotTo(HaveOccurred())
 if len(publicIps) == 0 {
 framework.Failf("got unexpected number (%d) of public IPs", len(publicIps))
@@ -1641,7 +1643,7 @@ func startServeHostnameService(c *client.Client, ns, name string, port, replicas
 return podNames, serviceIP, nil
 }

-func stopServeHostnameService(c *client.Client, clientset internalclientset.Interface, ns, name string) error {
+func stopServeHostnameService(c *client.Client, clientset clientset.Interface, ns, name string) error {
 if err := framework.DeleteRCAndPods(c, clientset, ns, name); err != nil {
 return err
 }
@@ -1766,16 +1768,18 @@ func httpGetNoConnectionPool(url string) (*http.Response, error) {

 // A test jig to help testing.
 type ServiceTestJig struct {
-ID string
-Name string
-Client *client.Client
-Labels map[string]string
+ID string
+Name string
+Client *client.Client
+ClientSet clientset.Interface
+Labels map[string]string
 }

 // NewServiceTestJig allocates and inits a new ServiceTestJig.
-func NewServiceTestJig(client *client.Client, name string) *ServiceTestJig {
+func NewServiceTestJig(client *client.Client, cs clientset.Interface, name string) *ServiceTestJig {
 j := &ServiceTestJig{}
 j.Client = client
+j.ClientSet = cs
 j.Name = name
 j.ID = j.Name + "-" + string(uuid.NewUUID())
 j.Labels = map[string]string{"testid": j.ID}

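The ServiceTestJig change above is the one place where a type grows a field rather than swapping a parameter: the jig now carries both the unversioned client and the clientset, so partially migrated helpers keep working during the transition. A hedged usage fragment follows, meant to sit inside an It(...) body of this e2e package; the service name is illustrative, while NewServiceTestJig, pickNodeIP, and the jig fields are the ones defined in the diff.

// Illustrative fragment only: build the jig with both clients, as the updated
// call sites do, then read node data through the clientset-backed field.
jig := NewServiceTestJig(f.Client, f.ClientSet, "service-sketch")
nodeIP := pickNodeIP(jig.ClientSet) // node listing now goes through clientset.Interface
nodes := framework.GetReadySchedulableNodesOrDie(jig.ClientSet)
framework.Logf("jig %q sees %d schedulable nodes; picked node IP %s",
	jig.Name, len(nodes.Items), nodeIP)
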
@@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("SSH", func() {
 It("should SSH to all nodes and run commands", func() {
 // Get all nodes' external IPs.
 By("Getting all nodes' SSH-able IP addresses")
-hosts, err := framework.NodeSSHHosts(f.Client)
+hosts, err := framework.NodeSSHHosts(f.ClientSet)
 if err != nil {
 framework.Failf("Error getting node hostnames: %v", err)
 }

@@ -27,6 +27,7 @@ import (

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 )

@@ -152,7 +153,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",

 // The node should have disk pressure condition after the pods are evicted.
 if !nodeDiskPressureCondition {
-if !nodeHasDiskPressure(f.Client) {
+if !nodeHasDiskPressure(f.ClientSet) {
 return fmt.Errorf("expected disk pressure condition is not set")
 }
 nodeDiskPressureCondition = true
@@ -161,7 +162,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",

 // After eviction happens the pod is evicted so eventually the node disk pressure should be relieved.
 if !podRescheduleable {
-if nodeHasDiskPressure(f.Client) {
+if nodeHasDiskPressure(f.ClientSet) {
 return fmt.Errorf("expected disk pressure condition relief has not happened")
 }
 createIdlePod(verifyPodName, podClient)
@@ -212,8 +213,8 @@ func verifyPodEviction(podData *api.Pod) error {
 return nil
 }

-func nodeHasDiskPressure(c *client.Client) bool {
-nodeList := framework.GetReadySchedulableNodesOrDie(c)
+func nodeHasDiskPressure(cs clientset.Interface) bool {
+nodeList := framework.GetReadySchedulableNodesOrDie(cs)
 for _, condition := range nodeList.Items[0].Status.Conditions {
 if condition.Type == api.NodeDiskPressure {
 return condition.Status == api.ConditionTrue