mirror of https://github.com/k3s-io/k3s
Merge pull request #3365 from smarterclayton/test_coverage
Reenable coverage and race detection on Travis
commit 6cd37637f5
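The change turns the Go race detector (-race) and coverage instrumentation (-cover with -covermode=atomic, the counting mode that is safe to combine with the race detector) back on for the Travis test run below. As a purely illustrative aside, not code from this repository, the following test contains the kind of unsynchronized access that go test -race reports as a DATA RACE while an ordinary run usually passes silently:

package example

import "testing"

// TestRacy writes counter from two goroutines without synchronization.
func TestRacy(t *testing.T) {
	counter := 0
	done := make(chan struct{})
	go func() {
		counter++ // write from the spawned goroutine
		close(done)
	}()
	counter++ // concurrent write from the test's goroutine
	<-done
	if counter == 0 {
		t.Fatal("counter should have been incremented")
	}
}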
@@ -14,7 +14,7 @@ install:
   - ./hack/build-go.sh
 
 script:
-  - KUBE_TIMEOUT='-timeout 60s' ./hack/test-go.sh
+  - KUBE_RACE="-race" KUBE_COVER="-cover -covermode=atomic" KUBE_TIMEOUT='-timeout 60s' ./hack/test-go.sh
   - PATH=$HOME/gopath/bin:./third_party/etcd:$PATH ./hack/test-cmd.sh
   - PATH=$HOME/gopath/bin:./third_party/etcd:$PATH ./hack/test-integration.sh
@@ -44,9 +44,9 @@ kube::test::find_pkgs() {
 }
 
 # -covermode=atomic becomes default with -race in Go >=1.3
-KUBE_COVER="" #${KUBE_COVER:--cover -covermode=atomic}
 KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 120s}
-KUBE_RACE="" #${KUBE_RACE:--race}
+KUBE_COVER=${KUBE_COVER:-} # use KUBE_COVER="-cover -covermode=atomic" for full coverage
+KUBE_RACE=${KUBE_RACE:-} # use KUBE_RACE="-race" to enable race testing
 
 kube::test::usage() {
   kube::log::usage_from_stdin <<EOF
@@ -39,6 +39,7 @@ kube::etcd::start
 
 kube::log::status "Running integration test cases"
 KUBE_GOFLAGS="-tags 'integration no-docker' " \
+  KUBE_RACE="-race" \
   "${KUBE_ROOT}/hack/test-go.sh" test/integration
 
 kube::log::status "Running integration test scenario"
@@ -30,6 +30,9 @@ import (
 	"github.com/golang/glog"
 )
 
+// retryEventSleep is the time between record failures to retry. Available for test alteration.
+var retryEventSleep = 1 * time.Second
+
 // EventRecorder knows how to store events (client.Client implements it.)
 // EventRecorder must respect the namespace that will be embedded in 'event'.
 // It is assumed that EventRecorder will return the same sorts of errors as
@@ -41,6 +44,7 @@ type EventRecorder interface {
 // StartRecording starts sending events to recorder. Call once while initializing
 // your binary. Subsequent calls will be ignored. The return value can be ignored
 // or used to stop recording, if desired.
+// TODO: make me an object with parameterizable queue length and retry interval
 func StartRecording(recorder EventRecorder, source api.EventSource) watch.Interface {
 	return GetEvents(func(event *api.Event) {
 		// Make a copy before modification, because there could be multiple listeners.
@@ -80,7 +84,7 @@ func StartRecording(recorder EventRecorder, source api.EventSource) watch.Interf
 				break
 			}
 			glog.Errorf("Unable to write event: '%v' (will retry in 1 second)", err)
-			time.Sleep(1 * time.Second)
+			time.Sleep(retryEventSleep)
 		}
 	})
 }
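The hunks above in the record package replace a hard-coded one-second sleep with the package-level variable retryEventSleep so that tests can shrink the delay; the test hunks that follow set it to a microsecond in an init function. A minimal sketch of the same pattern, using hypothetical names rather than the repository's code:

package retry

import "time"

// sleepBetweenRetries is a package-level knob: production code keeps the
// default, while a test in the same package may shorten it.
var sleepBetweenRetries = 1 * time.Second

// writeWithRetry calls write until it succeeds or maxTries attempts have
// been made, sleeping between attempts.
func writeWithRetry(write func() error, maxTries int) error {
	var err error
	for i := 0; i < maxTries; i++ {
		if err = write(); err == nil {
			return nil
		}
		time.Sleep(sleepBetweenRetries)
	}
	return err
}

A test in the same package can then declare func init() { sleepBetweenRetries = time.Microsecond }, which is the shape of the change to the record package's tests below.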
@@ -14,22 +14,26 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package record_test
+package record
 
 import (
 	"fmt"
 	"reflect"
 	"strings"
 	"testing"
+	"time"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 )
 
+func init() {
+	retryEventSleep = 1 * time.Microsecond
+}
+
 type testEventRecorder struct {
 	OnEvent func(e *api.Event) (*api.Event, error)
 }
@@ -142,16 +146,16 @@ func TestEventf(t *testing.T) {
 				return event, nil
 			},
 		}
-		recorder := record.StartRecording(&testEvents, api.EventSource{Component: "eventTest"})
-		logger := record.StartLogging(t.Logf) // Prove that it is useful
-		logger2 := record.StartLogging(func(formatter string, args ...interface{}) {
+		recorder := StartRecording(&testEvents, api.EventSource{Component: "eventTest"})
+		logger := StartLogging(t.Logf) // Prove that it is useful
+		logger2 := StartLogging(func(formatter string, args ...interface{}) {
 			if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a {
 				t.Errorf("Expected '%v', got '%v'", e, a)
 			}
 			called <- struct{}{}
 		})
 
-		record.Eventf(item.obj, item.status, item.reason, item.messageFmt, item.elements...)
+		Eventf(item.obj, item.status, item.reason, item.messageFmt, item.elements...)
 
 		<-called
 		<-called
@@ -204,7 +208,7 @@ func TestWriteEventError(t *testing.T) {
 	}
 	done := make(chan struct{})
 
-	defer record.StartRecording(
+	defer StartRecording(
 		&testEventRecorder{
 			OnEvent: func(event *api.Event) (*api.Event, error) {
 				if event.Message == "finished" {
@@ -227,9 +231,9 @@ func TestWriteEventError(t *testing.T) {
 	).Stop()
 
 	for caseName := range table {
-		record.Event(ref, "Status", "Reason", caseName)
+		Event(ref, "Status", "Reason", caseName)
 	}
-	record.Event(ref, "Status", "Reason", "finished")
+	Event(ref, "Status", "Reason", "finished")
 	<-done
 
 	for caseName, item := range table {
@@ -107,6 +107,12 @@ func SaveNamespaceInfo(path string, ns *NamespaceInfo) error {
 	return err
 }
 
+// extracted for test speed
+var (
+	updatePollInterval = 5 * time.Second
+	updatePollTimeout  = 300 * time.Second
+)
+
 // Update performs a rolling update of a collection of pods.
 // 'name' points to a replication controller.
 // 'client' is used for updating pods.
@@ -149,7 +155,7 @@ func Update(ctx api.Context, name string, client client.Interface, updatePeriod
 		}
 		time.Sleep(updatePeriod)
 	}
-	return wait.Poll(time.Second*5, time.Second*300, func() (bool, error) {
+	return wait.Poll(updatePollInterval, updatePollTimeout, func() (bool, error) {
 		podList, err := client.Pods(api.Namespace(ctx)).List(s)
 		if err != nil {
 			return false, err
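With updatePollInterval and updatePollTimeout extracted above, Update polls through wait.Poll using those variables instead of literal 5-second and 300-second values, and the test hunks that follow drop the interval to a millisecond. As used here, wait.Poll keeps invoking the condition function on an interval until it returns true or an error, or the timeout expires; a rough stand-in for that behaviour (assumed semantics, not the wait package's implementation):

package poll

import (
	"fmt"
	"time"
)

// pollUntil calls condition every interval until it reports done or an
// error, giving up once timeout has elapsed.
func pollUntil(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("condition not met after %v", timeout)
		}
		time.Sleep(interval)
	}
}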
@@ -23,6 +23,7 @@ import (
 	"os"
 	"reflect"
 	"testing"
+	"time"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
@@ -35,6 +36,10 @@ func validateAction(expectedAction, actualAction client.FakeAction, t *testing.T
 	}
 }
 
+func init() {
+	updatePollInterval = 1 * time.Millisecond
+}
+
 func TestUpdateWithPods(t *testing.T) {
 	fakeClient := client.Fake{
 		PodsList: api.PodList{
@@ -112,6 +112,9 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSe
 	podBackoff := podBackoff{
 		perPodBackoff: map[string]*backoffEntry{},
 		clock:         realClock{},
+
+		defaultDuration: 1 * time.Second,
+		maxDuration:     60 * time.Second,
 	}
 
 	return &scheduler.Config{
@@ -245,9 +248,11 @@ type backoffEntry struct {
 }
 
 type podBackoff struct {
-	perPodBackoff map[string]*backoffEntry
-	lock          sync.Mutex
-	clock         clock
+	perPodBackoff   map[string]*backoffEntry
+	lock            sync.Mutex
+	clock           clock
+	defaultDuration time.Duration
+	maxDuration     time.Duration
 }
 
 func (p *podBackoff) getEntry(podID string) *backoffEntry {
@@ -255,7 +260,7 @@ func (p *podBackoff) getEntry(podID string) *backoffEntry {
 	defer p.lock.Unlock()
 	entry, ok := p.perPodBackoff[podID]
 	if !ok {
-		entry = &backoffEntry{backoff: 1 * time.Second}
+		entry = &backoffEntry{backoff: p.defaultDuration}
 		p.perPodBackoff[podID] = entry
 	}
 	entry.lastUpdate = p.clock.Now()
@@ -266,8 +271,8 @@ func (p *podBackoff) getBackoff(podID string) time.Duration {
 	entry := p.getEntry(podID)
 	duration := entry.backoff
 	entry.backoff *= 2
-	if entry.backoff > 60*time.Second {
-		entry.backoff = 60 * time.Second
+	if entry.backoff > p.maxDuration {
+		entry.backoff = p.maxDuration
 	}
 	glog.V(4).Infof("Backing off %s for pod %s", duration.String(), podID)
 	return duration
@@ -282,7 +287,7 @@ func (p *podBackoff) gc() {
 	defer p.lock.Unlock()
 	now := p.clock.Now()
 	for podID, entry := range p.perPodBackoff {
-		if now.Sub(entry.lastUpdate) > 60*time.Second {
+		if now.Sub(entry.lastUpdate) > p.maxDuration {
 			delete(p.perPodBackoff, podID)
 		}
 	}
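In the podBackoff hunks above, getEntry seeds new entries from defaultDuration, getBackoff returns the current delay and then doubles it with a clamp at maxDuration, and gc drops entries whose lastUpdate is older than maxDuration; all three previously hard-coded 1 or 60 seconds. Because the bounds are now struct fields, the tests below can build a podBackoff with millisecond durations. The doubling-and-clamping step in isolation, as a sketch with hypothetical names:

package backoff

import "time"

// next returns the delay to use for the current failure and the doubled,
// capped delay to remember for the next one.
func next(current, max time.Duration) (use, remember time.Duration) {
	use = current
	remember = current * 2
	if remember > max {
		remember = max
	}
	return use, remember
}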
@@ -118,8 +118,10 @@ func TestDefaultErrorFunc(t *testing.T) {
 	factory := NewConfigFactory(client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()}))
 	queue := cache.NewFIFO()
 	podBackoff := podBackoff{
-		perPodBackoff: map[string]*backoffEntry{},
-		clock:         &fakeClock{},
+		perPodBackoff:   map[string]*backoffEntry{},
+		clock:           &fakeClock{},
+		defaultDuration: 1 * time.Millisecond,
+		maxDuration:     1 * time.Second,
 	}
 	errFunc := factory.makeDefaultErrorFunc(&podBackoff, queue)
 
@@ -203,8 +205,10 @@ func TestBind(t *testing.T) {
 func TestBackoff(t *testing.T) {
 	clock := fakeClock{}
 	backoff := podBackoff{
-		perPodBackoff: map[string]*backoffEntry{},
-		clock:         &clock,
+		perPodBackoff:   map[string]*backoffEntry{},
+		clock:           &clock,
+		defaultDuration: 1 * time.Second,
+		maxDuration:     60 * time.Second,
 	}
 
 	tests := []struct {
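TestDefaultErrorFunc and TestBackoff above inject a fakeClock through the clock field, so the code under test reads time from p.clock.Now() and the tests can drive time-dependent behaviour without sleeping. A plausible shape for such a fake, as a hypothetical sketch (the real fakeClock in the test file may differ):

package fakeclock

import "time"

// clock is the seam the backoff code depends on; the hunks above call
// p.clock.Now().
type clock interface {
	Now() time.Time
}

// fakeClock hands out a manually controlled time.
type fakeClock struct {
	t time.Time
}

func (f *fakeClock) Now() time.Time { return f.t }

// advance moves the fake time forward, for example past maxDuration
// before calling gc, so expiry paths run deterministically.
func (f *fakeClock) advance(d time.Duration) { f.t = f.t.Add(d) }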