mirror of https://github.com/k3s-io/k3s
Merge pull request #3365 from smarterclayton/test_coverage
Reenable coverage and race detection on Travis
commit
6cd37637f5
@@ -14,7 +14,7 @@ install:
   - ./hack/build-go.sh
 
 script:
-  - KUBE_TIMEOUT='-timeout 60s' ./hack/test-go.sh
+  - KUBE_RACE="-race" KUBE_COVER="-cover -covermode=atomic" KUBE_TIMEOUT='-timeout 60s' ./hack/test-go.sh
   - PATH=$HOME/gopath/bin:./third_party/etcd:$PATH ./hack/test-cmd.sh
   - PATH=$HOME/gopath/bin:./third_party/etcd:$PATH ./hack/test-integration.sh
 
@@ -44,9 +44,9 @@ kube::test::find_pkgs() {
 }
 
 # -covermode=atomic becomes default with -race in Go >=1.3
-KUBE_COVER="" #${KUBE_COVER:--cover -covermode=atomic}
 KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 120s}
-KUBE_RACE="" #${KUBE_RACE:--race}
+KUBE_COVER=${KUBE_COVER:-} # use KUBE_COVER="-cover -covermode=atomic" for full coverage
+KUBE_RACE=${KUBE_RACE:-} # use KUBE_RACE="-race" to enable race testing
 
 kube::test::usage() {
   kube::log::usage_from_stdin <<EOF
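With both variables defaulting to empty, race detection and coverage stay off in ordinary local runs; callers opt in by setting KUBE_RACE="-race" and KUBE_COVER="-cover -covermode=atomic", which is exactly what the Travis script line above now does.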
@@ -39,6 +39,7 @@ kube::etcd::start
 
 kube::log::status "Running integration test cases"
 KUBE_GOFLAGS="-tags 'integration no-docker' " \
+  KUBE_RACE="-race" \
   "${KUBE_ROOT}/hack/test-go.sh" test/integration
 
 kube::log::status "Running integration test scenario"
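The integration suite now opts into the race detector as well: KUBE_RACE="-race" is passed through to hack/test-go.sh alongside the existing integration build tags.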
@@ -30,6 +30,9 @@ import (
 	"github.com/golang/glog"
 )
 
+// retryEventSleep is the time between record failures to retry. Available for test alteration.
+var retryEventSleep = 1 * time.Second
+
 // EventRecorder knows how to store events (client.Client implements it.)
 // EventRecorder must respect the namespace that will be embedded in 'event'.
 // It is assumed that EventRecorder will return the same sorts of errors as
@@ -41,6 +44,7 @@ type EventRecorder interface {
 // StartRecording starts sending events to recorder. Call once while initializing
 // your binary. Subsequent calls will be ignored. The return value can be ignored
 // or used to stop recording, if desired.
+// TODO: make me an object with parameterizable queue length and retry interval
 func StartRecording(recorder EventRecorder, source api.EventSource) watch.Interface {
 	return GetEvents(func(event *api.Event) {
 		// Make a copy before modification, because there could be multiple listeners.
@@ -80,7 +84,7 @@ func StartRecording(recorder EventRecorder, source api.EventSource) watch.Interf
 				break
 			}
 			glog.Errorf("Unable to write event: '%v' (will retry in 1 second)", err)
-			time.Sleep(1 * time.Second)
+			time.Sleep(retryEventSleep)
 		}
 	})
 }
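The event.go hunks above replace the hard-coded one-second retry pause with the package-level retryEventSleep variable so tests can shrink it. Below is a minimal, self-contained sketch of that pattern; the names (retrySleep, sendWithRetry) are illustrative, not part of the actual record package API.

package main

import (
	"errors"
	"fmt"
	"time"
)

// retrySleep is the pause between failed attempts. Tests (or the demo in
// main below) can shrink it so retries do not slow things down.
var retrySleep = 1 * time.Second

// sendWithRetry calls write until it succeeds or attempts run out,
// sleeping retrySleep between failures.
func sendWithRetry(write func() error, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = write(); err == nil {
			return nil
		}
		fmt.Printf("write failed: %v (will retry in %s)\n", err, retrySleep)
		time.Sleep(retrySleep)
	}
	return err
}

func main() {
	retrySleep = 10 * time.Millisecond // a test would do this in init(), as event_test.go does below

	calls := 0
	err := sendWithRetry(func() error {
		calls++
		if calls < 3 {
			return errors.New("transient failure")
		}
		return nil
	}, 5)
	fmt.Println("calls:", calls, "err:", err)
}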
@@ -14,22 +14,26 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package record_test
+package record
 
 import (
 	"fmt"
 	"reflect"
 	"strings"
 	"testing"
+	"time"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 )
 
+func init() {
+	retryEventSleep = 1 * time.Microsecond
+}
+
 type testEventRecorder struct {
 	OnEvent func(e *api.Event) (*api.Event, error)
 }
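Note the package clause: the test moves from the external record_test package into record itself, which is what allows the init() above to assign the unexported retryEventSleep variable, and why the record. qualifiers disappear in the hunks that follow.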
@@ -142,16 +146,16 @@ func TestEventf(t *testing.T) {
 				return event, nil
 			},
 		}
-		recorder := record.StartRecording(&testEvents, api.EventSource{Component: "eventTest"})
-		logger := record.StartLogging(t.Logf) // Prove that it is useful
-		logger2 := record.StartLogging(func(formatter string, args ...interface{}) {
+		recorder := StartRecording(&testEvents, api.EventSource{Component: "eventTest"})
+		logger := StartLogging(t.Logf) // Prove that it is useful
+		logger2 := StartLogging(func(formatter string, args ...interface{}) {
 			if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a {
 				t.Errorf("Expected '%v', got '%v'", e, a)
 			}
 			called <- struct{}{}
 		})
 
-		record.Eventf(item.obj, item.status, item.reason, item.messageFmt, item.elements...)
+		Eventf(item.obj, item.status, item.reason, item.messageFmt, item.elements...)
 
 		<-called
 		<-called
@@ -204,7 +208,7 @@ func TestWriteEventError(t *testing.T) {
 	}
 	done := make(chan struct{})
 
-	defer record.StartRecording(
+	defer StartRecording(
 		&testEventRecorder{
 			OnEvent: func(event *api.Event) (*api.Event, error) {
 				if event.Message == "finished" {
@@ -227,9 +231,9 @@ func TestWriteEventError(t *testing.T) {
 	).Stop()
 
 	for caseName := range table {
-		record.Event(ref, "Status", "Reason", caseName)
+		Event(ref, "Status", "Reason", caseName)
 	}
-	record.Event(ref, "Status", "Reason", "finished")
+	Event(ref, "Status", "Reason", "finished")
 	<-done
 
 	for caseName, item := range table {
@@ -107,6 +107,12 @@ func SaveNamespaceInfo(path string, ns *NamespaceInfo) error {
 	return err
 }
 
+// extracted for test speed
+var (
+	updatePollInterval = 5 * time.Second
+	updatePollTimeout  = 300 * time.Second
+)
+
 // Update performs a rolling update of a collection of pods.
 // 'name' points to a replication controller.
 // 'client' is used for updating pods.
@@ -149,7 +155,7 @@ func Update(ctx api.Context, name string, client client.Interface, updatePeriod
 		}
 		time.Sleep(updatePeriod)
 	}
-	return wait.Poll(time.Second*5, time.Second*300, func() (bool, error) {
+	return wait.Poll(updatePollInterval, updatePollTimeout, func() (bool, error) {
 		podList, err := client.Pods(api.Namespace(ctx)).List(s)
 		if err != nil {
 			return false, err
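Replacing the literal 5s/300s arguments with updatePollInterval and updatePollTimeout lets tests make this final wait return almost immediately (the test hunk below sets the interval to one millisecond). A rough self-contained sketch of the poll-until-done pattern follows; pollUntil is an illustrative stand-in, not the real pkg/util/wait.Poll.

package main

import (
	"errors"
	"fmt"
	"time"
)

// Package-level knobs, mirroring updatePollInterval / updatePollTimeout.
var (
	pollInterval = 5 * time.Second
	pollTimeout  = 300 * time.Second
)

// pollUntil calls condition every interval until it reports done, returns an
// error, or the timeout elapses.
func pollUntil(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	// A test would shrink these, e.g. in an init func.
	pollInterval, pollTimeout = 10*time.Millisecond, time.Second

	remaining := 3
	err := pollUntil(pollInterval, pollTimeout, func() (bool, error) {
		remaining--
		return remaining == 0, nil
	})
	fmt.Println("err:", err)
}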
@@ -23,6 +23,7 @@ import (
 	"os"
 	"reflect"
 	"testing"
+	"time"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
@@ -35,6 +36,10 @@ func validateAction(expectedAction, actualAction client.FakeAction, t *testing.T
 	}
 }
 
+func init() {
+	updatePollInterval = 1 * time.Millisecond
+}
+
 func TestUpdateWithPods(t *testing.T) {
 	fakeClient := client.Fake{
 		PodsList: api.PodList{
@@ -112,6 +112,9 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSe
 	podBackoff := podBackoff{
 		perPodBackoff: map[string]*backoffEntry{},
 		clock:         realClock{},
+
+		defaultDuration: 1 * time.Second,
+		maxDuration:     60 * time.Second,
 	}
 
 	return &scheduler.Config{
@@ -245,9 +248,11 @@ type backoffEntry struct {
 }
 
 type podBackoff struct {
 	perPodBackoff map[string]*backoffEntry
 	lock          sync.Mutex
 	clock         clock
+	defaultDuration time.Duration
+	maxDuration     time.Duration
 }
 
 func (p *podBackoff) getEntry(podID string) *backoffEntry {
@@ -255,7 +260,7 @@ func (p *podBackoff) getEntry(podID string) *backoffEntry {
 	defer p.lock.Unlock()
 	entry, ok := p.perPodBackoff[podID]
 	if !ok {
-		entry = &backoffEntry{backoff: 1 * time.Second}
+		entry = &backoffEntry{backoff: p.defaultDuration}
 		p.perPodBackoff[podID] = entry
 	}
 	entry.lastUpdate = p.clock.Now()
@@ -266,8 +271,8 @@ func (p *podBackoff) getBackoff(podID string) time.Duration {
 	entry := p.getEntry(podID)
 	duration := entry.backoff
 	entry.backoff *= 2
-	if entry.backoff > 60*time.Second {
-		entry.backoff = 60 * time.Second
+	if entry.backoff > p.maxDuration {
+		entry.backoff = p.maxDuration
 	}
 	glog.V(4).Infof("Backing off %s for pod %s", duration.String(), podID)
 	return duration
@@ -282,7 +287,7 @@ func (p *podBackoff) gc() {
 	defer p.lock.Unlock()
 	now := p.clock.Now()
 	for podID, entry := range p.perPodBackoff {
-		if now.Sub(entry.lastUpdate) > 60*time.Second {
+		if now.Sub(entry.lastUpdate) > p.maxDuration {
 			delete(p.perPodBackoff, podID)
 		}
 	}
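Moving the 1s default and 60s cap into podBackoff fields makes the doubling backoff tunable per instance, which the test changes below rely on. A condensed standalone sketch of the same capped exponential backoff, with the clock, locking, and gc omitted and hypothetical names:

package main

import (
	"fmt"
	"time"
)

// backoff tracks a per-key delay that doubles on each request, up to a cap.
type backoff struct {
	delays          map[string]time.Duration
	defaultDuration time.Duration
	maxDuration     time.Duration
}

// next returns the current delay for key and doubles the stored value for the
// following call, never exceeding maxDuration.
func (b *backoff) next(key string) time.Duration {
	d, ok := b.delays[key]
	if !ok {
		d = b.defaultDuration
	}
	doubled := d * 2
	if doubled > b.maxDuration {
		doubled = b.maxDuration
	}
	b.delays[key] = doubled
	return d
}

func main() {
	b := &backoff{
		delays:          map[string]time.Duration{},
		defaultDuration: 1 * time.Second,
		maxDuration:     60 * time.Second,
	}
	for i := 0; i < 8; i++ {
		fmt.Println(b.next("pod-a")) // 1s, 2s, 4s, ... capped at 60s
	}
}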
@@ -118,8 +118,10 @@ func TestDefaultErrorFunc(t *testing.T) {
 	factory := NewConfigFactory(client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()}))
 	queue := cache.NewFIFO()
 	podBackoff := podBackoff{
 		perPodBackoff: map[string]*backoffEntry{},
 		clock:         &fakeClock{},
+		defaultDuration: 1 * time.Millisecond,
+		maxDuration:     1 * time.Second,
 	}
 	errFunc := factory.makeDefaultErrorFunc(&podBackoff, queue)
 
@@ -203,8 +205,10 @@ func TestBind(t *testing.T) {
 func TestBackoff(t *testing.T) {
 	clock := fakeClock{}
 	backoff := podBackoff{
 		perPodBackoff: map[string]*backoffEntry{},
 		clock:         &clock,
+		defaultDuration: 1 * time.Second,
+		maxDuration:     60 * time.Second,
 	}
 
 	tests := []struct {
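With the durations injected through the struct, TestDefaultErrorFunc and TestBackoff can exercise the backoff against a fake clock and short values instead of waiting out real one-second and sixty-second delays.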