// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kubernetes

import (
	"context"
	"encoding/json"
	"errors"
	"testing"
	"time"

	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/common/promslog"
	"github.com/stretchr/testify/require"
	apiv1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/apimachinery/pkg/watch"
	fakediscovery "k8s.io/client-go/discovery/fake"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	kubetesting "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/util/testutil"
)
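
// TestMain wraps the test run with a tolerant goroutine leak check.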
func TestMain(m *testing.M) {
	testutil.TolerantVerifyLeak(m)
}

// makeDiscovery creates a kubernetes.Discovery instance for testing.
func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
	return makeDiscoveryWithVersion(role, nsDiscovery, "v1.25.0", objects...)
}

// makeDiscoveryWithVersion creates a kubernetes.Discovery instance with the specified kubernetes version for testing.
func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer string, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
	clientset := fake.NewSimpleClientset(objects...)
	fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery)
	fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: k8sVer}

	reg := prometheus.NewRegistry()
	refreshMetrics := discovery.NewRefreshMetrics(reg)
	metrics := newDiscovererMetrics(reg, refreshMetrics)
	err := metrics.Register()
	if err != nil {
		panic(err)
	}
	// TODO(ptodev): Unregister the metrics at the end of the test.

	kubeMetrics, ok := metrics.(*kubernetesMetrics)
	if !ok {
		panic("invalid discovery metrics type")
	}

	d := &Discovery{
		client:             clientset,
		logger:             promslog.NewNopLogger(),
		role:               role,
		namespaceDiscovery: &nsDiscovery,
		ownNamespace:       "own-ns",
		metrics:            kubeMetrics,
	}

	return d, clientset
}

// makeDiscoveryWithMetadata creates a kubernetes.Discovery instance with the specified metadata config.
func makeDiscoveryWithMetadata(role Role, nsDiscovery NamespaceDiscovery, attachMetadata AttachMetadataConfig, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
	d, k8s := makeDiscovery(role, nsDiscovery, objects...)
	d.attachMetadata = attachMetadata
	return d, k8s
}

type k8sDiscoveryTest struct {
	// discovery is the discovery.Discoverer instance under test.
	discovery discovery.Discoverer
	// beforeRun runs before the discoverer is started.
	beforeRun func()
	// afterStart runs after the discoverer has synced.
	afterStart func()
	// expectedMaxItems is the maximum number of items we may receive from the channel.
	expectedMaxItems int
	// expectedRes is the expected final result.
	expectedRes map[string]*targetgroup.Group
}
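
// A typical test constructs a discoverer and drives it through Run; for
// illustration (the role and expected item count here are arbitrary):
//
//	d, _ := makeDiscovery(RolePod, NamespaceDiscovery{})
//	k8sDiscoveryTest{
//		discovery:        d,
//		expectedMaxItems: 1,
//	}.Run(t)

// Run starts the discoverer, waits for it to sync with the Kubernetes API
// server, and compares the target groups read from the channel against
// expectedRes (when it is set).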
func (d k8sDiscoveryTest) Run(t *testing.T) {
	t.Helper()
	ch := make(chan []*targetgroup.Group)
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	if d.beforeRun != nil {
		d.beforeRun()
	}

	// Run the discoverer and start a goroutine to read results.
	go d.discovery.Run(ctx, ch)

	// Ensure that discovery has a discoverer set. This prevents a race
	// condition where the above goroutine may or may not have set a
	// discoverer yet.
	lastDiscoverersCount := 0
	dis := d.discovery.(*Discovery)
	for {
		dis.RLock()
		l := len(dis.discoverers)
		dis.RUnlock()
		if l > 0 && l == lastDiscoverersCount {
			break
		}
		time.Sleep(100 * time.Millisecond)

		lastDiscoverersCount = l
	}

	resChan := make(chan map[string]*targetgroup.Group)
	go readResultWithTimeout(t, ctx, ch, d.expectedMaxItems, time.Second, resChan)

	dd, ok := d.discovery.(hasSynced)
	require.True(t, ok, "discoverer does not implement hasSynced interface")
	require.True(t, cache.WaitForCacheSync(ctx.Done(), dd.hasSynced), "discoverer failed to sync: %v", dd)

	if d.afterStart != nil {
		d.afterStart()
	}

	if d.expectedRes != nil {
		res := <-resChan
		requireTargetGroups(t, d.expectedRes, res)
	} else {
		// Stop readResultWithTimeout and wait for it.
		cancel()
		<-resChan
	}
}

// readResultWithTimeout reads all target groups from the channel with a timeout.
// It merges target groups by source and sends the result to the result channel.
func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
	res := make(map[string]*targetgroup.Group)
	timeout := time.After(stopAfter)
Loop:
	for {
		select {
		case tgs := <-ch:
			for _, tg := range tgs {
				if tg == nil {
					continue
				}
				res[tg.Source] = tg
			}
			if len(res) == maxGroups {
				// Reached the maximum number of target groups we may get, break early.
				break Loop
			}
		case <-timeout:
			// Because we use a queue, an object that is created and then
			// deleted or updated may be processed only once, so we may
			// skip events and time out here.
			t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups)
			break Loop
		case <-ctx.Done():
			t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups)
			break Loop
		}
	}

	resChan <- res
}
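
// requireTargetGroups asserts that the expected and actual target groups are
// identical by comparing their full JSON serializations.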
func requireTargetGroups(t *testing.T, expected, res map[string]*targetgroup.Group) {
	t.Helper()
	b1, err := marshalTargetGroups(expected)
	if err != nil {
		panic(err)
	}
	b2, err := marshalTargetGroups(res)
	if err != nil {
		panic(err)
	}

	require.Equal(t, string(b1), string(b2))
}

// marshalTargetGroups serializes a set of target groups to JSON, ignoring the
// custom MarshalJSON function defined on the targetgroup.Group struct.
// marshalTargetGroups can be used for making exact comparisons between target groups
// as it will serialize all target labels.
func marshalTargetGroups(tgs map[string]*targetgroup.Group) ([]byte, error) {
	type targetGroupAlias targetgroup.Group

	aliases := make(map[string]*targetGroupAlias, len(tgs))
	for k, v := range tgs {
		tg := targetGroupAlias(*v)
		aliases[k] = &tg
	}

	return json.Marshal(aliases)
}

type hasSynced interface {
	// hasSynced returns true once all informers have synced.
	// It is only used in testing to determine when the discoverer has
	// synced with the Kubernetes API server.
	hasSynced() bool
}
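
// Compile-time checks that every discoverer exercised in the tests
// implements hasSynced.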
var (
	_ hasSynced = &Discovery{}
	_ hasSynced = &Node{}
	_ hasSynced = &Endpoints{}
	_ hasSynced = &EndpointSlice{}
	_ hasSynced = &Ingress{}
	_ hasSynced = &Pod{}
	_ hasSynced = &Service{}
)
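
// hasSynced reports whether all of the Discovery's underlying discoverers
// have synced.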
func (d *Discovery) hasSynced() bool {
	d.RLock()
	defer d.RUnlock()
	for _, discoverer := range d.discoverers {
		if hs, ok := discoverer.(hasSynced); ok {
			if !hs.hasSynced() {
				return false
			}
		}
	}
	return true
}

func (n *Node) hasSynced() bool {
	return n.informer.HasSynced()
}

func (e *Endpoints) hasSynced() bool {
	return e.endpointsInf.HasSynced() && e.serviceInf.HasSynced() && e.podInf.HasSynced()
}

func (e *EndpointSlice) hasSynced() bool {
	return e.endpointSliceInf.HasSynced() && e.serviceInf.HasSynced() && e.podInf.HasSynced()
}

func (i *Ingress) hasSynced() bool {
	return i.informer.HasSynced()
}

func (p *Pod) hasSynced() bool {
	return p.podInf.HasSynced()
}

func (s *Service) hasSynced() bool {
	return s.informer.HasSynced()
}
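
// TestRetryOnError verifies that retryOnError keeps calling the given
// function until it succeeds, regardless of which attempt succeeds.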
func TestRetryOnError(t *testing.T) {
	for _, successAt := range []int{1, 2, 3} {
		var called int
		f := func() error {
			called++
			if called >= successAt {
				return nil
			}
			return errors.New("dummy")
		}
		retryOnError(context.TODO(), 0, f)
		require.Equal(t, successAt, called)
	}
}
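
// TestFailuresCountMetric checks that failed watch requests increment the
// failuresCount metric. The endpoint roles watch endpoints, services and
// pods (see their hasSynced methods above), hence the higher minimum.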
func TestFailuresCountMetric(t *testing.T) {
	tests := []struct {
		role             Role
		minFailedWatches int
	}{
		{RoleNode, 1},
		{RolePod, 1},
		{RoleService, 1},
		{RoleEndpoint, 3},
		{RoleEndpointSlice, 3},
		{RoleIngress, 1},
	}

	for _, tc := range tests {
		tc := tc
		t.Run(string(tc.role), func(t *testing.T) {
			t.Parallel()

			n, c := makeDiscovery(tc.role, NamespaceDiscovery{})
			// The counter is initialized and there are no failures at the beginning.
			require.Equal(t, float64(0), prom_testutil.ToFloat64(n.metrics.failuresCount))

			// Simulate an error on watch requests.
			c.Discovery().(*fakediscovery.FakeDiscovery).PrependWatchReactor("*", func(action kubetesting.Action) (bool, watch.Interface, error) {
				return true, nil, apierrors.NewUnauthorized("unauthorized")
			})

			// Start the discovery.
			k8sDiscoveryTest{discovery: n}.Run(t)

			// At least the errors of the initial watches should be caught (watches are retried on errors).
			require.GreaterOrEqual(t, prom_testutil.ToFloat64(n.metrics.failuresCount), float64(tc.minFailedWatches))
		})
	}
}
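
// TestNodeName verifies that nodeName resolves the name of both a live Node
// object and a cache.DeletedFinalStateUnknown tombstone.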
func TestNodeName(t *testing.T) {
	node := &apiv1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo",
		},
	}
	name, err := nodeName(node)
	require.NoError(t, err)
	require.Equal(t, "foo", name)

	name, err = nodeName(cache.DeletedFinalStateUnknown{Key: "bar"})
	require.NoError(t, err)
	require.Equal(t, "bar", name)
}