mirror of https://github.com/prometheus/prometheus

commit 219e477272 (parent b41be4ef52, branch pull/4347/merge)

Fix some (valid) lint errors (#4287)

Signed-off-by: Julius Volz <julius.volz@gmail.com>
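The hunks below address a handful of common Go lint findings: receiver names made consistent across each type's methods in the Kubernetes discovery code, `s += 1` rewritten as `s++` in a benchmark loop, a doc comment reworded to start with the type it documents, and underscore_names replaced with mixedCaps in the PromQL engine. As a quick illustration of the receiver-name rule, here is a minimal, hypothetical sketch (the `Counter` type is made up and not part of Prometheus); golint and similar linters warn when methods on the same type use different receiver names:

// Package lintdemo is a hypothetical illustration, not Prometheus code.
package lintdemo

// Counter is a toy type used only to demonstrate the receiver-name rule.
type Counter struct {
	n int
}

// Before the fix, the methods below used mismatched receivers, e.g.
//
//	func (e *Counter) Add(d int)  { e.n += d }
//	func (s *Counter) Value() int { return s.n }
//
// which golint flags as inconsistent receiver naming.

// Add increments the counter by d.
func (c *Counter) Add(d int) { c.n += d }

// Value returns the current count.
func (c *Counter) Value() int { return c.n }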
@@ -55,26 +55,26 @@ func NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress {
 	return s
 }
 
-func (e *Ingress) enqueue(obj interface{}) {
+func (i *Ingress) enqueue(obj interface{}) {
 	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
 	if err != nil {
 		return
 	}
 
-	e.queue.Add(key)
+	i.queue.Add(key)
 }
 
 // Run implements the Discoverer interface.
-func (s *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
-	defer s.queue.ShutDown()
+func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
+	defer i.queue.ShutDown()
 
-	if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) {
-		level.Error(s.logger).Log("msg", "ingress informer unable to sync cache")
+	if !cache.WaitForCacheSync(ctx.Done(), i.informer.HasSynced) {
+		level.Error(i.logger).Log("msg", "ingress informer unable to sync cache")
 		return
 	}
 
 	go func() {
-		for s.process(ctx, ch) {
+		for i.process(ctx, ch) {
 		}
 	}()
 
@@ -82,12 +82,12 @@ func (s *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	<-ctx.Done()
 }
 
-func (s *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
-	keyObj, quit := s.queue.Get()
+func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
+	keyObj, quit := i.queue.Get()
 	if quit {
 		return false
 	}
-	defer s.queue.Done(keyObj)
+	defer i.queue.Done(keyObj)
 	key := keyObj.(string)
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -95,20 +95,20 @@ func (s *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
 		return true
 	}
 
-	o, exists, err := s.store.GetByKey(key)
+	o, exists, err := i.store.GetByKey(key)
 	if err != nil {
 		return true
 	}
 	if !exists {
-		send(ctx, s.logger, RoleIngress, ch, &targetgroup.Group{Source: ingressSourceFromNamespaceAndName(namespace, name)})
+		send(ctx, i.logger, RoleIngress, ch, &targetgroup.Group{Source: ingressSourceFromNamespaceAndName(namespace, name)})
 		return true
 	}
 	eps, err := convertToIngress(o)
 	if err != nil {
-		level.Error(s.logger).Log("msg", "converting to Ingress object failed", "err", err)
+		level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err", err)
 		return true
 	}
-	send(ctx, s.logger, RoleIngress, ch, s.buildIngress(eps))
+	send(ctx, i.logger, RoleIngress, ch, i.buildIngress(eps))
 	return true
 }
 
@@ -170,7 +170,7 @@ func pathsFromIngressRule(rv *v1beta1.IngressRuleValue) []string {
 	return paths
 }
 
-func (s *Ingress) buildIngress(ingress *v1beta1.Ingress) *targetgroup.Group {
+func (i *Ingress) buildIngress(ingress *v1beta1.Ingress) *targetgroup.Group {
 	tg := &targetgroup.Group{
 		Source: ingressSource(ingress),
 	}
@@ -61,13 +61,13 @@ func NewNode(l log.Logger, inf cache.SharedInformer) *Node {
 	return n
 }
 
-func (e *Node) enqueue(obj interface{}) {
+func (n *Node) enqueue(obj interface{}) {
 	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
 	if err != nil {
 		return
 	}
 
-	e.queue.Add(key)
+	n.queue.Add(key)
 }
 
 // Run implements the Discoverer interface.
@@ -69,13 +69,13 @@ func NewPod(l log.Logger, pods cache.SharedInformer) *Pod {
 	return p
 }
 
-func (e *Pod) enqueue(obj interface{}) {
+func (p *Pod) enqueue(obj interface{}) {
 	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
 	if err != nil {
 		return
 	}
 
-	e.queue.Add(key)
+	p.queue.Add(key)
 }
 
 // Run implements the Discoverer interface.
@@ -61,13 +61,13 @@ func NewService(l log.Logger, inf cache.SharedInformer) *Service {
 	return s
 }
 
-func (e *Service) enqueue(obj interface{}) {
+func (s *Service) enqueue(obj interface{}) {
 	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
 	if err != nil {
 		return
 	}
 
-	e.queue.Add(key)
+	s.queue.Add(key)
 }
 
 // Run implements the Discoverer interface.
@@ -60,7 +60,7 @@ func BenchmarkRangeQuery(b *testing.B) {
 	// A day of data plus 10k steps.
 	numIntervals := 8640 + 10000
 
-	for s := 0; s < numIntervals; s += 1 {
+	for s := 0; s < numIntervals; s++ {
 		a, err := storage.Appender()
 		if err != nil {
 			b.Fatal(err)
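The only change in the benchmark hunk above is the loop post statement: golint flags `s += 1` and suggests the idiomatic increment statement `s++`. A throwaway sketch of the same idiom (the loop body here is invented, not the benchmark's real body):

// Package incdemo is a hypothetical illustration, not Prometheus code.
package incdemo

// sumTo adds up the integers 0..n-1 using the increment form preferred by golint.
func sumTo(n int) int {
	sum := 0
	// golint would flag the equivalent `for i := 0; i < n; i += 1 {` form.
	for i := 0; i < n; i++ {
		sum += i
	}
	return sum
}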
@@ -613,7 +613,7 @@ func (ev *evaluator) Eval(expr Expr) (v Value, err error) {
 	return ev.eval(expr), nil
 }
 
-// Extra information and caches for evaluating a single node across steps.
+// EvalNodeHelper stores extra information and caches for evaluating a single node across steps.
 type EvalNodeHelper struct {
 	// Evaluation timestamp.
 	ts int64
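The comment change above follows the golint convention that a doc comment on an exported identifier should begin with that identifier's name, so it reads naturally in godoc. A hypothetical before/after sketch (the `Helper` type is invented for illustration):

// Package docdemo is a hypothetical illustration, not Prometheus code.
package docdemo

// Before:
//
//	// Extra information for evaluating a node.
//	type Helper struct{ ts int64 }
//
// golint reports that the comment on exported type Helper should be of the
// form "Helper ...".

// Helper stores extra information for evaluating a node.
type Helper struct {
	ts int64
}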
@@ -1531,19 +1531,19 @@ func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, p
 				valuesSquaredSum: s.V * s.V,
 				groupCount: 1,
 			}
-			input_vec_len := int64(len(vec))
-			result_size := k
-			if k > input_vec_len {
-				result_size = input_vec_len
+			inputVecLen := int64(len(vec))
+			resultSize := k
+			if k > inputVecLen {
+				resultSize = inputVecLen
 			}
 			if op == itemTopK || op == itemQuantile {
-				result[groupingKey].heap = make(vectorByValueHeap, 0, result_size)
+				result[groupingKey].heap = make(vectorByValueHeap, 0, resultSize)
 				heap.Push(&result[groupingKey].heap, &Sample{
 					Point: Point{V: s.V},
 					Metric: s.Metric,
 				})
 			} else if op == itemBottomK {
-				result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, result_size)
+				result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, resultSize)
 				heap.Push(&result[groupingKey].reverseHeap, &Sample{
 					Point: Point{V: s.V},
 					Metric: s.Metric,
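The renames in the aggregation hunk above (input_vec_len to inputVecLen, result_size to resultSize) follow Go's mixedCaps naming convention, which golint enforces by flagging underscores in identifier names. A small standalone sketch of the same capping logic, with invented names rather than the engine's real types:

// Package namedemo is a hypothetical illustration, not Prometheus code.
package namedemo

// cappedSize returns k capped at inputVecLen, mirroring how the fixed code
// limits the heap capacity for topk/bottomk/quantile to the input vector length.
func cappedSize(k, inputVecLen int64) int64 {
	// Before the fix this logic used underscore names (result_size,
	// input_vec_len), which golint flags ("don't use underscores in Go names").
	resultSize := k
	if k > inputVecLen {
		resultSize = inputVecLen
	}
	return resultSize
}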