// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dns

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/miekg/dns"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

const (
	resolvConf = "/etc/resolv.conf"

	dnsNameLabel            = model.MetaLabelPrefix + "dns_name"
	dnsSrvRecordPrefix      = model.MetaLabelPrefix + "dns_srv_record_"
	dnsSrvRecordTargetLabel = dnsSrvRecordPrefix + "target"
	dnsSrvRecordPortLabel   = dnsSrvRecordPrefix + "port"
	dnsMxRecordPrefix       = model.MetaLabelPrefix + "dns_mx_record_"
	dnsMxRecordTargetLabel  = dnsMxRecordPrefix + "target"
	dnsNsRecordPrefix       = model.MetaLabelPrefix + "dns_ns_record_"
	dnsNsRecordTargetLabel  = dnsNsRecordPrefix + "target"

	// Constants for instrumentation.
	namespace = "prometheus"
)

// DefaultSDConfig is the default DNS SD configuration.
var DefaultSDConfig = SDConfig{
	RefreshInterval: model.Duration(30 * time.Second),
	Type:            "SRV",
}

func init() {
	discovery.RegisterConfig(&SDConfig{})
}

// SDConfig is the configuration for DNS based service discovery.
type SDConfig struct {
	Names           []string       `yaml:"names"`
	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
	Type            string         `yaml:"type"`
	Port            int            `yaml:"port"` // Ignored for SRV records
}
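
// A minimal, illustrative dns_sd_configs stanza mapping to the fields above
// (host names and values here are placeholders, not defaults):
//
//	dns_sd_configs:
//	  - names:
//	      - "_prometheus._tcp.example.org"
//	    type: SRV
//	    refresh_interval: 30s
//
// Record types other than SRV (A, AAAA, MX, NS) additionally require a port,
// as enforced by UnmarshalYAML below.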

// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return newDiscovererMetrics(reg, rmi)
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "dns" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return NewDiscovery(*c, opts.Logger, opts.Metrics)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	*c = DefaultSDConfig
	type plain SDConfig
	err := unmarshal((*plain)(c))
	if err != nil {
		return err
	}
	if len(c.Names) == 0 {
		return errors.New("DNS-SD config must contain at least one SRV record name")
	}
	switch strings.ToUpper(c.Type) {
	case "SRV":
	case "A", "AAAA", "MX", "NS":
		if c.Port == 0 {
			return errors.New("a port is required in DNS-SD configs for all record types except SRV")
		}
	default:
		return fmt.Errorf("invalid DNS-SD records type %s", c.Type)
	}
	return nil
}

// Discovery periodically performs DNS-SD requests. It implements
// the Discoverer interface.
type Discovery struct {
	*refresh.Discovery
	names   []string
	port    int
	qtype   uint16
	logger  *slog.Logger
	metrics *dnsMetrics

	lookupFn func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error)
}

// NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
	m, ok := metrics.(*dnsMetrics)
	if !ok {
		return nil, errors.New("invalid discovery metrics type")
	}

	if logger == nil {
		logger = promslog.NewNopLogger()
	}

	qtype := dns.TypeSRV
	switch strings.ToUpper(conf.Type) {
	case "A":
		qtype = dns.TypeA
	case "AAAA":
		qtype = dns.TypeAAAA
	case "SRV":
		qtype = dns.TypeSRV
	case "MX":
		qtype = dns.TypeMX
	case "NS":
		qtype = dns.TypeNS
	}
	d := &Discovery{
		names:    conf.Names,
		qtype:    qtype,
		port:     conf.Port,
		logger:   logger,
		lookupFn: lookupWithSearchPath,
		metrics:  m,
	}

	d.Discovery = refresh.NewDiscovery(
		refresh.Options{
			Logger:              logger,
			Mech:                "dns",
			Interval:            time.Duration(conf.RefreshInterval),
			RefreshF:            d.refresh,
			MetricsInstantiator: m.refreshMetrics,
		},
	)

	return d, nil
}

// refresh resolves all configured names concurrently and collects the
// resulting target groups.
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	var (
		wg  sync.WaitGroup
		ch  = make(chan *targetgroup.Group)
		tgs = make([]*targetgroup.Group, 0, len(d.names))
	)

	wg.Add(len(d.names))
	for _, name := range d.names {
		go func(n string) {
			if err := d.refreshOne(ctx, n, ch); err != nil && !errors.Is(err, context.Canceled) {
				d.logger.Error("Error refreshing DNS targets", "err", err)
			}
			wg.Done()
		}(name)
	}

	go func() {
		wg.Wait()
		close(ch)
	}()

	for tg := range ch {
		tgs = append(tgs, tg)
	}
	return tgs, nil
}

// refreshOne resolves a single name and sends the resulting target group on ch.
func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targetgroup.Group) error {
	response, err := d.lookupFn(name, d.qtype, d.logger)
	d.metrics.dnsSDLookupsCount.Inc()
	if err != nil {
		d.metrics.dnsSDLookupFailuresCount.Inc()
		return err
	}

	tg := &targetgroup.Group{}
	hostPort := func(a string, p int) model.LabelValue {
		return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p)))
	}
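
	// As an illustration (hypothetical record): an SRV answer such as
	//
	//	_prometheus._tcp.example.org. 300 IN SRV 10 10 9090 node1.example.org.
	//
	// becomes a target with __address__="node1.example.org:9090",
	// __meta_dns_srv_record_target="node1.example.org." and
	// __meta_dns_srv_record_port="9090".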
	for _, record := range response.Answer {
		var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget, dnsNsRecordTarget model.LabelValue

		switch addr := record.(type) {
		case *dns.SRV:
			dnsSrvRecordTarget = model.LabelValue(addr.Target)
			dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port)))

			// Remove the final dot from rooted DNS names to make them look more usual.
			addr.Target = strings.TrimRight(addr.Target, ".")

			target = hostPort(addr.Target, int(addr.Port))
		case *dns.MX:
			dnsMxRecordTarget = model.LabelValue(addr.Mx)

			// Remove the final dot from rooted DNS names to make them look more usual.
			addr.Mx = strings.TrimRight(addr.Mx, ".")

			target = hostPort(addr.Mx, d.port)
		case *dns.NS:
			dnsNsRecordTarget = model.LabelValue(addr.Ns)

			// Remove the final dot from rooted DNS names to make them look more usual.
			addr.Ns = strings.TrimRight(addr.Ns, ".")

			target = hostPort(addr.Ns, d.port)
		case *dns.A:
			target = hostPort(addr.A.String(), d.port)
		case *dns.AAAA:
			target = hostPort(addr.AAAA.String(), d.port)
		case *dns.CNAME:
			// CNAME responses can occur with "Type: A" dns_sd_config requests.
			continue
		default:
			d.logger.Warn("Invalid record", "record", record)
			continue
		}
		tg.Targets = append(tg.Targets, model.LabelSet{
			model.AddressLabel:      target,
			dnsNameLabel:            model.LabelValue(name),
			dnsSrvRecordTargetLabel: dnsSrvRecordTarget,
			dnsSrvRecordPortLabel:   dnsSrvRecordPort,
			dnsMxRecordTargetLabel:  dnsMxRecordTarget,
			dnsNsRecordTargetLabel:  dnsNsRecordTarget,
		})
	}

	tg.Source = name
	select {
	case <-ctx.Done():
		return ctx.Err()
	case ch <- tg:
	}

	return nil
}

// lookupWithSearchPath tries to get an answer for various permutations of
// the given name, appending the system-configured search path as necessary.
//
// There are three possible outcomes:
//
//  1. One of the permutations of the given name is recognized as
//     "valid" by the DNS, in which case we consider ourselves "done"
//     and that answer is returned. Note that, due to the way the DNS
//     handles "name has resource records, but none of the specified type",
//     the answer received may have an empty set of results.
//
//  2. All of the permutations of the given name are responded to by one of
//     the servers in the "nameservers" list with the answer "that name does
//     not exist" (NXDOMAIN). In that case, it can be considered
//     pseudo-authoritative that there are no records for that name.
//
//  3. One or more of the names was responded to by all servers with some
//     sort of error indication. In that case, we can't know if, in fact,
//     there are records for the name or not, so whatever state the
//     configuration is in, we should keep it that way until we know for
//     sure (by, presumably, all the names getting answers in the future).
//
// Outcomes 1 and 2 are indicated by a valid response message (possibly an
// empty one) and no error. Outcome 3 is indicated by an error return. The
// error will be generic-looking, because trying to return all the errors
// returned by the combination of all name permutations and servers is a
// nightmare.
func lookupWithSearchPath(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
	conf, err := dns.ClientConfigFromFile(resolvConf)
	if err != nil {
		return nil, fmt.Errorf("could not load resolv.conf: %w", err)
	}

	allResponsesValid := true
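
	// conf.NameList expands the given name using the search domains from
	// resolv.conf. Roughly (illustrative, not exhaustive): a relative name
	// such as "web" with "search example.org" yields candidates like
	// "web.example.org." and "web.", while an already fully-qualified name
	// is tried as-is.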
for _, lname := range conf.NameList(name) {
|
|
|
|
response, err := lookupFromAnyServer(lname, qtype, conf, logger)
|
|
|
|
|
style: Replace `else if` cascades with `switch`
Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.
The exceptions that I have found in our codebase are just these two:
* The `if else` is followed by an additional statement before the next
condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
used. In this case, using `switch` would require tagging the `for`
loop, which probably tips the balance.
Why are `switch` statements more readable?
For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
I'm sure the aforemention wise coders can list even more reasons.
In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would test on the CI. But
for that, there has to be a role model, so this commit eliminates all
`if else` occurrences, unless it is autogenerated code or fits one of
the exceptions above.
Signed-off-by: beorn7 <beorn@grafana.com>
2023-04-12 14:14:31 +00:00
|
|
|
switch {
|
|
|
|
case err != nil:
|
Improve DNS response handling to prevent "stuck" records [Fixes #2799] (#3138)
The problem reported in #2799 was that in the event that all records for a
name were removed, the target group was never updated to be the "empty" set.
Essentially, whatever Prometheus last saw as a non-empty list of targets
would stay that way forever (or at least until Prometheus restarted...). This
came about because of a fairly naive interpretation of what a valid-looking
DNS response actually looked like -- essentially, the only valid DNS responses
were ones that had a non-empty record list. That's fine as long as your
config always lists only target names which have non-empty record sets; if
your environment happens to legitimately have empty record sets sometimes,
all hell breaks loose (otherwise-cleanly shutdown systems trigger up==0 alerts,
for instance).
This patch is a refactoring of the DNS lookup behaviour that maintains
existing behaviour with regard to search paths, but correctly handles empty
and non-existent record sets.
RFC1034 s4.3.1 says there's three ways a recursive DNS server can respond:
1. Here is your answer (possibly an empty answer, because of the way DNS
considers all records for a name, regardless of type, when deciding
whether the name exists).
2. There is no spoon (the name you asked for definitely does not exist).
3. I am a teapot (something has gone terribly wrong).
Situations 1 and 2 are fine and dandy; whatever the answer is (empty or
otherwise) is the list of targets. If something has gone wrong, then we
shouldn't go updating the target list because we don't really *know* what
the target list should be.
Multiple DNS servers to query is a straightforward augmentation; if you get
an error, then try the next server in the list, until you get an answer or
run out servers to ask. Only if *all* the servers return errors should you
return an error to the calling code.
Where things get complicated is the search path. In order to be able to
confidently say, "this name does not exist anywhere, you can remove all the
targets for this name because it's definitely GORN", at least one server for
*all* the possible names need to return either successful-but-empty
responses, or NXDOMAIN. If any name errors out, then -- since that one
might have been the one where the records came from -- you need to say
"maintain the status quo until we get a known-good response".
It is possible, though unlikely, that a poorly-configured DNS setup (say,
one which had a domain in its search path for which all configured recursive
resolvers respond with REFUSED) could result in the same "stuck" records
problem we're solving here, but the DNS configuration should be fixed in
that case, and there's nothing we can do in Prometheus itself to fix the
problem.
I've tested this patch on a local scratch instance in all the various ways I
can think of:
1. Adding records (targets get scraped)
2. Adding records of a different type
3. Remove records of the requested type, leaving other type records intact
(targets don't get scraped)
4. Remove all records for the name (targets don't get scraped)
5. Shutdown the resolver (targets still get scraped)
There's no automated test suite additions, because there isn't a test suite
for DNS discovery, and I was stretching my Go skills to the limit to make
this happen; mock objects are beyond me.
2017-09-15 10:26:10 +00:00
|
|
|
// We can't go home yet, because a later name
|
|
|
|
// may give us a valid, successful answer. However
|
|
|
|
// we can no longer say "this name definitely doesn't
|
|
|
|
// exist", because we did not get that answer for
|
|
|
|
// at least one name.
|
|
|
|
allResponsesValid = false
|
style: Replace `else if` cascades with `switch`
Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.
The exceptions that I have found in our codebase are just these two:
* The `if else` is followed by an additional statement before the next
condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
used. In this case, using `switch` would require tagging the `for`
loop, which probably tips the balance.
Why are `switch` statements more readable?
For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
I'm sure the aforemention wise coders can list even more reasons.
In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would test on the CI. But
for that, there has to be a role model, so this commit eliminates all
`if else` occurrences, unless it is autogenerated code or fits one of
the exceptions above.
Signed-off-by: beorn7 <beorn@grafana.com>
2023-04-12 14:14:31 +00:00
|
|
|
case response.Rcode == dns.RcodeSuccess:
// Outcome 1: GOLD!
return response, nil
}
}
if allResponsesValid {
// Outcome 2: everyone says NXDOMAIN, that's good enough for me.
return &dns.Msg{}, nil
}
// Outcome 3: boned.
return nil, fmt.Errorf("could not resolve %q: all servers responded with errors to at least one search domain", name)
}
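// A minimal, illustrative sketch (assuming the enclosing lookup helper is named
// lookupWithSearchPath with the (name, qtype, logger) signature implied above):
// the three outcomes give callers a simple contract. Any non-nil *dns.Msg, even
// one with an empty Answer section, is an authoritative view to rebuild the
// target group from; a non-nil error means "keep the targets we already had".
//
//	response, err := lookupWithSearchPath(name, qtype, logger)
//	if err != nil {
//		return nil, err // leave the existing target group untouched
//	}
//	targets := make([]model.LabelSet, 0, len(response.Answer))
//	// Zero answers yields an empty target group, un-sticking stale records.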
// lookupFromAnyServer uses all configured servers to try and resolve a specific
// name. If a viable answer is received from a server, then it is
// immediately returned, otherwise the other servers in the config are
// tried, and if none of them return a viable answer, an error is returned.
//
// A "viable answer" is one which indicates either:
//
// 1. "yes, I know that name, and here are its records of the requested type"
// (RCODE==SUCCESS, ANCOUNT > 0);
// 2. "yes, I know that name, but it has no records of the requested type"
// (RCODE==SUCCESS, ANCOUNT==0); or
// 3. "I know that name doesn't exist" (RCODE==NXDOMAIN).
//
// A non-viable answer is "anything else", which encompasses both various
// system-level problems (like network timeouts) and also
// valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc).
func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger *slog.Logger) (*dns.Msg, error) {
client := &dns.Client{}
for _, server := range conf.Servers {
servAddr := net.JoinHostPort(server, conf.Port)
msg, err := askServerForName(name, qtype, client, servAddr, true)
if err != nil {
logger.Warn("DNS resolution failed", "server", server, "name", name, "err", err)
continue
}
if msg.Rcode == dns.RcodeSuccess || msg.Rcode == dns.RcodeNameError {
// We have our answer. Time to go home.
return msg, nil
}
}
return nil, fmt.Errorf("could not resolve %s: no servers returned a viable answer", name)
}
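// A minimal, illustrative sketch of the "viable answer" rule documented on
// lookupFromAnyServer: once a response arrives without a transport error, the
// check reduces to a single predicate (dns.RcodeNameError is the miekg/dns
// constant for NXDOMAIN).
//
//	viable := msg.Rcode == dns.RcodeSuccess || msg.Rcode == dns.RcodeNameError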
// askServerForName makes a request to a specific DNS server for a specific
// name (and qtype). Retries with TCP in the event of response truncation,
// but otherwise just sends back whatever the server gave, whether that be a
// valid-looking response, or an error.
func askServerForName(name string, queryType uint16, client *dns.Client, servAddr string, edns bool) (*dns.Msg, error) {
msg := &dns.Msg{}
msg.SetQuestion(dns.Fqdn(name), queryType)
if edns {
msg.SetEdns0(dns.DefaultMsgSize, false)
}
response, _, err := client.Exchange(msg, servAddr)
if err != nil {
return nil, err
}
if response.Truncated {
if client.Net == "tcp" {
return nil, errors.New("got truncated message on TCP (64kiB limit exceeded?)")
}
client.Net = "tcp"
return askServerForName(name, queryType, client, servAddr, false)
}
return response, nil
}
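// A minimal, illustrative sketch of calling askServerForName directly (the
// resolver address 192.0.2.53 is a hypothetical documentation IP): the first
// attempt goes over UDP with EDNS0 advertising dns.DefaultMsgSize; a truncated
// reply switches the client to TCP and retries once with edns=false, and a
// reply that is still truncated over TCP is reported as an error.
//
//	client := &dns.Client{}
//	msg, err := askServerForName("db.example.com.", dns.TypeSRV, client, "192.0.2.53:53", true)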