mirror of https://github.com/prometheus/prometheus
Merge branch 'master' into beorn7/release-2.24
commit 2344b2c514
@@ -0,0 +1,67 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+  push:
+    branches: [ master, release-* ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ master ]
+  schedule:
+    - cron: '26 14 * * 1'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'go', 'javascript' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+        # Learn more:
+        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v1
+      with:
+        languages: ${{ matrix.language }}
+        # If you wish to specify custom queries, you can do so here or in a config file.
+        # By default, queries listed here will override any specified in a config file.
+        # Prefix the list here with "+" to use these queries and those in the config file.
+        # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+    # If this step fails, then you should remove it and run the build manually (see below)
+    - name: Autobuild
+      uses: github/codeql-action/autobuild@v1
+
+    # ℹ️ Command-line programs to run using the OS shell.
+    # 📚 https://git.io/JvXDl
+
+    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+    #    and modify them (or add more) to build your code if your project
+    #    uses a compiled language
+
+    #- run: |
+    #   make bootstrap
+    #   make release
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v1
@@ -0,0 +1,6 @@
+# Reporting a security issue
+
+The Prometheus security policy, including how to report vulnerabilities, can be
+found here:
+
+https://prometheus.io/docs/operating/security/
@@ -47,7 +47,7 @@ import (
 	promlogflag "github.com/prometheus/common/promlog/flag"
 	"github.com/prometheus/common/version"
 	toolkit_web "github.com/prometheus/exporter-toolkit/web"
-	httpsflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
+	toolkit_webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
 	jcfg "github.com/uber/jaeger-client-go/config"
 	jprom "github.com/uber/jaeger-lib/metrics/prometheus"
 	"go.uber.org/atomic"
@@ -153,7 +153,7 @@ func main() {
 	a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
 		Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress)

-	httpsConfig := httpsflag.AddFlags(a)
+	webConfig := toolkit_webflag.AddFlags(a)

 	a.Flag("web.read-timeout",
 		"Maximum duration before timing out read of the request, and closing idle connections.").
@@ -568,7 +568,7 @@ func main() {
 		os.Exit(1)
 	}

-	err = toolkit_web.Validate(*httpsConfig)
+	err = toolkit_web.Validate(*webConfig)
 	if err != nil {
 		level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err)
 		os.Exit(1)
@@ -788,7 +788,7 @@ func main() {
 		// Web handler.
 		g.Add(
 			func() error {
-				if err := webHandler.Run(ctxWeb, listener, *httpsConfig); err != nil {
+				if err := webHandler.Run(ctxWeb, listener, *webConfig); err != nil {
 					return errors.Wrapf(err, "error starting web server")
 				}
 				return nil
@@ -37,7 +37,7 @@ import (
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
-	toolkit_web "github.com/prometheus/exporter-toolkit/web"
+	"github.com/prometheus/exporter-toolkit/web"
 	"gopkg.in/alecthomas/kingpin.v2"

 	"github.com/prometheus/prometheus/config"
@@ -249,7 +249,7 @@ func CheckWebConfig(files ...string) int {
 	failed := false

 	for _, f := range files {
-		if err := toolkit_web.Validate(f); err != nil {
+		if err := web.Validate(f); err != nil {
 			fmt.Fprintln(os.Stderr, f, "FAILED:", err)
 			failed = true
 			continue
@@ -10,5 +10,5 @@ tests:
     - series: 'join_2{a="1",b="4"}'
      values: 3

-# Just the existance of the data, that can't be joined, makes the recording
+# Just the existence of the data, that can't be joined, makes the recording
 # rules error.
@@ -662,7 +662,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
 * `__meta_ec2_instance_lifecycle`: the lifecycle of the EC2 instance, set only for 'spot' or 'scheduled' instances, absent otherwise
 * `__meta_ec2_instance_state`: the state of the EC2 instance
 * `__meta_ec2_instance_type`: the type of the EC2 instance
-* `__meta_ec2_ipv6_addresses`: comma seperated list of IPv6 addresses assigned to the instance's network interfaces, if present
+* `__meta_ec2_ipv6_addresses`: comma separated list of IPv6 addresses assigned to the instance's network interfaces, if present
 * `__meta_ec2_owner_id`: the ID of the AWS account that owns the EC2 instance
 * `__meta_ec2_platform`: the Operating System platform, set to 'windows' on Windows servers, absent otherwise
 * `__meta_ec2_primary_subnet_id`: the subnet ID of the primary network interface, if available
@@ -25,6 +25,8 @@ Generic placeholders are defined as follows:
 * `<secret>`: a regular string that is a secret, such as a password
 * `<string>`: a regular string

+A valid example file can be found [here](/documentation/examples/web-config.yml).
+
 ```
 tls_server_config:
   # Certificate and key files for server to use to authenticate to client.
@@ -0,0 +1,12 @@
+# TLS and basic authentication configuration example.
+#
+# Additionally, a certificate and a key file are needed.
+tls_server_config:
+  cert_file: server.crt
+  key_file: server.key
+
+# Usernames and passwords required to connect to Prometheus.
+# Passwords are hashed with bcrypt: https://github.com/prometheus/exporter-toolkit/blob/master/https/README.md#about-bcrypt
+basic_auth_users:
+  alice: $2y$10$mDwo.lAisC94iLAyP81MCesa29IzH37oigHC/42V2pdJlUprsJPze
+  bob: $2y$10$hLqFl9jSjoAAy95Z/zw8Ye8wkdMBM8c5Bn1ptYqP/AXyV0.oy0S8m
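A note on those bcrypt hashes: the exporter-toolkit README linked in the example describes the hashing scheme, and the following is a minimal Go sketch (not part of this diff; it assumes the golang.org/x/crypto/bcrypt package) that prints a hash suitable for `basic_auth_users`:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash the password given as the first argument; the output can be
	// pasted as a value under basic_auth_users in web-config.yml.
	hash, err := bcrypt.GenerateFromPassword([]byte(os.Args[1]), bcrypt.DefaultCost)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(string(hash))
}
```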
@@ -583,6 +583,21 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
 		p.checkAST(n.VectorSelector)

 	case *VectorSelector:
+		if n.Name != "" {
+			// In this case the last LabelMatcher is checking for the metric name
+			// set outside the braces. This checks if the name has already been set
+			// previously.
+			for _, m := range n.LabelMatchers[0 : len(n.LabelMatchers)-1] {
+				if m != nil && m.Name == labels.MetricName {
+					p.addParseErrf(n.PositionRange(), "metric name must not be set twice: %q or %q", n.Name, m.Value)
+				}
+			}
+
+			// Skip the check for non-empty matchers because an explicit
+			// metric name is a non-empty matcher.
+			break
+		}
+
 		// A Vector selector must contain at least one non-empty matcher to prevent
 		// implicit selection of all metrics (e.g. by a typo).
 		notEmpty := false
@@ -596,17 +611,6 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
 			p.addParseErrf(n.PositionRange(), "vector selector must contain at least one non-empty matcher")
 		}

-		if n.Name != "" {
-			// In this case the last LabelMatcher is checking for the metric name
-			// set outside the braces. This checks if the name has already been set
-			// previously
-			for _, m := range n.LabelMatchers[0 : len(n.LabelMatchers)-1] {
-				if m != nil && m.Name == labels.MetricName {
-					p.addParseErrf(n.PositionRange(), "metric name must not be set twice: %q or %q", n.Name, m.Value)
-				}
-			}
-		}
-
 	case *NumberLiteral, *StringLiteral:
 		// Nothing to do for terminals.
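For illustration, a small sketch (not part of this diff) of the duplicate-metric-name error that the relocated check reports, assuming the public promql/parser package runs this checkAST path via ParseExpr:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// The metric name is set both outside the braces and via __name__
	// inside them, so checkAST rejects the expression.
	_, err := parser.ParseExpr(`foo{__name__="bar"}`)
	fmt.Println(err) // expected: metric name must not be set twice: "foo" or "bar"
}
```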
@@ -20,7 +20,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then
 fi

 # List of files that should be synced.
-SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common"
+SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md"

 # Go to the root of the repo
 cd "$(git rev-parse --show-cdup)" || exit 1
@@ -73,10 +73,12 @@ process_repo() {
 		fi
 		if [[ -z "${target_file}" ]]; then
 			echo "${source_file} doesn't exist in ${org_repo}"
-			if [[ "${source_file}" == 'CODE_OF_CONDUCT.md' ]] ; then
-				echo "CODE_OF_CONDUCT.md missing in ${org_repo}, force updating."
-				needs_update+=('CODE_OF_CONDUCT.md')
-			fi
+			case "${source_file}" in
+				CODE_OF_CONDUCT.md | SECURITY.md)
+					echo "${source_file} missing in ${org_repo}, force updating."
+					needs_update+=("${source_file}")
+					;;
+			esac
 			continue
 		fi
 		target_checksum="$(echo "${target_file}" | sha256sum | cut -d' ' -f1)"
@@ -32,6 +32,7 @@ func TestBlockWriter(t *testing.T) {
 	ctx := context.Background()
 	outputDir, err := ioutil.TempDir(os.TempDir(), "output")
 	require.NoError(t, err)
+	defer func() { require.NoError(t, os.RemoveAll(outputDir)) }()
 	w, err := NewBlockWriter(log.NewNopLogger(), outputDir, DefaultBlockDuration)
 	require.NoError(t, err)
@@ -55,6 +56,7 @@ func TestBlockWriter(t *testing.T) {
 	blockpath := filepath.Join(outputDir, id.String())
 	b, err := OpenBlock(nil, blockpath, nil)
 	require.NoError(t, err)
+	defer func() { require.NoError(t, b.Close()) }()
 	q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64)
 	require.NoError(t, err)
 	series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
tsdb/db.go
@@ -56,6 +56,8 @@ const (
 	// about removing those too on start to save space. Currently only blocks tmp dirs are removed.
 	tmpForDeletionBlockDirSuffix = ".tmp-for-deletion"
 	tmpForCreationBlockDirSuffix = ".tmp-for-creation"
+	// Pre-2.21 tmp dir suffix, used in clean-up functions.
+	tmpLegacy = ".tmp"
 )

 var (
@@ -732,6 +734,12 @@ func (db *DB) run() {

 		select {
 		case <-time.After(1 * time.Minute):
+			db.cmtx.Lock()
+			if err := db.reloadBlocks(); err != nil {
+				level.Error(db.logger).Log("msg", "reloadBlocks", "err", err)
+			}
+			db.cmtx.Unlock()
+
 			select {
 			case db.compactc <- struct{}{}:
 			default:
@@ -1564,7 +1572,7 @@ func isTmpBlockDir(fi os.FileInfo) bool {

 	fn := fi.Name()
 	ext := filepath.Ext(fn)
-	if ext == tmpForDeletionBlockDirSuffix || ext == tmpForCreationBlockDirSuffix {
+	if ext == tmpForDeletionBlockDirSuffix || ext == tmpForCreationBlockDirSuffix || ext == tmpLegacy {
 		if _, err := ulid.ParseStrict(fn[:len(fn)-len(ext)]); err == nil {
 			return true
 		}
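A hedged sketch (not part of this diff) of how the suffix matching behaves for the three tmp suffixes; the ULID below is the illustrative one from the TSDB docs, and the import path assumes the oklog/ulid package Prometheus vendors:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/oklog/ulid"
)

func main() {
	// Suffixes recognized by isTmpBlockDir after this change.
	tmpSuffixes := map[string]bool{
		".tmp-for-deletion": true,
		".tmp-for-creation": true,
		".tmp":              true, // pre-2.21 legacy suffix
	}
	for _, fn := range []string{
		"01BKGV7JBM69T2G1BGBGM6KB12.tmp",
		"01BKGV7JBM69T2G1BGBGM6KB12.tmp-for-creation",
		"01BKGV7JBM69T2G1BGBGM6KB12", // a live block dir, kept
	} {
		ext := filepath.Ext(fn)
		isTmp := false
		if tmpSuffixes[ext] {
			if _, err := ulid.ParseStrict(fn[:len(fn)-len(ext)]); err == nil {
				isTmp = true
			}
		}
		fmt.Printf("%s -> tmp block dir: %v\n", fn, isTmp)
	}
}
```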
@@ -2795,15 +2795,20 @@ func TestOpen_VariousBlockStates(t *testing.T) {
 		require.NoError(t, os.Remove(filepath.Join(dir, metaFilename)))
 	}
 	{
-		// Tmp blocks during creation & deletion; those should be removed on start.
+		// Tmp blocks during creation; those should be removed on start.
 		dir := createBlock(t, tmpDir, genSeries(10, 2, 30, 40))
 		require.NoError(t, fileutil.Replace(dir, dir+tmpForCreationBlockDirSuffix))
 		expectedRemovedDirs[dir+tmpForCreationBlockDirSuffix] = struct{}{}

-		// Tmp blocks during creation & deletion; those should be removed on start.
+		// Tmp blocks during deletion; those should be removed on start.
 		dir = createBlock(t, tmpDir, genSeries(10, 2, 40, 50))
 		require.NoError(t, fileutil.Replace(dir, dir+tmpForDeletionBlockDirSuffix))
 		expectedRemovedDirs[dir+tmpForDeletionBlockDirSuffix] = struct{}{}
+
+		// Pre-2.21 tmp blocks; those should be removed on start.
+		dir = createBlock(t, tmpDir, genSeries(10, 2, 50, 60))
+		require.NoError(t, fileutil.Replace(dir, dir+tmpLegacy))
+		expectedRemovedDirs[dir+tmpLegacy] = struct{}{}
 	}
 	{
 		// One ok block; but two should be replaced.
@@ -115,6 +115,7 @@ type PostingsStats struct {
 	CardinalityLabelStats []Stat
 	LabelValueStats       []Stat
 	LabelValuePairsStats  []Stat
+	NumLabelPairs         int
 }

 // Stats calculates the cardinality statistics from postings.
@@ -128,6 +129,7 @@ func (p *MemPostings) Stats(label string) *PostingsStats {
 	labels := &maxHeap{}
 	labelValueLength := &maxHeap{}
 	labelValuePairs := &maxHeap{}
+	numLabelPairs := 0

 	metrics.init(maxNumOfRecords)
 	labels.init(maxNumOfRecords)
@@ -139,6 +141,7 @@ func (p *MemPostings) Stats(label string) *PostingsStats {
 			continue
 		}
 		labels.push(Stat{Name: n, Count: uint64(len(e))})
+		numLabelPairs += len(e)
 		size = 0
 		for name, values := range e {
 			if n == label {
@@ -157,6 +160,7 @@ func (p *MemPostings) Stats(label string) *PostingsStats {
 		CardinalityLabelStats: labels.get(),
 		LabelValueStats:       labelValueLength.get(),
 		LabelValuePairsStats:  labelValuePairs.get(),
+		NumLabelPairs:         numLabelPairs,
 	}
 }
@@ -454,8 +454,9 @@ func TestLogPartialWrite(t *testing.T) {

 	for testName, testData := range tests {
 		t.Run(testName, func(t *testing.T) {
-			dirPath, err := ioutil.TempDir("", "")
+			dirPath, err := ioutil.TempDir("", "logpartialwrite")
 			require.NoError(t, err)
+			defer func() { require.NoError(t, os.RemoveAll(dirPath)) }()

 			w, err := NewSize(nil, nil, dirPath, segmentSize, false)
 			require.NoError(t, err)
@@ -480,6 +481,7 @@ func TestLogPartialWrite(t *testing.T) {
 			// Read it back. We expect no corruption.
 			s, err := OpenReadSegment(SegmentName(dirPath, 0))
 			require.NoError(t, err)
+			defer func() { require.NoError(t, s.Close()) }()

 			r := NewReader(NewSegmentBufReader(s))
 			for i := 0; i < testData.numRecords; i++ {
@@ -1236,10 +1236,11 @@ type stat struct {

 // HeadStats has information about the TSDB head.
 type HeadStats struct {
-	NumSeries  uint64 `json:"numSeries"`
-	ChunkCount int64  `json:"chunkCount"`
-	MinTime    int64  `json:"minTime"`
-	MaxTime    int64  `json:"maxTime"`
+	NumSeries     uint64 `json:"numSeries"`
+	NumLabelPairs int    `json:"numLabelPairs"`
+	ChunkCount    int64  `json:"chunkCount"`
+	MinTime       int64  `json:"minTime"`
+	MaxTime       int64  `json:"maxTime"`
 }

 // tsdbStatus has information of cardinality statistics from postings.
@@ -1281,10 +1282,11 @@ func (api *API) serveTSDBStatus(*http.Request) apiFuncResult {
 	}
 	return apiFuncResult{tsdbStatus{
 		HeadStats: HeadStats{
-			NumSeries:  s.NumSeries,
-			ChunkCount: chunkCount,
-			MinTime:    s.MinTime,
-			MaxTime:    s.MaxTime,
+			NumSeries:     s.NumSeries,
+			ChunkCount:    chunkCount,
+			MinTime:       s.MinTime,
+			MaxTime:       s.MaxTime,
+			NumLabelPairs: s.IndexPostingStats.NumLabelPairs,
 		},
 		SeriesCountByMetricName:    convertStats(s.IndexPostingStats.CardinalityMetricsStats),
 		LabelValueCountByLabelName: convertStats(s.IndexPostingStats.CardinalityLabelStats),
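To see what the extended headStats payload looks like on the wire, here is a minimal sketch (not part of this diff) that marshals the struct from the hunk above through encoding/json; the values are taken from the UI test fixture below:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// HeadStats mirrors the struct from the diff above.
type HeadStats struct {
	NumSeries     uint64 `json:"numSeries"`
	NumLabelPairs int    `json:"numLabelPairs"`
	ChunkCount    int64  `json:"chunkCount"`
	MinTime       int64  `json:"minTime"`
	MaxTime       int64  `json:"maxTime"`
}

func main() {
	hs := HeadStats{
		NumSeries:     508,
		NumLabelPairs: 1234,
		ChunkCount:    937,
		MinTime:       1591516800000,
		MaxTime:       1598896800143,
	}
	out, err := json.Marshal(hs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"numSeries":508,"numLabelPairs":1234,"chunkCount":937,"minTime":1591516800000,"maxTime":1598896800143}
}
```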
@@ -15,6 +15,7 @@ const fakeTSDBStatusResponse: {
   data: {
     headStats: {
       numSeries: 508,
+      numLabelPairs: 1234,
       chunkCount: 937,
       minTime: 1591516800000,
       maxTime: 1598896800143,
@@ -85,7 +86,7 @@ describe('TSDB Stats', () => {
       .at(0)
       .find('tbody')
       .find('td');
-    ['508', '937', '2020-06-07T08:00:00.000Z (1591516800000)', '2020-08-31T18:00:00.143Z (1598896800143)'].forEach(
+    ['508', '937', '1234', '2020-06-07T08:00:00.000Z (1591516800000)', '2020-08-31T18:00:00.143Z (1598896800143)'].forEach(
       (value, i) => {
         expect(headStats.at(i).text()).toEqual(value);
       }
@@ -14,6 +14,7 @@ interface Stats {

 interface HeadStats {
   numSeries: number;
+  numLabelPairs: number;
   chunkCount: number;
   minTime: number;
   maxTime: number;
@@ -35,10 +36,11 @@ export const TSDBStatusContent: FC<TSDBMap> = ({
   seriesCountByLabelValuePair,
 }) => {
   const unixToTime = (unix: number): string => new Date(unix).toISOString();
-  const { chunkCount, numSeries, minTime, maxTime } = headStats;
+  const { chunkCount, numSeries, numLabelPairs, minTime, maxTime } = headStats;
   const stats = [
     { header: 'Number of Series', value: numSeries },
     { header: 'Number of Chunks', value: chunkCount },
+    { header: 'Number of Label Pairs', value: numLabelPairs },
     { header: 'Current Min Time', value: `${unixToTime(minTime)} (${minTime})` },
     { header: 'Current Max Time', value: `${unixToTime(maxTime)} (${maxTime})` },
   ];
@@ -50,7 +50,7 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/route"
 	"github.com/prometheus/common/server"
-	https "github.com/prometheus/exporter-toolkit/web"
+	toolkit_web "github.com/prometheus/exporter-toolkit/web"
 	"go.uber.org/atomic"
 	"golang.org/x/net/netutil"
@@ -544,7 +544,7 @@ func (h *Handler) Listener() (net.Listener, error) {
 }

 // Run serves the HTTP endpoints.
-func (h *Handler) Run(ctx context.Context, listener net.Listener, httpsConfig string) error {
+func (h *Handler) Run(ctx context.Context, listener net.Listener, webConfig string) error {
 	if listener == nil {
 		var err error
 		listener, err = h.Listener()
@@ -580,7 +580,7 @@ func (h *Handler) Run(ctx context.Context, listener net.Listener, httpsConfig string) error {

 	errCh := make(chan error)
 	go func() {
-		errCh <- https.Serve(listener, httpSrv, httpsConfig, h.logger)
+		errCh <- toolkit_web.Serve(listener, httpSrv, webConfig, h.logger)
 	}()

 	select {
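For context, a minimal sketch (not part of this diff) of serving an http.Server through the exporter-toolkit, assuming the era-appropriate Serve(listener, server, configPath, logger) signature shown in the hunk above and a go-kit logger:

```go
package main

import (
	"log"
	"net"
	"net/http"

	kitlog "github.com/go-kit/kit/log"
	toolkit_web "github.com/prometheus/exporter-toolkit/web"
)

func main() {
	srv := &http.Server{Handler: http.DefaultServeMux}
	ln, err := net.Listen("tcp", ":9090")
	if err != nil {
		log.Fatal(err)
	}
	// An empty config path serves plain HTTP; pointing it at a
	// web-config.yml like the example above enables TLS/basic auth.
	if err := toolkit_web.Serve(ln, srv, "", kitlog.NewNopLogger()); err != nil {
		log.Fatal(err)
	}
}
```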