
Update to latest procfs library (#1611)

Bump the procfs library to v0.0.10.

Signed-off-by: Ben Kochie <superq@gmail.com>
Branch: pull/1612/head
Author: Ben Kochie, committed by GitHub
parent commit 14df2a1a1a

17 changed files (changed lines per file in parentheses):

  1. collector/softnet_linux.go (2)
  2. go.mod (2)
  3. go.sum (4)
  4. vendor/github.com/prometheus/procfs/Makefile.common (12)
  5. vendor/github.com/prometheus/procfs/crypto.go (160)
  6. vendor/github.com/prometheus/procfs/fixtures.ttar (37)
  7. vendor/github.com/prometheus/procfs/loadavg.go (62)
  8. vendor/github.com/prometheus/procfs/net_conntrackstat.go (153)
  9. vendor/github.com/prometheus/procfs/net_softnet.go (101)
  10. vendor/github.com/prometheus/procfs/net_udp.go (229)
  11. vendor/github.com/prometheus/procfs/net_unix.go (222)
  12. vendor/github.com/prometheus/procfs/proc_status.go (5)
  13. vendor/github.com/prometheus/procfs/swaps.go (89)
  14. vendor/github.com/prometheus/procfs/sysfs/class_infiniband.go (2)
  15. vendor/github.com/prometheus/procfs/xfs/parse.go (16)
  16. vendor/github.com/prometheus/procfs/xfs/xfs.go (1)
  17. vendor/modules.txt (2)

collector/softnet_linux.go (2)

@@ -70,7 +70,7 @@ func NewSoftnetCollector(logger log.Logger) (Collector, error) {
// Update gets parsed softnet statistics using procfs.
func (c *softnetCollector) Update(ch chan<- prometheus.Metric) error {
stats, err := c.fs.GatherSoftnetStats()
stats, err := c.fs.NetSoftnetStat()
if err != nil {
return fmt.Errorf("could not get softnet statistics: %s", err)
}
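Note: a minimal usage sketch of the renamed API (not part of this diff), assuming procfs v0.0.10 and the default /proc mount; the program and its printed labels are illustrative only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// NetSoftnetStat replaces the old GatherSoftnetStats call and returns one
	// SoftnetStat per line of /proc/net/softnet_stat (one line per CPU),
	// with uint32 counters instead of uint.
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}
	for cpu, s := range stats {
		fmt.Printf("cpu%d: processed=%d dropped=%d time_squeezed=%d\n",
			cpu, s.Processed, s.Dropped, s.TimeSqueezed)
	}
}
```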

go.mod (2)

@@ -15,7 +15,7 @@ require (
github.com/prometheus/client_golang v1.0.0
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
github.com/prometheus/common v0.7.0
github.com/prometheus/procfs v0.0.8
github.com/prometheus/procfs v0.0.10
github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745
github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a
go.uber.org/atomic v1.3.2 // indirect

go.sum (4)

@@ -87,8 +87,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.10 h1:QJQN3jYQhkamO4mhfUWqdDH2asK7ONOI9MTWjyAxNKM=
github.com/prometheus/procfs v0.0.10/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745 h1:IuH7WumZNax0D+rEqmy2TyhKCzrtMGqbZO0b8rO00JA=
github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=

vendor/github.com/prometheus/procfs/Makefile.common (12) generated vendored

@@ -69,12 +69,12 @@ else
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif
PROMU_VERSION ?= 0.4.0
PROMU_VERSION ?= 0.5.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.16.0
GOLANGCI_LINT_VERSION ?= v1.18.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -86,7 +86,8 @@ endif
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKERFILE_PATH ?= ./
DOCKERFILE_PATH ?= ./Dockerfile
DOCKERBUILD_CONTEXT ?= ./
DOCKER_REPO ?= prom
DOCKER_ARCHS ?= amd64
@@ -200,7 +201,7 @@ endif
.PHONY: common-build
common-build: promu
@echo ">> building binaries"
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX)
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
.PHONY: common-tarball
common-tarball: promu
@@ -211,9 +212,10 @@ common-tarball: promu
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
$(DOCKERFILE_PATH)
$(DOCKERBUILD_CONTEXT)
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)

vendor/github.com/prometheus/procfs/crypto.go (160) generated vendored

@@ -14,10 +14,10 @@
package procfs
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"strconv"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
@@ -52,80 +52,102 @@ type Crypto struct {
// structs containing the relevant info. More information available here:
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
func (fs FS) Crypto() ([]Crypto, error) {
data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
}
crypto, err := parseCrypto(data)
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
}
return crypto, nil
}
func parseCrypto(cryptoData []byte) ([]Crypto, error) {
crypto := []Crypto{}
cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))
for _, block := range cryptoBlocks {
var newCryptoElem Crypto
lines := strings.Split(string(block), "\n")
for _, line := range lines {
if strings.TrimSpace(line) == "" || line[0] == ' ' {
continue
}
fields := strings.Split(line, ":")
key := strings.TrimSpace(fields[0])
value := strings.TrimSpace(fields[1])
vp := util.NewValueParser(value)
switch strings.TrimSpace(key) {
case "async":
b, err := strconv.ParseBool(value)
if err == nil {
newCryptoElem.Async = b
}
case "blocksize":
newCryptoElem.Blocksize = vp.PUInt64()
case "chunksize":
newCryptoElem.Chunksize = vp.PUInt64()
case "digestsize":
newCryptoElem.Digestsize = vp.PUInt64()
case "driver":
newCryptoElem.Driver = value
case "geniv":
newCryptoElem.Geniv = value
case "internal":
newCryptoElem.Internal = value
case "ivsize":
newCryptoElem.Ivsize = vp.PUInt64()
case "maxauthsize":
newCryptoElem.Maxauthsize = vp.PUInt64()
case "max keysize":
newCryptoElem.MaxKeysize = vp.PUInt64()
case "min keysize":
newCryptoElem.MinKeysize = vp.PUInt64()
case "module":
newCryptoElem.Module = value
case "name":
newCryptoElem.Name = value
case "priority":
newCryptoElem.Priority = vp.PInt64()
case "refcnt":
newCryptoElem.Refcnt = vp.PInt64()
case "seedsize":
newCryptoElem.Seedsize = vp.PUInt64()
case "selftest":
newCryptoElem.Selftest = value
case "type":
newCryptoElem.Type = value
case "walksize":
newCryptoElem.Walksize = vp.PUInt64()
}
// parseCrypto parses a /proc/crypto stream into Crypto elements.
func parseCrypto(r io.Reader) ([]Crypto, error) {
var out []Crypto
s := bufio.NewScanner(r)
for s.Scan() {
text := s.Text()
switch {
case strings.HasPrefix(text, "name"):
// Each crypto element begins with its name.
out = append(out, Crypto{})
case text == "":
continue
}
kv := strings.Split(text, ":")
if len(kv) != 2 {
return nil, fmt.Errorf("malformed crypto line: %q", text)
}
k := strings.TrimSpace(kv[0])
v := strings.TrimSpace(kv[1])
// Parse the key/value pair into the currently focused element.
c := &out[len(out)-1]
if err := c.parseKV(k, v); err != nil {
return nil, err
}
crypto = append(crypto, newCryptoElem)
}
return crypto, nil
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
// parseKV parses a key/value pair into the appropriate field of c.
func (c *Crypto) parseKV(k, v string) error {
vp := util.NewValueParser(v)
switch k {
case "async":
// Interpret literal yes as true.
c.Async = v == "yes"
case "blocksize":
c.Blocksize = vp.PUInt64()
case "chunksize":
c.Chunksize = vp.PUInt64()
case "digestsize":
c.Digestsize = vp.PUInt64()
case "driver":
c.Driver = v
case "geniv":
c.Geniv = v
case "internal":
c.Internal = v
case "ivsize":
c.Ivsize = vp.PUInt64()
case "maxauthsize":
c.Maxauthsize = vp.PUInt64()
case "max keysize":
c.MaxKeysize = vp.PUInt64()
case "min keysize":
c.MinKeysize = vp.PUInt64()
case "module":
c.Module = v
case "name":
c.Name = v
case "priority":
c.Priority = vp.PInt64()
case "refcnt":
c.Refcnt = vp.PInt64()
case "seedsize":
c.Seedsize = vp.PUInt64()
case "selftest":
c.Selftest = v
case "type":
c.Type = v
case "walksize":
c.Walksize = vp.PUInt64()
}
return vp.Err()
}
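Note: a hedged sketch (not part of this diff) of consuming the rewritten parser through the public Crypto() accessor, assuming the default /proc mount; the nil checks reflect that the numeric fields are pointers produced by util.ValueParser.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.Crypto()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range entries {
		// Numeric fields such as Blocksize are *uint64 and may be nil when
		// the corresponding key is absent for a given algorithm.
		if c.Blocksize != nil {
			fmt.Printf("%s (%s): blocksize=%d async=%t\n", c.Name, c.Driver, *c.Blocksize, c.Async)
			continue
		}
		fmt.Printf("%s (%s): async=%t\n", c.Name, c.Driver, c.Async)
	}
}
```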

vendor/github.com/prometheus/procfs/fixtures.ttar (37) generated vendored

@@ -189,7 +189,7 @@ Ngid: 0
Pid: 26231
PPid: 1
TracerPid: 0
Uid: 0 0 0 0
Uid: 1000 1000 1000 0
Gid: 0 0 0 0
FDSize: 128
Groups:
@@ -554,7 +554,7 @@ power management:
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/crypto
Lines: 971
Lines: 972
name : ccm(aes)
driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni))
module : ccm
@@ -588,6 +588,7 @@ refcnt : 1
selftest : passed
internal : no
type : kpp
async : yes
name : ecb(arc4)
driver : ecb(arc4)-generic
@@ -1526,6 +1527,11 @@ blocksize : 16
min keysize : 16
max keysize : 32
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/loadavg
Lines: 1
0.02 0.04 0.05 1/497 11947
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/diskstats
@@ -1825,6 +1831,27 @@ Lines: 1
00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/udp
Lines: 4
sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
0: 0A000005:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/udp6
Lines: 3
sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0
6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/udp_broken
Lines: 2
sl local_address rem_address st
1: 00000000:0016 00000000:0000 0A
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/unix
Lines: 6
Num RefCount Protocol Flags Type St Inode Path
@@ -1930,6 +1957,12 @@ procs_blocked 1
softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/swaps
Lines: 2
Filename Type Size Used Priority
/dev/dm-2 partition 131068 176 -2
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/proc/symlinktargets
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

vendor/github.com/prometheus/procfs/loadavg.go (62) generated vendored

@@ -0,0 +1,62 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// LoadAvg represents an entry in /proc/loadavg
type LoadAvg struct {
Load1 float64
Load5 float64
Load15 float64
}
// LoadAvg returns loadavg from /proc.
func (fs FS) LoadAvg() (*LoadAvg, error) {
path := fs.proc.Path("loadavg")
data, err := util.ReadFileNoStat(path)
if err != nil {
return nil, err
}
return parseLoad(data)
}
// Parse /proc loadavg and return 1m, 5m and 15m.
func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
return nil, fmt.Errorf("could not parse load '%s': %s", load, err)
}
}
return &LoadAvg{
Load1: loads[0],
Load5: loads[1],
Load15: loads[2],
}, nil
}
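Note: LoadAvg() is a new accessor in this procfs release; a minimal sketch of calling it (not part of this diff), assuming the default /proc mount.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// LoadAvg parses the first three fields of /proc/loadavg.
	la, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("load1=%.2f load5=%.2f load15=%.2f\n", la.Load1, la.Load5, la.Load15)
}
```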

vendor/github.com/prometheus/procfs/net_conntrackstat.go (153) generated vendored

@@ -0,0 +1,153 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
// and contains netfilter conntrack statistics at one CPU core
type ConntrackStatEntry struct {
Entries uint64
Found uint64
Invalid uint64
Ignore uint64
Insert uint64
InsertFailed uint64
Drop uint64
EarlyDrop uint64
SearchRestart uint64
}
// Retrieves netfilter's conntrack statistics, split by CPU cores
func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
}
// Parses a slice of ConntrackStatEntries from the given filepath
func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
// This file is small and can be read with one syscall.
b, err := util.ReadFileNoStat(path)
if err != nil {
// Do not wrap this error so the caller can detect os.IsNotExist and
// similar conditions.
return nil, err
}
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
}
return stat, nil
}
// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
var entries []ConntrackStatEntry
scanner := bufio.NewScanner(r)
scanner.Scan()
for scanner.Scan() {
fields := strings.Fields(scanner.Text())
conntrackEntry, err := parseConntrackStatEntry(fields)
if err != nil {
return nil, err
}
entries = append(entries, *conntrackEntry)
}
return entries, nil
}
// Parses a ConntrackStatEntry from given array of fields
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
if len(fields) != 17 {
return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
}
entry := &ConntrackStatEntry{}
entries, err := parseConntrackStatField(fields[0])
if err != nil {
return nil, err
}
entry.Entries = entries
found, err := parseConntrackStatField(fields[2])
if err != nil {
return nil, err
}
entry.Found = found
invalid, err := parseConntrackStatField(fields[4])
if err != nil {
return nil, err
}
entry.Invalid = invalid
ignore, err := parseConntrackStatField(fields[5])
if err != nil {
return nil, err
}
entry.Ignore = ignore
insert, err := parseConntrackStatField(fields[8])
if err != nil {
return nil, err
}
entry.Insert = insert
insertFailed, err := parseConntrackStatField(fields[9])
if err != nil {
return nil, err
}
entry.InsertFailed = insertFailed
drop, err := parseConntrackStatField(fields[10])
if err != nil {
return nil, err
}
entry.Drop = drop
earlyDrop, err := parseConntrackStatField(fields[11])
if err != nil {
return nil, err
}
entry.EarlyDrop = earlyDrop
searchRestart, err := parseConntrackStatField(fields[16])
if err != nil {
return nil, err
}
entry.SearchRestart = searchRestart
return entry, nil
}
// Parses a uint64 from given hex in string
func parseConntrackStatField(field string) (uint64, error) {
val, err := strconv.ParseUint(field, 16, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err)
}
return val, err
}
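Note: ConntrackStat() is likewise new; a small illustrative sketch (not part of this diff) that sums the per-CPU drop counters. It assumes /proc/net/stat/nf_conntrack exists, which requires the conntrack module to be loaded.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// One ConntrackStatEntry is returned per CPU core.
	entries, err := fs.ConntrackStat()
	if err != nil {
		log.Fatal(err)
	}
	var drops uint64
	for _, e := range entries {
		drops += e.Drop + e.EarlyDrop
	}
	fmt.Printf("conntrack stat lines: %d, total drops: %d\n", len(entries), drops)
}
```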

vendor/github.com/prometheus/procfs/net_softnet.go (101) generated vendored

@@ -14,78 +14,85 @@
package procfs
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
type SoftnetEntry struct {
// SoftnetStat contains a single row of data from /proc/net/softnet_stat
type SoftnetStat struct {
// Number of processed packets
Processed uint
Processed uint32
// Number of dropped packets
Dropped uint
Dropped uint32
// Number of times processing packets ran out of quota
TimeSqueezed uint
TimeSqueezed uint32
}
// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns,
// and then return a slice of SoftnetEntry's.
func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
// NetSoftnetStat reads data from /proc/net/softnet_stat.
func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
b, err := util.ReadFileNoStat(fs.proc.Path("net/softnet_stat"))
if err != nil {
return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
return nil, err
}
return parseSoftnetEntries(data)
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err)
}
return entries, nil
}
func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
lines := strings.Split(string(data), "\n")
entries := make([]SoftnetEntry, 0)
var err error
const (
expectedColumns = 11
)
for _, line := range lines {
columns := strings.Fields(line)
func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
const expectedColumns = 11
s := bufio.NewScanner(r)
var stats []SoftnetStat
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
if width == 0 {
continue
}
if width != expectedColumns {
return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
if width != 11 {
return nil, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
}
var entry SoftnetEntry
if entry, err = parseSoftnetEntry(columns); err != nil {
return []SoftnetEntry{}, err
// We only parse the first three columns at the moment.
us, err := parseHexUint32s(columns[0:3])
if err != nil {
return nil, err
}
entries = append(entries, entry)
stats = append(stats, SoftnetStat{
Processed: us[0],
Dropped: us[1],
TimeSqueezed: us[2],
})
}
return entries, nil
return stats, nil
}
func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
var err error
var processed, dropped, timeSqueezed uint64
if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
}
if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
}
if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
func parseHexUint32s(ss []string) ([]uint32, error) {
us := make([]uint32, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 16, 32)
if err != nil {
return nil, err
}
us = append(us, uint32(u))
}
return SoftnetEntry{
Processed: uint(processed),
Dropped: uint(dropped),
TimeSqueezed: uint(timeSqueezed),
}, nil
return us, nil
}
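Note: the softnet_stat columns are hexadecimal, which is why the rewritten parser calls strconv.ParseUint with base 16 and bit size 32. A standalone illustration (not part of the library), using the first three columns of the fixture line shown above:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// The first three softnet_stat columns (processed, dropped, time_squeezed)
	// are printed by the kernel as hexadecimal counters.
	columns := []string{"00015c73", "00020e76", "F0000769"}
	for i, col := range columns {
		v, err := strconv.ParseUint(col, 16, 32)
		if err != nil {
			panic(err)
		}
		fmt.Printf("column %d: %s -> %d\n", i, col, uint32(v))
	}
}
```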

vendor/github.com/prometheus/procfs/net_udp.go (229) generated vendored

@@ -0,0 +1,229 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"encoding/hex"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
)
const (
// readLimit is used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
// With e.g. 150 Byte per line and the maximum number of 65535,
// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
readLimit = 4294967296 // Byte -> 4 GiB
)
type (
// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
NetUDP []*netUDPLine
// NetUDPSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetUDP it does not collect
// the parsed lines into a slice.
NetUDPSummary struct {
// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
TxQueueLength uint64
// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
RxQueueLength uint64
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
}
// netUDPLine represents the fields parsed from a single line
// in /proc/net/udp{,6}. Fields which are not used by UDP are skipped.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netUDPLine struct {
Sl uint64
LocalAddr net.IP
LocalPort uint64
RemAddr net.IP
RemPort uint64
St uint64
TxQueue uint64
RxQueue uint64
UID uint64
}
)
// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp.
func (fs FS) NetUDP() (NetUDP, error) {
return newNetUDP(fs.proc.Path("net/udp"))
}
// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp6.
func (fs FS) NetUDP6() (NetUDP, error) {
return newNetUDP(fs.proc.Path("net/udp6"))
}
// NetUDPSummary returns already computed statistics like the total queue lengths
// for UDP datagrams read from /proc/net/udp.
func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
return newNetUDPSummary(fs.proc.Path("net/udp"))
}
// NetUDP6Summary returns already computed statistics like the total queue lengths
// for UDP datagrams read from /proc/net/udp6.
func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
return newNetUDPSummary(fs.proc.Path("net/udp6"))
}
// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
func newNetUDP(file string) (NetUDP, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
netUDP := NetUDP{}
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetUDPLine(fields)
if err != nil {
return nil, err
}
netUDP = append(netUDP, line)
}
if err := s.Err(); err != nil {
return nil, err
}
return netUDP, nil
}
// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file.
func newNetUDPSummary(file string) (*NetUDPSummary, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
netUDPSummary := &NetUDPSummary{}
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetUDPLine(fields)
if err != nil {
return nil, err
}
netUDPSummary.TxQueueLength += line.TxQueue
netUDPSummary.RxQueueLength += line.RxQueue
netUDPSummary.UsedSockets++
}
if err := s.Err(); err != nil {
return nil, err
}
return netUDPSummary, nil
}
// parseNetUDPLine parses a single line, represented by a list of fields.
func parseNetUDPLine(fields []string) (*netUDPLine, error) {
line := &netUDPLine{}
if len(fields) < 8 {
return nil, fmt.Errorf(
"cannot parse net udp socket line as it has less then 8 columns: %s",
strings.Join(fields, " "),
)
}
var err error // parse error
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
return nil, fmt.Errorf(
"cannot parse sl field in udp socket line: %s", fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
return nil, fmt.Errorf(
"cannot parse local_address field in udp socket line: %s", fields[1])
}
if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil {
return nil, fmt.Errorf(
"cannot parse local_address value in udp socket line: %s", err)
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse local_address port value in udp socket line: %s", err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
return nil, fmt.Errorf(
"cannot parse rem_address field in udp socket line: %s", fields[1])
}
if line.RemAddr, err = hex.DecodeString(r[0]); err != nil {
return nil, fmt.Errorf(
"cannot parse rem_address value in udp socket line: %s", err)
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse rem_address port value in udp socket line: %s", err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse st value in udp socket line: %s", err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
"cannot parse tx/rx queues in udp socket line as it has a missing colon: %s",
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse uid value in udp socket line: %s", err)
}
return line, nil
}
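Note: a hedged sketch (not part of this diff) of the two new UDP accessors, assuming the default /proc mount; NetUDPSummary aggregates totals while NetUDP keeps every parsed socket line.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// Summary variant: totals only, without retaining each parsed line.
	sum, err := fs.NetUDPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("udp sockets=%d tx_queue=%d rx_queue=%d\n",
		sum.UsedSockets, sum.TxQueueLength, sum.RxQueueLength)

	// Per-socket variant: addresses are decoded into net.IP values.
	socks, err := fs.NetUDP()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range socks {
		fmt.Printf("%s:%d -> %s:%d uid=%d\n",
			s.LocalAddr, s.LocalPort, s.RemAddr, s.RemPort, s.UID)
	}
}
```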

vendor/github.com/prometheus/procfs/net_unix.go (222) generated vendored

@@ -15,7 +15,6 @@ package procfs
import (
"bufio"
"errors"
"fmt"
"io"
"os"
@@ -27,25 +26,15 @@ import (
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
const (
netUnixKernelPtrIdx = iota
netUnixRefCountIdx
_
netUnixFlagsIdx
netUnixTypeIdx
netUnixStateIdx
netUnixInodeIdx
// Inode and Path are optional.
netUnixStaticFieldsCnt = 6
)
// Constants for the various /proc/net/unix enumerations.
// TODO: match against x/sys/unix or similar?
const (
netUnixTypeStream = 1
netUnixTypeDgram = 2
netUnixTypeSeqpacket = 5
netUnixFlagListen = 1 << 16
netUnixFlagDefault = 0
netUnixFlagListen = 1 << 16
netUnixStateUnconnected = 1
netUnixStateConnecting = 2
@@ -53,129 +42,127 @@ const (
netUnixStateDisconnected = 4
)
var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
// NetUNIXType is the type of the type field.
type NetUNIXType uint64
// NetUnixType is the type of the type field.
type NetUnixType uint64
// NetUNIXFlags is the type of the flags field.
type NetUNIXFlags uint64
// NetUnixFlags is the type of the flags field.
type NetUnixFlags uint64
// NetUNIXState is the type of the state field.
type NetUNIXState uint64
// NetUnixState is the type of the state field.
type NetUnixState uint64
// NetUnixLine represents a line of /proc/net/unix.
type NetUnixLine struct {
// NetUNIXLine represents a line of /proc/net/unix.
type NetUNIXLine struct {
KernelPtr string
RefCount uint64
Protocol uint64
Flags NetUnixFlags
Type NetUnixType
State NetUnixState
Flags NetUNIXFlags
Type NetUNIXType
State NetUNIXState
Inode uint64
Path string
}
// NetUnix holds the data read from /proc/net/unix.
type NetUnix struct {
Rows []*NetUnixLine
// NetUNIX holds the data read from /proc/net/unix.
type NetUNIX struct {
Rows []*NetUNIXLine
}
// NewNetUnix returns data read from /proc/net/unix.
func NewNetUnix() (*NetUnix, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewNetUnix()
// NetUNIX returns data read from /proc/net/unix.
func (fs FS) NetUNIX() (*NetUNIX, error) {
return readNetUNIX(fs.proc.Path("net/unix"))
}
// NewNetUnix returns data read from /proc/net/unix.
func (fs FS) NewNetUnix() (*NetUnix, error) {
return NewNetUnixByPath(fs.proc.Path("net/unix"))
}
// NewNetUnixByPath returns data read from /proc/net/unix by file path.
// It might returns an error with partial parsed data, if an error occur after some data parsed.
func NewNetUnixByPath(path string) (*NetUnix, error) {
f, err := os.Open(path)
// readNetUNIX reads data in /proc/net/unix format from the specified file.
func readNetUNIX(file string) (*NetUNIX, error) {
// This file could be quite large and a streaming read is desirable versus
// reading the entire contents at once.
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
return NewNetUnixByReader(f)
return parseNetUNIX(f)
}
// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
// It might returns an error with partial parsed data, if an error occur after some data parsed.
func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
nu := &NetUnix{
Rows: make([]*NetUnixLine, 0, 32),
}
scanner := bufio.NewScanner(reader)
// Omit the header line.
scanner.Scan()
header := scanner.Text()
// parseNetUNIX creates a NetUnix structure from the incoming stream.
func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
// Begin scanning by checking for the existence of Inode.
s := bufio.NewScanner(r)
s.Scan()
// From the man page of proc(5), it does not contain an Inode field,
// but in actually it exists.
// This code works for both cases.
hasInode := strings.Contains(header, "Inode")
// but in actually it exists. This code works for both cases.
hasInode := strings.Contains(s.Text(), "Inode")
minFieldsCnt := netUnixStaticFieldsCnt
// Expect a minimum number of fields, but Inode and Path are optional:
// Num RefCount Protocol Flags Type St Inode Path
minFields := 6
if hasInode {
minFieldsCnt++
minFields++
}
for scanner.Scan() {
line := scanner.Text()
item, err := nu.parseLine(line, hasInode, minFieldsCnt)
var nu NetUNIX
for s.Scan() {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
return nu, err
return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err)
}
nu.Rows = append(nu.Rows, item)
}
return nu, scanner.Err()
if err := s.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err)
}
return &nu, nil
}
func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
fields := strings.Fields(line)
fieldsLen := len(fields)
if fieldsLen < minFieldsCnt {
return nil, fmt.Errorf(
"Parse Unix domain failed: expect at least %d fields but got %d",
minFieldsCnt, fieldsLen)
}
kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
l := len(fields)
if l < min {
return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
}
users, err := u.parseUsers(fields[netUnixRefCountIdx])
// Field offsets are as follows:
// Num RefCount Protocol Flags Type St Inode Path
kernelPtr := strings.TrimSuffix(fields[0], ":")
users, err := u.parseUsers(fields[1])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err)
}
flags, err := u.parseFlags(fields[netUnixFlagsIdx])
flags, err := u.parseFlags(fields[3])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err)
}
typ, err := u.parseType(fields[netUnixTypeIdx])
typ, err := u.parseType(fields[4])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err)
}
state, err := u.parseState(fields[netUnixStateIdx])
state, err := u.parseState(fields[5])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err)
}
var inode uint64
if hasInode {
inodeStr := fields[netUnixInodeIdx]
inode, err = u.parseInode(inodeStr)
inode, err = u.parseInode(fields[6])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err)
}
}
nuLine := &NetUnixLine{
n := &NetUNIXLine{
KernelPtr: kernelPtr,
RefCount: users,
Type: typ,
@@ -185,57 +172,56 @@ func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetU
}
// Path field is optional.
if fieldsLen > minFieldsCnt {
pathIdx := netUnixInodeIdx + 1
if l > min {
// Path occurs at either index 6 or 7 depending on whether inode is
// already present.
pathIdx := 7
if !hasInode {
pathIdx--
}
nuLine.Path = fields[pathIdx]
}
return nuLine, nil
}
func (u NetUnix) parseKernelPtr(str string) (string, error) {
if !strings.HasSuffix(str, ":") {
return "", errInvalidKernelPtrFmt
n.Path = fields[pathIdx]
}
return str[:len(str)-1], nil
return n, nil
}
func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 32)
func (u NetUNIX) parseUsers(s string) (uint64, error) {
return strconv.ParseUint(s, 16, 32)
}
func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
typ, err := strconv.ParseUint(hexStr, 16, 16)
func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
typ, err := strconv.ParseUint(s, 16, 16)
if err != nil {
return 0, err
}
return NetUnixType(typ), nil
return NetUNIXType(typ), nil
}
func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
flags, err := strconv.ParseUint(hexStr, 16, 32)
func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
flags, err := strconv.ParseUint(s, 16, 32)
if err != nil {
return 0, err
}
return NetUnixFlags(flags), nil
return NetUNIXFlags(flags), nil
}
func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
st, err := strconv.ParseInt(hexStr, 16, 8)
func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
st, err := strconv.ParseInt(s, 16, 8)
if err != nil {
return 0, err
}
return NetUnixState(st), nil
return NetUNIXState(st), nil
}
func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
return strconv.ParseUint(inodeStr, 10, 64)
func (u NetUNIX) parseInode(s string) (uint64, error) {
return strconv.ParseUint(s, 10, 64)
}
func (t NetUnixType) String() string {
func (t NetUNIXType) String() string {
switch t {
case netUnixTypeStream:
return "stream"
@@ -247,7 +233,7 @@ func (t NetUnixType) String() string {
return "unknown"
}
func (f NetUnixFlags) String() string {
func (f NetUNIXFlags) String() string {
switch f {
case netUnixFlagListen:
return "listen"
@@ -256,7 +242,7 @@ func (f NetUnixFlags) String() string {
}
}
func (s NetUnixState) String() string {
func (s NetUNIXState) String() string {
switch s {
case netUnixStateUnconnected:
return "unconnected"
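Note: a minimal sketch (not part of this diff) of the renamed NetUNIX accessor, assuming the default /proc mount; unlike the old NewNetUnix constructors, parse errors now abort instead of returning partial data.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	nu, err := fs.NetUNIX()
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range nu.Rows {
		// Type and State render via their String() methods (e.g. "stream", "connected").
		fmt.Printf("%s type=%s state=%s inode=%d path=%q\n",
			row.KernelPtr, row.Type, row.State, row.Inode, row.Path)
	}
}
```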

vendor/github.com/prometheus/procfs/proc_status.go (5) generated vendored

@@ -71,6 +71,9 @@ type ProcStatus struct {
VoluntaryCtxtSwitches uint64
// Number of involuntary context switches.
NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs))
UIDs [4]string
}
// NewStatus returns the current status information of the process.
@@ -114,6 +117,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "Uid":
copy(s.UIDs[:], strings.Split(vString, "\t"))
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":

vendor/github.com/prometheus/procfs/swaps.go (89) generated vendored

@@ -0,0 +1,89 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Swap represents an entry in /proc/swaps.
type Swap struct {
Filename string
Type string
Size int
Used int
Priority int
}
// Swaps returns a slice of all configured swap devices on the system.
func (fs FS) Swaps() ([]*Swap, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
if err != nil {
return nil, err
}
return parseSwaps(data)
}
func parseSwaps(info []byte) ([]*Swap, error) {
swaps := []*Swap{}
scanner := bufio.NewScanner(bytes.NewReader(info))
scanner.Scan() // ignore header line
for scanner.Scan() {
swapString := scanner.Text()
parsedSwap, err := parseSwapString(swapString)
if err != nil {
return nil, err
}
swaps = append(swaps, parsedSwap)
}
err := scanner.Err()
return swaps, err
}
func parseSwapString(swapString string) (*Swap, error) {
var err error
swapFields := strings.Fields(swapString)
swapLength := len(swapFields)
if swapLength < 5 {
return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
}
swap := &Swap{
Filename: swapFields[0],
Type: swapFields[1],
}
swap.Size, err = strconv.Atoi(swapFields[2])
if err != nil {
return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
}
swap.Used, err = strconv.Atoi(swapFields[3])
if err != nil {
return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
}
swap.Priority, err = strconv.Atoi(swapFields[4])
if err != nil {
return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
}
return swap, nil
}
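Note: Swaps() is another new accessor; a minimal sketch (not part of this diff), assuming the default /proc mount. The Size and Used columns of /proc/swaps are kernel-reported KiB counts.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	swaps, err := fs.Swaps()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range swaps {
		fmt.Printf("%s (%s): %d KiB used of %d KiB, priority %d\n",
			s.Filename, s.Type, s.Used, s.Size, s.Priority)
	}
}
```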

vendor/github.com/prometheus/procfs/sysfs/class_infiniband.go (2) generated vendored

@@ -97,7 +97,7 @@ func (fs FS) InfiniBandClass() (InfiniBandClass, error) {
dirs, err := ioutil.ReadDir(path)
if err != nil {
return nil, fmt.Errorf("failed to list InfiniBand devices at %q: %v", path, err)
return nil, err
}
ibc := make(InfiniBandClass, len(dirs))

vendor/github.com/prometheus/procfs/xfs/parse.go (16) generated vendored

@@ -381,11 +381,15 @@ func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
}
func quotaManagerStats(us []uint32) (QuotaManagerStats, error) {
if l := len(us); l != 8 {
// The "Unused" attribute first appears in Linux 4.20
// As a result either 8 or 9 elements may appear in this slice depending on
// the kernel version.
l := len(us)
if l != 8 && l != 9 {
return QuotaManagerStats{}, fmt.Errorf("incorrect number of values for XFS quota stats: %d", l)
}
return QuotaManagerStats{
s := QuotaManagerStats{
Reclaims: us[0],
ReclaimMisses: us[1],
DquoteDups: us[2],
@@ -394,7 +398,13 @@ func quotaManagerStats(us []uint32) (QuotaManagerStats, error) {
Wants: us[5],
ShakeReclaims: us[6],
InactReclaims: us[7],
}, nil
}
if l > 8 {
s.Unused = us[8]
}
return s, nil
}
func debugStats(us []uint32) (DebugStats, error) {

vendor/github.com/prometheus/procfs/xfs/xfs.go (1) generated vendored

@@ -202,6 +202,7 @@ type QuotaManagerStats struct {
Wants uint32
ShakeReclaims uint32
InactReclaims uint32
Unused uint32
}
// XstratStats contains statistics regarding bytes processed by the XFS daemon.

vendor/modules.txt (2) vendored

@@ -53,7 +53,7 @@ github.com/prometheus/common/model
github.com/prometheus/common/promlog
github.com/prometheus/common/promlog/flag
github.com/prometheus/common/version
# github.com/prometheus/procfs v0.0.8
# github.com/prometheus/procfs v0.0.10
github.com/prometheus/procfs
github.com/prometheus/procfs/bcache
github.com/prometheus/procfs/internal/fs
