Update github.com/prometheus/procfs dependencies
parent 047003b6bb
commit 30fb33f7a4
@ -0,0 +1,84 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package bcache provides access to statistics exposed by the bcache (Linux
// block cache).
package bcache

// Stats contains bcache runtime statistics, parsed from /sys/fs/bcache/.
//
// The names and meanings of each statistic were taken from bcache.txt and
// files in drivers/md/bcache in the Linux kernel source. Counters are uint64
// (in-kernel counters are mostly unsigned long).
type Stats struct {
    // The name of the bcache used to source these statistics.
    Name   string
    Bcache BcacheStats
    Bdevs  []BdevStats
    Caches []CacheStats
}

// BcacheStats contains statistics tied to a bcache ID.
type BcacheStats struct {
    AverageKeySize        uint64
    BtreeCacheSize        uint64
    CacheAvailablePercent uint64
    Congested             uint64
    RootUsagePercent      uint64
    TreeDepth             uint64
    Internal              InternalStats
    FiveMin               PeriodStats
    Total                 PeriodStats
}

// BdevStats contains statistics for one backing device.
type BdevStats struct {
    Name      string
    DirtyData uint64
    FiveMin   PeriodStats
    Total     PeriodStats
}

// CacheStats contains statistics for one cache device.
type CacheStats struct {
    Name            string
    IOErrors        uint64
    MetadataWritten uint64
    Written         uint64
    Priority        PriorityStats
}

// PriorityStats contains statistics from the priority_stats file.
type PriorityStats struct {
    UnusedPercent   uint64
    MetadataPercent uint64
}

// InternalStats contains internal bcache statistics.
type InternalStats struct {
    ActiveJournalEntries                uint64
    BtreeNodes                          uint64
    BtreeReadAverageDurationNanoSeconds uint64
    CacheReadRaces                      uint64
}

// PeriodStats contains statistics for a time period (5 min or total).
type PeriodStats struct {
    Bypassed            uint64
    CacheBypassHits     uint64
    CacheBypassMisses   uint64
    CacheHits           uint64
    CacheMissCollisions uint64
    CacheMisses         uint64
    CacheReadaheads     uint64
}
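The structs above are plain data; a consumer typically derives ratios or rates from the raw counters. A minimal sketch, assuming only the types defined above (the hitRatio helper and the literal counter values are hypothetical, not part of the package):

package main

import (
    "fmt"

    "github.com/prometheus/procfs/bcache"
)

// hitRatio is a hypothetical helper: it derives a cache hit ratio from the
// counters carried in a PeriodStats value.
func hitRatio(p bcache.PeriodStats) float64 {
    total := p.CacheHits + p.CacheMisses
    if total == 0 {
        return 0
    }
    return float64(p.CacheHits) / float64(total)
}

func main() {
    // Illustrative values only.
    fiveMin := bcache.PeriodStats{CacheHits: 95, CacheMisses: 5}
    fmt.Printf("5 min hit ratio: %.2f\n", hitRatio(fiveMin))
}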
@ -0,0 +1,330 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bcache

import (
    "bufio"
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "path/filepath"
    "strconv"
    "strings"
)

// parsePseudoFloat parses the peculiar format produced by bcache's bch_hprint.
func parsePseudoFloat(str string) (float64, error) {
    ss := strings.Split(str, ".")

    intPart, err := strconv.ParseFloat(ss[0], 64)
    if err != nil {
        return 0, err
    }

    if len(ss) == 1 {
        // Pure integers are fine.
        return intPart, nil
    }
    fracPart, err := strconv.ParseFloat(ss[1], 64)
    if err != nil {
        return 0, err
    }
    // fracPart is a number between 0 and 1023 divided by 100; it is off
    // by a small amount. Unexpected bumps in time lines may occur because
    // for bch_hprint .1 != .10 and .10 > .9 (at least up to Linux
    // v4.12-rc3).

    // Restore the proper order:
    fracPart = fracPart / 10.24
    return intPart + fracPart, nil
}

// dehumanize converts a human-readable byte slice into a uint64.
func dehumanize(hbytes []byte) (uint64, error) {
    ll := len(hbytes)
    if ll == 0 {
        return 0, fmt.Errorf("zero-length reply")
    }
    lastByte := hbytes[ll-1]
    mul := float64(1)
    var (
        mant float64
        err  error
    )
    // If lastByte is beyond the range of ASCII digits, it must be a
    // multiplier.
    if lastByte > 57 {
        // Remove the multiplier from the slice.
        hbytes = hbytes[:len(hbytes)-1]

        const (
            _ = 1 << (10 * iota)
            KiB
            MiB
            GiB
            TiB
            PiB
            EiB
            ZiB
            YiB
        )

        multipliers := map[rune]float64{
            // Source for conversion rules:
            // linux-kernel/drivers/md/bcache/util.c:bch_hprint()
            'k': KiB,
            'M': MiB,
            'G': GiB,
            'T': TiB,
            'P': PiB,
            'E': EiB,
            'Z': ZiB,
            'Y': YiB,
        }
        mul = float64(multipliers[rune(lastByte)])
        mant, err = parsePseudoFloat(string(hbytes))
        if err != nil {
            return 0, err
        }
    } else {
        // Not humanized by bch_hprint.
        mant, err = strconv.ParseFloat(string(hbytes), 64)
        if err != nil {
            return 0, err
        }
    }
    res := uint64(mant * mul)
    return res, nil
}

type parser struct {
    uuidPath   string
    subDir     string
    currentDir string
    err        error
}

func (p *parser) setSubDir(pathElements ...string) {
    p.subDir = path.Join(pathElements...)
    p.currentDir = path.Join(p.uuidPath, p.subDir)
}

func (p *parser) readValue(fileName string) uint64 {
    if p.err != nil {
        return 0
    }
    path := path.Join(p.currentDir, fileName)
    byt, err := ioutil.ReadFile(path)
    if err != nil {
        p.err = fmt.Errorf("failed to read: %s", path)
        return 0
    }
    // Remove the trailing newline.
    byt = byt[:len(byt)-1]
    res, err := dehumanize(byt)
    p.err = err
    return res
}

// parsePriorityStats parses lines from the priority_stats file.
func parsePriorityStats(line string, ps *PriorityStats) error {
    var (
        value uint64
        err   error
    )
    switch {
    case strings.HasPrefix(line, "Unused:"):
        fields := strings.Fields(line)
        rawValue := fields[len(fields)-1]
        valueStr := strings.TrimSuffix(rawValue, "%")
        value, err = strconv.ParseUint(valueStr, 10, 64)
        if err != nil {
            return err
        }
        ps.UnusedPercent = value
    case strings.HasPrefix(line, "Metadata:"):
        fields := strings.Fields(line)
        rawValue := fields[len(fields)-1]
        valueStr := strings.TrimSuffix(rawValue, "%")
        value, err = strconv.ParseUint(valueStr, 10, 64)
        if err != nil {
            return err
        }
        ps.MetadataPercent = value
    }
    return nil
}

func (p *parser) getPriorityStats() PriorityStats {
    var res PriorityStats

    if p.err != nil {
        return res
    }

    path := path.Join(p.currentDir, "priority_stats")

    file, err := os.Open(path)
    if err != nil {
        p.err = fmt.Errorf("failed to read: %s", path)
        return res
    }
    defer file.Close()

    scanner := bufio.NewScanner(file)
    for scanner.Scan() {
        err = parsePriorityStats(scanner.Text(), &res)
        if err != nil {
            p.err = fmt.Errorf("failed to parse: %s (%s)", path, err)
            return res
        }
    }
    if err := scanner.Err(); err != nil {
        p.err = fmt.Errorf("failed to parse: %s (%s)", path, err)
        return res
    }
    return res
}

// GetStats collects from sysfs files data tied to one bcache ID.
func GetStats(uuidPath string) (*Stats, error) {
    var bs Stats

    par := parser{uuidPath: uuidPath}

    // bcache stats

    // dir <uuidPath>
    par.setSubDir("")
    bs.Bcache.AverageKeySize = par.readValue("average_key_size")
    bs.Bcache.BtreeCacheSize = par.readValue("btree_cache_size")
    bs.Bcache.CacheAvailablePercent = par.readValue("cache_available_percent")
    bs.Bcache.Congested = par.readValue("congested")
    bs.Bcache.RootUsagePercent = par.readValue("root_usage_percent")
    bs.Bcache.TreeDepth = par.readValue("tree_depth")

    // bcache stats (internal)

    // dir <uuidPath>/internal
    par.setSubDir("internal")
    bs.Bcache.Internal.ActiveJournalEntries = par.readValue("active_journal_entries")
    bs.Bcache.Internal.BtreeNodes = par.readValue("btree_nodes")
    bs.Bcache.Internal.BtreeReadAverageDurationNanoSeconds = par.readValue("btree_read_average_duration_us")
    bs.Bcache.Internal.CacheReadRaces = par.readValue("cache_read_races")

    // bcache stats (period)

    // dir <uuidPath>/stats_five_minute
    par.setSubDir("stats_five_minute")
    bs.Bcache.FiveMin.Bypassed = par.readValue("bypassed")
    bs.Bcache.FiveMin.CacheBypassHits = par.readValue("cache_bypass_hits")
    bs.Bcache.FiveMin.CacheBypassMisses = par.readValue("cache_bypass_misses")
    bs.Bcache.FiveMin.CacheHits = par.readValue("cache_hits")
    bs.Bcache.FiveMin.CacheMissCollisions = par.readValue("cache_miss_collisions")
    bs.Bcache.FiveMin.CacheMisses = par.readValue("cache_misses")
    bs.Bcache.FiveMin.CacheReadaheads = par.readValue("cache_readaheads")

    // dir <uuidPath>/stats_total
    par.setSubDir("stats_total")
    bs.Bcache.Total.Bypassed = par.readValue("bypassed")
    bs.Bcache.Total.CacheBypassHits = par.readValue("cache_bypass_hits")
    bs.Bcache.Total.CacheBypassMisses = par.readValue("cache_bypass_misses")
    bs.Bcache.Total.CacheHits = par.readValue("cache_hits")
    bs.Bcache.Total.CacheMissCollisions = par.readValue("cache_miss_collisions")
    bs.Bcache.Total.CacheMisses = par.readValue("cache_misses")
    bs.Bcache.Total.CacheReadaheads = par.readValue("cache_readaheads")

    if par.err != nil {
        return nil, par.err
    }

    // bdev stats

    reg := path.Join(uuidPath, "bdev[0-9]*")
    bdevDirs, err := filepath.Glob(reg)
    if err != nil {
        return nil, err
    }

    bs.Bdevs = make([]BdevStats, len(bdevDirs))

    for ii, bdevDir := range bdevDirs {
        var bds = &bs.Bdevs[ii]

        bds.Name = filepath.Base(bdevDir)

        // dir <uuidPath>/<bds.Name>
        par.setSubDir(bds.Name)
        bds.DirtyData = par.readValue("dirty_data")

        // dir <uuidPath>/<bds.Name>/stats_five_minute
        par.setSubDir(bds.Name, "stats_five_minute")
        bds.FiveMin.Bypassed = par.readValue("bypassed")
        bds.FiveMin.CacheBypassHits = par.readValue("cache_bypass_hits")
        bds.FiveMin.CacheBypassMisses = par.readValue("cache_bypass_misses")
        bds.FiveMin.CacheHits = par.readValue("cache_hits")
        bds.FiveMin.CacheMissCollisions = par.readValue("cache_miss_collisions")
        bds.FiveMin.CacheMisses = par.readValue("cache_misses")
        bds.FiveMin.CacheReadaheads = par.readValue("cache_readaheads")

        // dir <uuidPath>/<bds.Name>/stats_total
        par.setSubDir(bds.Name, "stats_total")
        bds.Total.Bypassed = par.readValue("bypassed")
        bds.Total.CacheBypassHits = par.readValue("cache_bypass_hits")
        bds.Total.CacheBypassMisses = par.readValue("cache_bypass_misses")
        bds.Total.CacheHits = par.readValue("cache_hits")
        bds.Total.CacheMissCollisions = par.readValue("cache_miss_collisions")
        bds.Total.CacheMisses = par.readValue("cache_misses")
        bds.Total.CacheReadaheads = par.readValue("cache_readaheads")
    }

    if par.err != nil {
        return nil, par.err
    }

    // cache stats

    reg = path.Join(uuidPath, "cache[0-9]*")
    cacheDirs, err := filepath.Glob(reg)
    if err != nil {
        return nil, err
    }
    bs.Caches = make([]CacheStats, len(cacheDirs))

    for ii, cacheDir := range cacheDirs {
        var cs = &bs.Caches[ii]
        cs.Name = filepath.Base(cacheDir)

        // dir is <uuidPath>/<cs.Name>
        par.setSubDir(cs.Name)
        cs.IOErrors = par.readValue("io_errors")
        cs.MetadataWritten = par.readValue("metadata_written")
        cs.Written = par.readValue("written")

        ps := par.getPriorityStats()
        cs.Priority = ps
    }

    if par.err != nil {
        return nil, par.err
    }

    return &bs, nil
}
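The pseudo-float handling is the subtle part of this parser: bch_hprint emits at most one digit after the decimal point, which parsePseudoFloat rescales by 1/10.24 before the size suffix is applied. A test-style sketch inside the bcache package (illustrative only, not part of the upstream test suite):

package bcache

import "testing"

// TestDehumanizeSketch is a hypothetical test covering the two code paths of
// dehumanize: a bch_hprint-style value with a size suffix, and a plain integer.
func TestDehumanizeSketch(t *testing.T) {
    got, err := dehumanize([]byte("1.0M"))
    if err != nil {
        t.Fatal(err)
    }
    if got != 1<<20 {
        t.Fatalf("want %d (1 MiB), got %d", 1<<20, got)
    }

    got, err = dehumanize([]byte("512"))
    if err != nil {
        t.Fatal(err)
    }
    if got != 512 {
        t.Fatalf("want 512, got %d", got)
    }
}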
@ -3,15 +3,66 @@ package procfs
import (
    "bufio"
    "fmt"
    "io"
    "os"
    "strconv"
    "strings"
)

// CPUStat shows how much time the cpu spent in various stages.
type CPUStat struct {
    User      float64
    Nice      float64
    System    float64
    Idle      float64
    Iowait    float64
    IRQ       float64
    SoftIRQ   float64
    Steal     float64
    Guest     float64
    GuestNice float64
}

// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
// It is possible to get per-cpu stats by reading /proc/softirqs.
type SoftIRQStat struct {
    Hi          uint64
    Timer       uint64
    NetTx       uint64
    NetRx       uint64
    Block       uint64
    BlockIoPoll uint64
    Tasklet     uint64
    Sched       uint64
    Hrtimer     uint64
    Rcu         uint64
}

// Stat represents kernel/system statistics.
type Stat struct {
    // Boot time in seconds since the Epoch.
    BootTime uint64
    // Summed up cpu statistics.
    CPUTotal CPUStat
    // Per-CPU statistics.
    CPU []CPUStat
    // Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
    IRQTotal uint64
    // Number of times a numbered IRQ was triggered.
    IRQ []uint64
    // Number of times a context switch happened.
    ContextSwitches uint64
    // Number of times a process was created.
    ProcessCreated uint64
    // Number of processes currently running.
    ProcessesRunning uint64
    // Number of processes currently blocked (waiting for IO).
    ProcessesBlocked uint64
    // Number of times a softirq was scheduled.
    SoftIRQTotal uint64
    // Detailed softirq statistics.
    SoftIRQ SoftIRQStat
}

// NewStat returns kernel/system statistics read from /proc/stat.
@ -24,33 +75,145 @@ func NewStat() (Stat, error) {
    return fs.NewStat()
}

// parseCPUStat parses a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
func parseCPUStat(line string) (CPUStat, int64, error) {
    cpuStat := CPUStat{}
    var cpu string

    count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
        &cpu,
        &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
        &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
        &cpuStat.Guest, &cpuStat.GuestNice)

    if err != nil && err != io.EOF {
        return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
    }
    if count == 0 {
        return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
    }

    cpuStat.User /= userHZ
    cpuStat.Nice /= userHZ
    cpuStat.System /= userHZ
    cpuStat.Idle /= userHZ
    cpuStat.Iowait /= userHZ
    cpuStat.IRQ /= userHZ
    cpuStat.SoftIRQ /= userHZ
    cpuStat.Steal /= userHZ
    cpuStat.Guest /= userHZ
    cpuStat.GuestNice /= userHZ

    if cpu == "cpu" {
        return cpuStat, -1, nil
    }

    cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
    if err != nil {
        return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
    }

    return cpuStat, cpuID, nil
}

// parseSoftIRQStat parses a softirq line.
func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
    softIRQStat := SoftIRQStat{}
    var total uint64
    var prefix string

    _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
        &prefix, &total,
        &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
        &softIRQStat.Block, &softIRQStat.BlockIoPoll,
        &softIRQStat.Tasklet, &softIRQStat.Sched,
        &softIRQStat.Hrtimer, &softIRQStat.Rcu)

    if err != nil {
        return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
    }

    return softIRQStat, total, nil
}

// NewStat returns information about current kernel/system statistics.
func (fs FS) NewStat() (Stat, error) {
    // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt

    f, err := os.Open(fs.Path("stat"))
    if err != nil {
        return Stat{}, err
    }
    defer f.Close()

    stat := Stat{}

    scanner := bufio.NewScanner(f)
    for scanner.Scan() {
        line := scanner.Text()
        parts := strings.Fields(line)
        // require at least <key> <value>
        if len(parts) < 2 {
            continue
        }
        switch {
        case parts[0] == "btime":
            if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
                return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
            }
        case parts[0] == "intr":
            if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
                return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
            }
            numberedIRQs := parts[2:]
            stat.IRQ = make([]uint64, len(numberedIRQs))
            for i, count := range numberedIRQs {
                if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
                    return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
                }
            }
        case parts[0] == "ctxt":
            if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
                return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
            }
        case parts[0] == "processes":
            if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
                return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
            }
        case parts[0] == "procs_running":
            if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
                return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
            }
        case parts[0] == "procs_blocked":
            if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
                return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
            }
        case parts[0] == "softirq":
            softIRQStats, total, err := parseSoftIRQStat(line)
            if err != nil {
                return Stat{}, err
            }
            stat.SoftIRQTotal = total
            stat.SoftIRQ = softIRQStats
        case strings.HasPrefix(parts[0], "cpu"):
            cpuStat, cpuID, err := parseCPUStat(line)
            if err != nil {
                return Stat{}, err
            }
            if cpuID == -1 {
                stat.CPUTotal = cpuStat
            } else {
                for int64(len(stat.CPU)) <= cpuID {
                    stat.CPU = append(stat.CPU, CPUStat{})
                }
                stat.CPU[cpuID] = cpuStat
            }
        }
    }

    if err := scanner.Err(); err != nil {
        return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
    }

    return stat, nil
}
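A minimal usage sketch for the expanded Stat API (NewStat reads /proc/stat from the default mount point; the printed fields are an arbitrary selection):

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    stat, err := procfs.NewStat()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("boot time: %d\n", stat.BootTime)
    fmt.Printf("context switches: %d\n", stat.ContextSwitches)
    fmt.Printf("total CPU user seconds: %.2f\n", stat.CPUTotal.User)
    for i, c := range stat.CPU {
        fmt.Printf("cpu%d idle seconds: %.2f\n", i, c.Idle)
    }
}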
@ -18,6 +18,7 @@ import (
    "os"
    "path/filepath"

    "github.com/prometheus/procfs/bcache"
    "github.com/prometheus/procfs/xfs"
)
@ -80,3 +81,28 @@ func (fs FS) XFSStats() ([]*xfs.Stats, error) {
    return stats, nil
}

// BcacheStats retrieves bcache runtime statistics for each bcache.
func (fs FS) BcacheStats() ([]*bcache.Stats, error) {
    matches, err := filepath.Glob(fs.Path("fs/bcache/*-*"))
    if err != nil {
        return nil, err
    }

    stats := make([]*bcache.Stats, 0, len(matches))
    for _, uuidPath := range matches {
        // "*-*" in glob above indicates the name of the bcache.
        name := filepath.Base(uuidPath)

        // stats
        s, err := bcache.GetStats(uuidPath)
        if err != nil {
            return nil, err
        }

        s.Name = name
        stats = append(stats, s)
    }

    return stats, nil
}
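A minimal sketch of collecting per-cache-set statistics through the new FS.BcacheStats accessor. The mount point here is an assumption: the accessor globs fs/bcache/<uuid> relative to whatever root the FS was created with, and "/sys" is used purely for illustration:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    // Assumption: an FS rooted where the fs/bcache tree lives (sysfs here).
    fs, err := procfs.NewFS("/sys")
    if err != nil {
        log.Fatal(err)
    }

    stats, err := fs.BcacheStats()
    if err != nil {
        log.Fatal(err)
    }
    for _, s := range stats {
        fmt.Printf("%s: %d%% cache available, tree depth %d\n",
            s.Name, s.Bcache.CacheAvailablePercent, s.Bcache.TreeDepth)
    }
}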
@ -0,0 +1,187 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
    "bufio"
    "fmt"
    "os"
    "strconv"
    "strings"
)

// XfrmStat models the contents of /proc/net/xfrm_stat.
type XfrmStat struct {
    // All errors which are not matched by others
    XfrmInError int
    // No buffer is left
    XfrmInBufferError int
    // Header error
    XfrmInHdrError int
    // No state is found,
    // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
    XfrmInNoStates int
    // Transformation protocol specific error,
    // e.g. SA key is wrong
    XfrmInStateProtoError int
    // Transformation mode specific error
    XfrmInStateModeError int
    // Sequence error,
    // e.g. sequence number is out of window
    XfrmInStateSeqError int
    // State is expired
    XfrmInStateExpired int
    // State has a mismatched option,
    // e.g. the UDP encapsulation type is mismatched
    XfrmInStateMismatch int
    // State is invalid
    XfrmInStateInvalid int
    // No matching template for states,
    // e.g. inbound SAs are correct but the SP rule is wrong
    XfrmInTmplMismatch int
    // No policy is found for states,
    // e.g. inbound SAs are correct but no SP is found
    XfrmInNoPols int
    // Policy discards
    XfrmInPolBlock int
    // Policy error
    XfrmInPolError int
    // All errors which are not matched by others
    XfrmOutError int
    // Bundle generation error
    XfrmOutBundleGenError int
    // Bundle check error
    XfrmOutBundleCheckError int
    // No state was found
    XfrmOutNoStates int
    // Transformation protocol specific error
    XfrmOutStateProtoError int
    // Transformation mode specific error
    XfrmOutStateModeError int
    // Sequence error,
    // i.e. sequence number overflow
    XfrmOutStateSeqError int
    // State is expired
    XfrmOutStateExpired int
    // Policy discards
    XfrmOutPolBlock int
    // Policy is dead
    XfrmOutPolDead int
    // Policy error
    XfrmOutPolError int
    XfrmFwdHdrError int
    XfrmOutStateInvalid int
    XfrmAcquireError int
}

// NewXfrmStat reads the xfrm_stat statistics.
func NewXfrmStat() (XfrmStat, error) {
    fs, err := NewFS(DefaultMountPoint)
    if err != nil {
        return XfrmStat{}, err
    }

    return fs.NewXfrmStat()
}

// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
func (fs FS) NewXfrmStat() (XfrmStat, error) {
    file, err := os.Open(fs.Path("net/xfrm_stat"))
    if err != nil {
        return XfrmStat{}, err
    }
    defer file.Close()

    var (
        x = XfrmStat{}
        s = bufio.NewScanner(file)
    )

    for s.Scan() {
        fields := strings.Fields(s.Text())

        if len(fields) != 2 {
            return XfrmStat{}, fmt.Errorf(
                "couldn't parse %s line %s", file.Name(), s.Text())
        }

        name := fields[0]
        value, err := strconv.Atoi(fields[1])
        if err != nil {
            return XfrmStat{}, err
        }

        switch name {
        case "XfrmInError":
            x.XfrmInError = value
        case "XfrmInBufferError":
            x.XfrmInBufferError = value
        case "XfrmInHdrError":
            x.XfrmInHdrError = value
        case "XfrmInNoStates":
            x.XfrmInNoStates = value
        case "XfrmInStateProtoError":
            x.XfrmInStateProtoError = value
        case "XfrmInStateModeError":
            x.XfrmInStateModeError = value
        case "XfrmInStateSeqError":
            x.XfrmInStateSeqError = value
        case "XfrmInStateExpired":
            x.XfrmInStateExpired = value
        case "XfrmInStateInvalid":
            x.XfrmInStateInvalid = value
        case "XfrmInTmplMismatch":
            x.XfrmInTmplMismatch = value
        case "XfrmInNoPols":
            x.XfrmInNoPols = value
        case "XfrmInPolBlock":
            x.XfrmInPolBlock = value
        case "XfrmInPolError":
            x.XfrmInPolError = value
        case "XfrmOutError":
            x.XfrmOutError = value
        case "XfrmInStateMismatch":
            x.XfrmInStateMismatch = value
        case "XfrmOutBundleGenError":
            x.XfrmOutBundleGenError = value
        case "XfrmOutBundleCheckError":
            x.XfrmOutBundleCheckError = value
        case "XfrmOutNoStates":
            x.XfrmOutNoStates = value
        case "XfrmOutStateProtoError":
            x.XfrmOutStateProtoError = value
        case "XfrmOutStateModeError":
            x.XfrmOutStateModeError = value
        case "XfrmOutStateSeqError":
            x.XfrmOutStateSeqError = value
        case "XfrmOutStateExpired":
            x.XfrmOutStateExpired = value
        case "XfrmOutPolBlock":
            x.XfrmOutPolBlock = value
        case "XfrmOutPolDead":
            x.XfrmOutPolDead = value
        case "XfrmOutPolError":
            x.XfrmOutPolError = value
        case "XfrmFwdHdrError":
            x.XfrmFwdHdrError = value
        case "XfrmOutStateInvalid":
            x.XfrmOutStateInvalid = value
        case "XfrmAcquireError":
            x.XfrmAcquireError = value
        }
    }

    return x, s.Err()
}
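A minimal sketch for the xfrm counters (the printed fields are an arbitrary selection):

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    // Parses /proc/net/xfrm_stat on the default mount point.
    x, err := procfs.NewXfrmStat()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("XfrmInError=%d XfrmInNoStates=%d XfrmOutError=%d\n",
        x.XfrmInError, x.XfrmInNoStates, x.XfrmOutError)
}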
@ -135,22 +135,28 @@
      "revisionTime": "2017-02-20T10:38:46Z"
    },
    {
      "checksumSHA1": "my9sP6myQbSinzWZwxa4Q1V6awU=",
      "checksumSHA1": "K1HtPl6z/FoGkfWO7zZkzFqeDZ4=",
      "path": "github.com/prometheus/procfs",
      "revision": "d098ca18df8bc825079013daf7bacefbb1ee877e",
      "revisionTime": "2017-05-01T09:11:53Z"
      "revision": "a66a2f8b6f3fe82a95a1bed0bb3705bac8031717",
      "revisionTime": "2017-06-07T19:36:46Z"
    },
    {
      "checksumSHA1": "eiBAd4edewJTOtTwxh/ubJdjd+I=",
      "checksumSHA1": "RAb14ZPZ6Y+AD0TnMBqLHaIw/I8=",
      "path": "github.com/prometheus/procfs/bcache",
      "revision": "a66a2f8b6f3fe82a95a1bed0bb3705bac8031717",
      "revisionTime": "2017-06-07T19:36:46Z"
    },
    {
      "checksumSHA1": "8wNwVL3xTGA0N92ywfyiPvQeFmE=",
      "path": "github.com/prometheus/procfs/sysfs",
      "revision": "d098ca18df8bc825079013daf7bacefbb1ee877e",
      "revisionTime": "2017-05-01T09:11:53Z"
      "revision": "a66a2f8b6f3fe82a95a1bed0bb3705bac8031717",
      "revisionTime": "2017-06-07T19:36:46Z"
    },
    {
      "checksumSHA1": "xCiFAAwVTrjsfZT1BIJQ3DgeNCY=",
      "path": "github.com/prometheus/procfs/xfs",
      "revision": "d098ca18df8bc825079013daf7bacefbb1ee877e",
      "revisionTime": "2017-05-01T09:11:53Z"
      "revision": "a66a2f8b6f3fe82a95a1bed0bb3705bac8031717",
      "revisionTime": "2017-06-07T19:36:46Z"
    },
    {
      "checksumSHA1": "uozMgPjB4AggpuuJkGq3FgAs4CA=",