Merge branch 'master' into add_infiniband_hwcounters

pull/2827/head
dongjiang 2024-10-09 20:43:20 +08:00 committed by GitHub
commit 783f90e0eb
175 changed files with 2674 additions and 1416 deletions


@ -7,10 +7,10 @@ executors:
# should also be updated.
golang:
docker:
- image: cimg/go:1.21
- image: cimg/go:1.23
arm:
machine:
image: ubuntu-2204:current
docker:
- image: cimg/go:1.23
resource_class: arm.medium
jobs:
@ -25,7 +25,7 @@ jobs:
test-arm:
executor: arm
steps:
- checkout
- prometheus/setup_environment
- run: uname -a
- run: make test-e2e
test_mixins:
@ -49,6 +49,16 @@ jobs:
- run: docker run --privileged linuxkit/binfmt:af88a591f9cc896a52ce596b9cf7ca26a061ef97
- run: promu crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX
- run: promu --config .promu-cgo.yml crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX
# sign the darwin build so it doesn't get SIGKILLed on start, see: https://github.com/prometheus/node_exporter/issues/2539
- run:
command: |
if [[ -f "$(pwd)/.build/darwin-arm64/node_exporter" ]]; then
promu codesign "$(pwd)/.build/darwin-arm64/node_exporter"
fi
if [[ -f "$(pwd)/.build/darwin-amd64/node_exporter" ]]; then
promu codesign "$(pwd)/.build/darwin-amd64/node_exporter"
fi
- persist_to_workspace:
root: .
paths:
@ -60,7 +70,7 @@ jobs:
machine:
image: ubuntu-2204:current
environment:
DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.21-base
DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.23-base
REPO_PATH: github.com/prometheus/node_exporter
steps:
- prometheus/setup_environment


@ -4,6 +4,7 @@ on:
push:
paths:
- "README.md"
- "README-containers.md"
- ".github/workflows/container_description.yml"
branches: [ main, master ]
@ -17,7 +18,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set docker hub repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to Dockerhub
@ -29,7 +30,9 @@ jobs:
destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
provider: dockerhub
short_description: ${{ env.DOCKER_REPO_NAME }}
readme_file: 'README.md'
# Empty string results in README-containers.md being pushed if it
# exists. Otherwise, README.md is pushed.
readme_file: ''
PushQuayIoReadme:
runs-on: ubuntu-latest
@ -37,7 +40,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set quay.io org name
run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
- name: Set quay.io repo name
@ -49,4 +52,6 @@ jobs:
with:
destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
provider: quay
readme_file: 'README.md'
# Empty string results in README-containers.md being pushed if it
# exists. Otherwise, README.md is pushed.
readme_file: ''


@ -24,15 +24,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: install Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- name: Install Go
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: 1.22.x
go-version: 1.23.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
with:
version: v1.55.2
args: --verbose
version: v1.60.2


@ -32,8 +32,6 @@ linters-settings:
exclude-functions:
# Used in HTTP handlers, any error is handled by the server itself.
- (net/http.ResponseWriter).Write
# Never check for logger errors.
- (github.com/go-kit/log.Logger).Log
revive:
rules:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter


@ -1,14 +1,13 @@
go:
# Whenever the Go version is updated here, .circle/config.yml and
# .promu.yml should also be updated.
version: 1.21
version: 1.23
cgo: true
repository:
path: github.com/prometheus/node_exporter
build:
binaries:
- name: node_exporter
flags: -a -tags 'netgo osusergo static_build'
ldflags: |
-X github.com/prometheus/common/version.Version={{.Version}}
-X github.com/prometheus/common/version.Revision={{.Revision}}


@ -1,13 +1,12 @@
go:
# Whenever the Go version is updated here, .circle/config.yml and
# .promu-cgo.yml should also be updated.
version: 1.21
version: 1.23
repository:
path: github.com/prometheus/node_exporter
build:
binaries:
- name: node_exporter
flags: -a -tags 'netgo osusergo static_build'
ldflags: |
-X github.com/prometheus/common/version.Version={{.Version}}
-X github.com/prometheus/common/version.Revision={{.Revision}}


@ -1,5 +1,7 @@
---
extends: default
ignore: |
ui/react-app/node_modules
rules:
braces:


@ -5,6 +5,34 @@
* [ENHANCEMENT]
* [BUGFIX]
## 1.8.1 / 2024-05-16
* [BUGFIX] Fix CPU seconds on Solaris #2963
* [BUGFIX] Sign Darwin/MacOS binaries #3008
* [BUGFIX] Fix pressure collector nil reference #3016
## 1.8.0 / 2024-04-24
* [CHANGE] exec_bsd: Fix labels for `vm.stats.sys.v_syscall` sysctl #2895
* [CHANGE] diskstats: Ignore zram devices on linux systems #2898
* [CHANGE] textfile: Avoid inconsistent help-texts #2962
* [CHANGE] os: Removed caching of modtime/filename of os-release file #2987
* [FEATURE] xfrm: Add new collector #2866
* [FEATURE] watchdog: Add new collector #2880
* [ENHANCEMENT] cpu_vulnerabilities: Add mitigation information label #2806
* [ENHANCEMENT] nfsd: Handle new `wdeleg_getattr` attribute #2810
* [ENHANCEMENT] netstat: Add TCPOFOQueue to default netstat metrics #2867
* [ENHANCEMENT] filesystem: surface device errors #2923
* [ENHANCEMENT] os: Add support end parsing #2982
* [ENHANCEMENT] zfs: Log mib when sysctl read fails on FreeBSD #2975
* [ENHANCEMENT] fibre_channel: update procfs to take into account optional attributes #2933
* [BUGFIX] cpu: Fix debug log in cpu collector #2857
* [BUGFIX] hwmon: Fix hwmon nil ptr #2873
* [BUGFIX] hwmon: Fix hwmon error capture #2915
* [BUGFIX] zfs: Revert "Add ZFS freebsd per dataset stats #2925
* [BUGFIX] ethtool: Sanitize ethtool metric name keys #2940
* [BUGFIX] fix: data race of NetClassCollector metrics initialization #2995
## 1.7.0 / 2023-11-11
* [FEATURE] Add ZFS freebsd per dataset stats #2753


@ -49,19 +49,19 @@ endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
ifneq ($(shell command -v gotestsum > /dev/null),)
ifneq ($(shell command -v gotestsum 2> /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
PROMU_VERSION ?= 0.15.0
PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.55.2
GOLANGCI_LINT_VERSION ?= v1.60.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@ -182,7 +182,7 @@ endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell command -v yamllint > /dev/null))
ifeq (, $(shell command -v yamllint 2> /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .
@ -275,3 +275,9 @@ $(1)_precheck:
exit 1; \
fi
endef
govulncheck: install-govulncheck
govulncheck ./...
install-govulncheck:
command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest


@ -102,8 +102,11 @@ ethtool | metrics | --collector.ethtool.metrics-include | N/A
filesystem | fs-types | N/A | --collector.filesystem.fs-types-exclude
filesystem | mount-points | N/A | --collector.filesystem.mount-points-exclude
hwmon | chip | --collector.hwmon.chip-include | --collector.hwmon.chip-exclude
hwmon | sensor | --collector.hwmon.sensor-include | --collector.hwmon.sensor-exclude
interrupts | name | --collector.interrupts.name-include | --collector.interrupts.name-exclude
netdev | device | --collector.netdev.device-include | --collector.netdev.device-exclude
qdisk | device | --collector.qdisk.device-include | --collector.qdisk.device-exclude
slabinfo | slab-names | --collector.slabinfo.slabs-include | --collector.slabinfo.slabs-exclude
sysctl | all | --collector.sysctl.include | N/A
systemd | unit | --collector.systemd.unit-include | --collector.systemd.unit-exclude
@ -336,13 +339,21 @@ mv /path/to/directory/role.prom.$$ /path/to/directory/role.prom
The `node_exporter` will expose all metrics from enabled collectors by default. This is the recommended way to collect metrics to avoid errors when comparing metrics of different families.
For advanced use the `node_exporter` can be passed an optional list of collectors to filter metrics. The `collect[]` parameter may be used multiple times. In Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>).
For advanced use the `node_exporter` can be passed an optional list of collectors to filter metrics. The parameters `collect[]` and `exclude[]` can be used multiple times (but cannot be combined). In Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>).
Collect only `cpu` and `meminfo` collector metrics:
```
params:
collect[]:
- foo
- bar
- cpu
- meminfo
```
Collect all enabled collector metrics but exclude `netdev`:
```
params:
exclude[]:
- netdev
```
This can be useful for having different Prometheus servers collect specific metrics from nodes.
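For orientation, a minimal sketch of how the `collect[]` parameter fits into a complete scrape configuration; the job name and target address below are placeholders, not values taken from this repository:

```
scrape_configs:
  - job_name: "node"
    static_configs:
      - targets: ["localhost:9100"]
    # Only fetch cpu and meminfo collector metrics from this target.
    params:
      collect[]:
        - cpu
        - meminfo
```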
@ -371,7 +382,7 @@ To see all available configuration flags:
## TLS endpoint
** EXPERIMENTAL **
**EXPERIMENTAL**
The exporter supports TLS via a new web configuration file.


@ -1 +1 @@
1.7.0
1.8.1


@ -19,11 +19,11 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"net"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/jsimonetti/rtnetlink"
"github.com/jsimonetti/rtnetlink/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
"golang.org/x/sys/unix"
@ -39,7 +39,7 @@ type arpCollector struct {
fs procfs.FS
deviceFilter deviceFilter
entries *prometheus.Desc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -47,7 +47,7 @@ func init() {
}
// NewARPCollector returns a new Collector exposing ARP stats.
func NewARPCollector(logger log.Logger) (Collector, error) {
func NewARPCollector(logger *slog.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)


@ -18,9 +18,9 @@ package collector
import (
"fmt"
"log/slog"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/bcache"
)
@ -36,12 +36,12 @@ func init() {
// A bcacheCollector is a Collector which gathers metrics from Linux bcache.
type bcacheCollector struct {
fs bcache.FS
logger log.Logger
logger *slog.Logger
}
// NewBcacheCollector returns a newly allocated bcacheCollector.
// It exposes a number of Linux bcache statistics.
func NewBcacheCollector(logger log.Logger) (Collector, error) {
func NewBcacheCollector(logger *slog.Logger) (Collector, error) {
fs, err := bcache.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)


@ -19,18 +19,17 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"os"
"path/filepath"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
type bondingCollector struct {
slaves, active typedDesc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -39,7 +38,7 @@ func init() {
// NewBondingCollector returns a newly allocated bondingCollector.
// It exposes the number of configured and active slave of linux bonding interfaces.
func NewBondingCollector(logger log.Logger) (Collector, error) {
func NewBondingCollector(logger *slog.Logger) (Collector, error) {
return &bondingCollector{
slaves: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(namespace, "bonding", "slaves"),
@ -61,7 +60,7 @@ func (c *bondingCollector) Update(ch chan<- prometheus.Metric) error {
bondingStats, err := readBondingStats(statusfile)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
level.Debug(c.logger).Log("msg", "Not collecting bonding, file does not exist", "file", statusfile)
c.logger.Debug("Not collecting bonding, file does not exist", "file", statusfile)
return ErrNoData
}
return err


@ -18,13 +18,13 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
"log/slog"
)
type bootTimeCollector struct {
logger log.Logger
logger *slog.Logger
}
func init() {
@ -32,7 +32,7 @@ func init() {
}
// newBootTimeCollector returns a new Collector exposing system boot time on BSD systems.
func newBootTimeCollector(logger log.Logger) (Collector, error) {
func newBootTimeCollector(logger *slog.Logger) (Collector, error) {
return &bootTimeCollector{
logger: logger,
}, nil


@ -17,21 +17,21 @@
package collector
import (
"github.com/go-kit/log"
"github.com/illumos/go-kstat"
"github.com/prometheus/client_golang/prometheus"
"log/slog"
)
type bootTimeCollector struct {
boottime typedDesc
logger log.Logger
logger *slog.Logger
}
func init() {
registerCollector("boottime", defaultEnabled, newBootTimeCollector)
}
func newBootTimeCollector(logger log.Logger) (Collector, error) {
func newBootTimeCollector(logger *slog.Logger) (Collector, error) {
return &bootTimeCollector{
boottime: typedDesc{
prometheus.NewDesc(


@ -18,13 +18,12 @@ package collector
import (
"fmt"
"log/slog"
"path"
"strings"
"syscall"
dennwc "github.com/dennwc/btrfs"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/btrfs"
)
@ -32,7 +31,7 @@ import (
// A btrfsCollector is a Collector which gathers metrics from Btrfs filesystems.
type btrfsCollector struct {
fs btrfs.FS
logger log.Logger
logger *slog.Logger
}
func init() {
@ -40,7 +39,7 @@ func init() {
}
// NewBtrfsCollector returns a new Collector exposing Btrfs statistics.
func NewBtrfsCollector(logger log.Logger) (Collector, error) {
func NewBtrfsCollector(logger *slog.Logger) (Collector, error) {
fs, err := btrfs.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)
@ -62,8 +61,8 @@ func (c *btrfsCollector) Update(ch chan<- prometheus.Metric) error {
ioctlStatsMap, err := c.getIoctlStats()
if err != nil {
level.Debug(c.logger).Log(
"msg", "Error querying btrfs device stats with ioctl",
c.logger.Debug(
"Error querying btrfs device stats with ioctl",
"err", err)
ioctlStatsMap = make(map[string]*btrfsIoctlFsStats)
}
@ -129,8 +128,8 @@ func (c *btrfsCollector) getIoctlStats() (map[string]*btrfsIoctlFsStats, error)
if err != nil {
// Failed to open this mount point, maybe we didn't have permission
// maybe we'll find another mount point for this FS later.
level.Debug(c.logger).Log(
"msg", "Error inspecting btrfs mountpoint",
c.logger.Debug(
"Error inspecting btrfs mountpoint",
"mountPoint", mountPath,
"err", err)
continue
@ -141,8 +140,8 @@ func (c *btrfsCollector) getIoctlStats() (map[string]*btrfsIoctlFsStats, error)
if err != nil {
// Failed to get the FS info for some reason,
// perhaps it'll work with a different mount point
level.Debug(c.logger).Log(
"msg", "Error querying btrfs filesystem",
c.logger.Debug(
"Error querying btrfs filesystem",
"mountPoint", mountPath,
"err", err)
continue
@ -156,8 +155,8 @@ func (c *btrfsCollector) getIoctlStats() (map[string]*btrfsIoctlFsStats, error)
deviceStats, err := c.getIoctlDeviceStats(fs, &fsInfo)
if err != nil {
level.Debug(c.logger).Log(
"msg", "Error querying btrfs device stats",
c.logger.Debug(
"Error querying btrfs device stats",
"mountPoint", mountPath,
"err", err)
continue
@ -275,6 +274,30 @@ func (c *btrfsCollector) getMetrics(s *btrfs.Stats, ioctlStats *btrfsIoctlFsStat
metricType: prometheus.GaugeValue,
value: float64(s.Allocation.GlobalRsvSize),
},
{
name: "commits_total",
desc: "The total number of commits that have occurred.",
metricType: prometheus.CounterValue,
value: float64(s.CommitStats.Commits),
},
{
name: "last_commit_seconds",
desc: "Duration of the most recent commit, in seconds.",
metricType: prometheus.GaugeValue,
value: float64(s.CommitStats.LastCommitMs) / 1000,
},
{
name: "max_commit_seconds",
desc: "Duration of the slowest commit, in seconds.",
metricType: prometheus.GaugeValue,
value: float64(s.CommitStats.MaxCommitMs) / 1000,
},
{
name: "commit_seconds_total",
desc: "Sum of the duration of all commits, in seconds.",
metricType: prometheus.CounterValue,
value: float64(s.CommitStats.TotalCommitMs) / 1000,
},
}
// Information about data, metadata and system data.


@ -27,6 +27,10 @@ var expectedBtrfsMetrics = [][]btrfsMetric{
{
{name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{"fixture"}},
{name: "global_rsv_size_bytes", value: 1.6777216e+07},
{name: "commits_total", value: 258051, metricType: 1},
{name: "last_commit_seconds", value: 1.0},
{name: "max_commit_seconds", value: 51.462},
{name: "commit_seconds_total", value: 47836.090, metricType: 1},
{name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}},
{name: "used_bytes", value: 8.08189952e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}},
{name: "size_bytes", value: 2.147483648e+09, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}},
@ -45,6 +49,10 @@ var expectedBtrfsMetrics = [][]btrfsMetric{
{
{name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{""}},
{name: "global_rsv_size_bytes", value: 1.6777216e+07},
{name: "commits_total", value: 0, metricType: 1},
{name: "last_commit_seconds", value: 0},
{name: "max_commit_seconds", value: 0},
{name: "commit_seconds_total", value: 0, metricType: 1},
{name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}},
{name: "used_bytes", value: 0, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}},
{name: "size_bytes", value: 6.44087808e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}},
@ -92,7 +100,10 @@ func checkMetric(exp, got *btrfsMetric) bool {
}
func TestBtrfs(t *testing.T) {
fs, _ := btrfs.NewFS("fixtures/sys")
fs, err := btrfs.NewFS("fixtures/sys")
if err != nil {
t.Fatal(err)
}
collector := &btrfsCollector{fs: fs}
stats, err := collector.fs.Stats()


@ -18,10 +18,9 @@ package collector
import (
"fmt"
"log/slog"
"strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
)
@ -33,7 +32,7 @@ const (
type buddyinfoCollector struct {
fs procfs.FS
desc *prometheus.Desc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -41,7 +40,7 @@ func init() {
}
// NewBuddyinfoCollector returns a new Collector exposing buddyinfo stats.
func NewBuddyinfoCollector(logger log.Logger) (Collector, error) {
func NewBuddyinfoCollector(logger *slog.Logger) (Collector, error) {
desc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, buddyInfoSubsystem, "blocks"),
"Count of free blocks according to size.",
@ -62,7 +61,7 @@ func (c *buddyinfoCollector) Update(ch chan<- prometheus.Metric) error {
return fmt.Errorf("couldn't get buddyinfo: %w", err)
}
level.Debug(c.logger).Log("msg", "Set node_buddy", "buddyInfo", buddyInfo)
c.logger.Debug("Set node_buddy", "buddyInfo", buddyInfo)
for _, entry := range buddyInfo {
for size, value := range entry.Sizes {
ch <- prometheus.MustNewConstMetric(


@ -18,8 +18,8 @@ package collector
import (
"fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
)
@ -30,7 +30,7 @@ type cgroupSummaryCollector struct {
fs procfs.FS
cgroups *prometheus.Desc
enabled *prometheus.Desc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -38,7 +38,7 @@ func init() {
}
// NewCgroupSummaryCollector returns a new Collector exposing a summary of cgroups.
func NewCgroupSummaryCollector(logger log.Logger) (Collector, error) {
func NewCgroupSummaryCollector(logger *slog.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)


@ -17,12 +17,11 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"sync"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
@ -50,14 +49,14 @@ const (
)
var (
factories = make(map[string]func(logger log.Logger) (Collector, error))
factories = make(map[string]func(logger *slog.Logger) (Collector, error))
initiatedCollectorsMtx = sync.Mutex{}
initiatedCollectors = make(map[string]Collector)
collectorState = make(map[string]*bool)
forcedCollectors = map[string]bool{} // collectors which have been explicitly enabled or disabled
)
func registerCollector(collector string, isDefaultEnabled bool, factory func(logger log.Logger) (Collector, error)) {
func registerCollector(collector string, isDefaultEnabled bool, factory func(logger *slog.Logger) (Collector, error)) {
var helpDefaultState string
if isDefaultEnabled {
helpDefaultState = "enabled"
@ -78,7 +77,7 @@ func registerCollector(collector string, isDefaultEnabled bool, factory func(log
// NodeCollector implements the prometheus.Collector interface.
type NodeCollector struct {
Collectors map[string]Collector
logger log.Logger
logger *slog.Logger
}
// DisableDefaultCollectors sets the collector state to false for all collectors which
@ -104,7 +103,7 @@ func collectorFlagAction(collector string) func(ctx *kingpin.ParseContext) error
}
// NewNodeCollector creates a new NodeCollector.
func NewNodeCollector(logger log.Logger, filters ...string) (*NodeCollector, error) {
func NewNodeCollector(logger *slog.Logger, filters ...string) (*NodeCollector, error) {
f := make(map[string]bool)
for _, filter := range filters {
enabled, exist := collectorState[filter]
@ -126,7 +125,7 @@ func NewNodeCollector(logger log.Logger, filters ...string) (*NodeCollector, err
if collector, ok := initiatedCollectors[key]; ok {
collectors[key] = collector
} else {
collector, err := factories[key](log.With(logger, "collector", key))
collector, err := factories[key](logger.With("collector", key))
if err != nil {
return nil, err
}
@ -156,7 +155,7 @@ func (n NodeCollector) Collect(ch chan<- prometheus.Metric) {
wg.Wait()
}
func execute(name string, c Collector, ch chan<- prometheus.Metric, logger log.Logger) {
func execute(name string, c Collector, ch chan<- prometheus.Metric, logger *slog.Logger) {
begin := time.Now()
err := c.Update(ch)
duration := time.Since(begin)
@ -164,13 +163,13 @@ func execute(name string, c Collector, ch chan<- prometheus.Metric, logger log.L
if err != nil {
if IsNoDataError(err) {
level.Debug(logger).Log("msg", "collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err)
logger.Debug("collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err)
} else {
level.Error(logger).Log("msg", "collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err)
logger.Error("collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err)
}
success = 0
} else {
level.Debug(logger).Log("msg", "collector succeeded", "name", name, "duration_seconds", duration.Seconds())
logger.Debug("collector succeeded", "name", name, "duration_seconds", duration.Seconds())
success = 1
}
ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name)


@ -19,10 +19,9 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"os"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
)
@ -38,7 +37,7 @@ type conntrackCollector struct {
drop *prometheus.Desc
earlyDrop *prometheus.Desc
searchRestart *prometheus.Desc
logger log.Logger
logger *slog.Logger
}
type conntrackStatistics struct {
@ -57,7 +56,7 @@ func init() {
}
// NewConntrackCollector returns a new Collector exposing conntrack stats.
func NewConntrackCollector(logger log.Logger) (Collector, error) {
func NewConntrackCollector(logger *slog.Logger) (Collector, error) {
return &conntrackCollector{
current: prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "nf_conntrack_entries"),
@ -154,7 +153,7 @@ func (c *conntrackCollector) Update(ch chan<- prometheus.Metric) error {
func (c *conntrackCollector) handleErr(err error) error {
if errors.Is(err, os.ErrNotExist) {
level.Debug(c.logger).Log("msg", "conntrack probably not loaded")
c.logger.Debug("conntrack probably not loaded")
return ErrNoData
}
return fmt.Errorf("failed to retrieve conntrack stats: %w", err)

collector/cpu_aix.go (new file)

@ -0,0 +1,76 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nocpu
// +build !nocpu
package collector
/*
#include <unistd.h> // Include the standard Unix header
#include <errno.h> // For errno
*/
import "C"
import (
"fmt"
"log/slog"
"strconv"
"github.com/power-devops/perfstat"
"github.com/prometheus/client_golang/prometheus"
)
type cpuCollector struct {
cpu typedDesc
logger *slog.Logger
tickPerSecond int64
}
func init() {
registerCollector("cpu", defaultEnabled, NewCpuCollector)
}
func tickPerSecond() (int64, error) {
ticks, err := C.sysconf(C._SC_CLK_TCK)
if ticks == -1 || err != nil {
return 0, fmt.Errorf("failed to get clock ticks per second: %v", err)
}
return int64(ticks), nil
}
func NewCpuCollector(logger *slog.Logger) (Collector, error) {
ticks, err := tickPerSecond()
if err != nil {
return nil, err
}
return &cpuCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
logger: logger,
tickPerSecond: ticks,
}, nil
}
func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error {
stats, err := perfstat.CpuStat()
if err != nil {
return err
}
for n, stat := range stats {
ch <- c.cpu.mustNewConstMetric(float64(stat.User/c.tickPerSecond), strconv.Itoa(n), "user")
ch <- c.cpu.mustNewConstMetric(float64(stat.Sys/c.tickPerSecond), strconv.Itoa(n), "system")
ch <- c.cpu.mustNewConstMetric(float64(stat.Idle/c.tickPerSecond), strconv.Itoa(n), "idle")
ch <- c.cpu.mustNewConstMetric(float64(stat.Wait/c.tickPerSecond), strconv.Itoa(n), "wait")
}
return nil
}


@ -23,10 +23,10 @@ import (
"bytes"
"encoding/binary"
"fmt"
"log/slog"
"strconv"
"unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -53,7 +53,7 @@ const ClocksPerSec = float64(C.CLK_TCK)
type statCollector struct {
cpu *prometheus.Desc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -61,7 +61,7 @@ func init() {
}
// NewCPUCollector returns a new Collector exposing CPU stats.
func NewCPUCollector(logger log.Logger) (Collector, error) {
func NewCPUCollector(logger *slog.Logger) (Collector, error) {
return &statCollector{
cpu: nodeCPUSecondsDesc,
logger: logger,


@ -18,10 +18,10 @@ package collector
import (
"errors"
"log/slog"
"strconv"
"unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -78,7 +78,7 @@ const maxCPUTimesLen = C.MAXCPU * C.CPUSTATES
type statCollector struct {
cpu *prometheus.Desc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -86,7 +86,7 @@ func init() {
}
// NewStatCollector returns a new Collector exposing CPU stats.
func NewStatCollector(logger log.Logger) (Collector, error) {
func NewStatCollector(logger *slog.Logger) (Collector, error) {
return &statCollector{
cpu: nodeCPUSecondsDesc,
logger: logger,


@ -18,12 +18,11 @@ package collector
import (
"fmt"
"log/slog"
"math"
"strconv"
"unsafe"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
)
@ -85,7 +84,7 @@ func getCPUTimes() ([]cputime, error) {
type statCollector struct {
cpu typedDesc
temp typedDesc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -93,7 +92,7 @@ func init() {
}
// NewStatCollector returns a new Collector exposing CPU stats.
func NewStatCollector(logger log.Logger) (Collector, error) {
func NewStatCollector(logger *slog.Logger) (Collector, error) {
return &statCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
temp: typedDesc{prometheus.NewDesc(
@ -134,11 +133,11 @@ func (c *statCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
if err == unix.ENOENT {
// No temperature information for this CPU
level.Debug(c.logger).Log("msg", "no temperature information for CPU", "cpu", cpu)
c.logger.Debug("no temperature information for CPU", "cpu", cpu)
} else {
// Unexpected error
ch <- c.temp.mustNewConstMetric(math.NaN(), lcpu)
level.Error(c.logger).Log("msg", "failed to query CPU temperature for CPU", "cpu", cpu, "err", err)
c.logger.Error("failed to query CPU temperature for CPU", "cpu", cpu, "err", err)
}
continue
}


@ -18,20 +18,19 @@ package collector
import (
"fmt"
"log/slog"
"os"
"path/filepath"
"regexp"
"slices"
"strconv"
"sync"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
"github.com/prometheus/procfs/sysfs"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
type cpuCollector struct {
@ -45,7 +44,7 @@ type cpuCollector struct {
cpuCoreThrottle *prometheus.Desc
cpuPackageThrottle *prometheus.Desc
cpuIsolated *prometheus.Desc
logger log.Logger
logger *slog.Logger
cpuStats map[int64]procfs.CPUStat
cpuStatsMutex sync.Mutex
isolatedCpus []uint16
@ -70,7 +69,7 @@ func init() {
}
// NewCPUCollector returns a new Collector exposing kernel/system statistics.
func NewCPUCollector(logger log.Logger) (Collector, error) {
func NewCPUCollector(logger *slog.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
@ -86,7 +85,7 @@ func NewCPUCollector(logger log.Logger) (Collector, error) {
if !os.IsNotExist(err) {
return nil, fmt.Errorf("Unable to get isolated cpus: %w", err)
}
level.Debug(logger).Log("msg", "Could not open isolated file", "error", err)
logger.Debug("Could not open isolated file", "error", err)
}
c := &cpuCollector{
@ -146,7 +145,7 @@ func NewCPUCollector(logger log.Logger) (Collector, error) {
func (c *cpuCollector) compileIncludeFlags(flagsIncludeFlag, bugsIncludeFlag *string) error {
if (*flagsIncludeFlag != "" || *bugsIncludeFlag != "") && !*enableCPUInfo {
*enableCPUInfo = true
level.Info(c.logger).Log("msg", "--collector.cpu.info has been set to `true` because you set the following flags, like --collector.cpu.info.flags-include and --collector.cpu.info.bugs-include")
c.logger.Info("--collector.cpu.info has been set to `true` because you set the following flags, like --collector.cpu.info.flags-include and --collector.cpu.info.bugs-include")
}
var err error
@ -205,7 +204,7 @@ func (c *cpuCollector) updateInfo(ch chan<- prometheus.Metric) error {
cpuFreqEnabled, ok := collectorState["cpufreq"]
if !ok || cpuFreqEnabled == nil {
level.Debug(c.logger).Log("msg", "cpufreq key missing or nil value in collectorState map")
c.logger.Debug("cpufreq key missing or nil value in collectorState map")
} else if !*cpuFreqEnabled {
for _, cpu := range info {
ch <- prometheus.MustNewConstMetric(c.cpuFrequencyHz,
@ -269,12 +268,12 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error
// topology/physical_package_id
if physicalPackageID, err = readUintFromFile(filepath.Join(cpu, "topology", "physical_package_id")); err != nil {
level.Debug(c.logger).Log("msg", "CPU is missing physical_package_id", "cpu", cpu)
c.logger.Debug("CPU is missing physical_package_id", "cpu", cpu)
continue
}
// topology/core_id
if coreID, err = readUintFromFile(filepath.Join(cpu, "topology", "core_id")); err != nil {
level.Debug(c.logger).Log("msg", "CPU is missing core_id", "cpu", cpu)
c.logger.Debug("CPU is missing core_id", "cpu", cpu)
continue
}
@ -292,7 +291,7 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error
if coreThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "core_throttle_count")); err == nil {
packageCoreThrottles[physicalPackageID][coreID] = coreThrottleCount
} else {
level.Debug(c.logger).Log("msg", "CPU is missing core_throttle_count", "cpu", cpu)
c.logger.Debug("CPU is missing core_throttle_count", "cpu", cpu)
}
}
@ -302,7 +301,7 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error
if packageThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "package_throttle_count")); err == nil {
packageThrottles[physicalPackageID] = packageThrottleCount
} else {
level.Debug(c.logger).Log("msg", "CPU is missing package_throttle_count", "cpu", cpu)
c.logger.Debug("CPU is missing package_throttle_count", "cpu", cpu)
}
}
}
@ -380,68 +379,68 @@ func (c *cpuCollector) updateCPUStats(newStats map[int64]procfs.CPUStat) {
// If idle jumps backwards by more than X seconds, assume we had a hotplug event and reset the stats for this CPU.
if (cpuStats.Idle - n.Idle) >= jumpBackSeconds {
level.Debug(c.logger).Log("msg", jumpBackDebugMessage, "cpu", i, "old_value", cpuStats.Idle, "new_value", n.Idle)
c.logger.Debug(jumpBackDebugMessage, "cpu", i, "old_value", cpuStats.Idle, "new_value", n.Idle)
cpuStats = procfs.CPUStat{}
}
if n.Idle >= cpuStats.Idle {
cpuStats.Idle = n.Idle
} else {
level.Debug(c.logger).Log("msg", "CPU Idle counter jumped backwards", "cpu", i, "old_value", cpuStats.Idle, "new_value", n.Idle)
c.logger.Debug("CPU Idle counter jumped backwards", "cpu", i, "old_value", cpuStats.Idle, "new_value", n.Idle)
}
if n.User >= cpuStats.User {
cpuStats.User = n.User
} else {
level.Debug(c.logger).Log("msg", "CPU User counter jumped backwards", "cpu", i, "old_value", cpuStats.User, "new_value", n.User)
c.logger.Debug("CPU User counter jumped backwards", "cpu", i, "old_value", cpuStats.User, "new_value", n.User)
}
if n.Nice >= cpuStats.Nice {
cpuStats.Nice = n.Nice
} else {
level.Debug(c.logger).Log("msg", "CPU Nice counter jumped backwards", "cpu", i, "old_value", cpuStats.Nice, "new_value", n.Nice)
c.logger.Debug("CPU Nice counter jumped backwards", "cpu", i, "old_value", cpuStats.Nice, "new_value", n.Nice)
}
if n.System >= cpuStats.System {
cpuStats.System = n.System
} else {
level.Debug(c.logger).Log("msg", "CPU System counter jumped backwards", "cpu", i, "old_value", cpuStats.System, "new_value", n.System)
c.logger.Debug("CPU System counter jumped backwards", "cpu", i, "old_value", cpuStats.System, "new_value", n.System)
}
if n.Iowait >= cpuStats.Iowait {
cpuStats.Iowait = n.Iowait
} else {
level.Debug(c.logger).Log("msg", "CPU Iowait counter jumped backwards", "cpu", i, "old_value", cpuStats.Iowait, "new_value", n.Iowait)
c.logger.Debug("CPU Iowait counter jumped backwards", "cpu", i, "old_value", cpuStats.Iowait, "new_value", n.Iowait)
}
if n.IRQ >= cpuStats.IRQ {
cpuStats.IRQ = n.IRQ
} else {
level.Debug(c.logger).Log("msg", "CPU IRQ counter jumped backwards", "cpu", i, "old_value", cpuStats.IRQ, "new_value", n.IRQ)
c.logger.Debug("CPU IRQ counter jumped backwards", "cpu", i, "old_value", cpuStats.IRQ, "new_value", n.IRQ)
}
if n.SoftIRQ >= cpuStats.SoftIRQ {
cpuStats.SoftIRQ = n.SoftIRQ
} else {
level.Debug(c.logger).Log("msg", "CPU SoftIRQ counter jumped backwards", "cpu", i, "old_value", cpuStats.SoftIRQ, "new_value", n.SoftIRQ)
c.logger.Debug("CPU SoftIRQ counter jumped backwards", "cpu", i, "old_value", cpuStats.SoftIRQ, "new_value", n.SoftIRQ)
}
if n.Steal >= cpuStats.Steal {
cpuStats.Steal = n.Steal
} else {
level.Debug(c.logger).Log("msg", "CPU Steal counter jumped backwards", "cpu", i, "old_value", cpuStats.Steal, "new_value", n.Steal)
c.logger.Debug("CPU Steal counter jumped backwards", "cpu", i, "old_value", cpuStats.Steal, "new_value", n.Steal)
}
if n.Guest >= cpuStats.Guest {
cpuStats.Guest = n.Guest
} else {
level.Debug(c.logger).Log("msg", "CPU Guest counter jumped backwards", "cpu", i, "old_value", cpuStats.Guest, "new_value", n.Guest)
c.logger.Debug("CPU Guest counter jumped backwards", "cpu", i, "old_value", cpuStats.Guest, "new_value", n.Guest)
}
if n.GuestNice >= cpuStats.GuestNice {
cpuStats.GuestNice = n.GuestNice
} else {
level.Debug(c.logger).Log("msg", "CPU GuestNice counter jumped backwards", "cpu", i, "old_value", cpuStats.GuestNice, "new_value", n.GuestNice)
c.logger.Debug("CPU GuestNice counter jumped backwards", "cpu", i, "old_value", cpuStats.GuestNice, "new_value", n.GuestNice)
}
c.cpuStats[i] = cpuStats


@ -17,10 +17,11 @@
package collector
import (
"io"
"log/slog"
"reflect"
"testing"
"github.com/go-kit/log"
"github.com/prometheus/procfs"
)
@ -35,7 +36,7 @@ func makeTestCPUCollector(s map[int64]procfs.CPUStat) *cpuCollector {
dup := make(map[int64]procfs.CPUStat, len(s))
copyStats(dup, s)
return &cpuCollector{
logger: log.NewNopLogger(),
logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
cpuStats: dup,
}
}


@ -18,6 +18,7 @@ package collector
import (
"errors"
"log/slog"
"math"
"regexp"
"sort"
@ -25,8 +26,6 @@ import (
"strings"
"unsafe"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
@ -214,7 +213,7 @@ func getCPUTimes() ([]cputime, error) {
type statCollector struct {
cpu typedDesc
temp typedDesc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -222,7 +221,7 @@ func init() {
}
// NewStatCollector returns a new Collector exposing CPU stats.
func NewStatCollector(logger log.Logger) (Collector, error) {
func NewStatCollector(logger *slog.Logger) (Collector, error) {
return &statCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
temp: typedDesc{prometheus.NewDesc(
@ -269,7 +268,7 @@ func (c *statCollector) Update(ch chan<- prometheus.Metric) error {
if temp, ok := cpuTemperatures[cpu]; ok {
ch <- c.temp.mustNewConstMetric(temp, lcpu)
} else {
level.Debug(c.logger).Log("msg", "no temperature information for CPU", "cpu", cpu)
c.logger.Debug("no temperature information for CPU", "cpu", cpu)
ch <- c.temp.mustNewConstMetric(math.NaN(), lcpu)
}
}


@ -17,10 +17,10 @@
package collector
import (
"log/slog"
"strconv"
"unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
)
@ -45,14 +45,14 @@ const (
type cpuCollector struct {
cpu typedDesc
logger log.Logger
logger *slog.Logger
}
func init() {
registerCollector("cpu", defaultEnabled, NewCPUCollector)
}
func NewCPUCollector(logger log.Logger) (Collector, error) {
func NewCPUCollector(logger *slog.Logger) (Collector, error) {
return &cpuCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
logger: logger,


@ -17,9 +17,9 @@
package collector
import (
"log/slog"
"strconv"
"github.com/go-kit/log"
"github.com/illumos/go-kstat"
"github.com/prometheus/client_golang/prometheus"
)
@ -29,14 +29,14 @@ import "C"
type cpuCollector struct {
cpu typedDesc
logger log.Logger
logger *slog.Logger
}
func init() {
registerCollector("cpu", defaultEnabled, NewCpuCollector)
}
func NewCpuCollector(logger log.Logger) (Collector, error) {
func NewCpuCollector(logger *slog.Logger) (Collector, error) {
return &cpuCollector{
cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue},
logger: logger,
@ -60,17 +60,17 @@ func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error {
}
for k, v := range map[string]string{
"idle": "cpu_ticks_idle",
"kernel": "cpu_ticks_kernel",
"user": "cpu_ticks_user",
"wait": "cpu_ticks_wait",
"idle": "cpu_nsec_idle",
"kernel": "cpu_nsec_kernel",
"user": "cpu_nsec_user",
"wait": "cpu_nsec_wait",
} {
kstatValue, err := ksCPU.GetNamed(v)
if err != nil {
return err
}
ch <- c.cpu.mustNewConstMetric(float64(kstatValue.UintVal), strconv.Itoa(cpu), k)
ch <- c.cpu.mustNewConstMetric(float64(kstatValue.UintVal)/1e9, strconv.Itoa(cpu), k)
}
}
return nil


@ -15,8 +15,8 @@ package collector
import (
"fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
)
@ -40,7 +40,7 @@ func init() {
registerCollector(cpuVulerabilitiesCollector, defaultDisabled, NewVulnerabilitySysfsCollector)
}
func NewVulnerabilitySysfsCollector(logger log.Logger) (Collector, error) {
func NewVulnerabilitySysfsCollector(logger *slog.Logger) (Collector, error) {
return &cpuVulnerabilitiesCollector{}, nil
}


@ -18,15 +18,15 @@ package collector
import (
"fmt"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
"log/slog"
"strings"
)
type cpuFreqCollector struct {
fs sysfs.FS
logger log.Logger
logger *slog.Logger
}
func init() {
@ -34,7 +34,7 @@ func init() {
}
// NewCPUFreqCollector returns a new Collector exposing kernel/system statistics.
func NewCPUFreqCollector(logger log.Logger) (Collector, error) {
func NewCPUFreqCollector(logger *slog.Logger) (Collector, error) {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)


@ -18,9 +18,9 @@ package collector
import (
"fmt"
"log/slog"
"strconv"
"github.com/go-kit/log"
"github.com/illumos/go-kstat"
"github.com/prometheus/client_golang/prometheus"
)
@ -29,14 +29,14 @@ import (
import "C"
type cpuFreqCollector struct {
logger log.Logger
logger *slog.Logger
}
func init() {
registerCollector("cpufreq", defaultEnabled, NewCpuFreqCollector)
}
func NewCpuFreqCollector(logger log.Logger) (Collector, error) {
func NewCpuFreqCollector(logger *slog.Logger) (Collector, error) {
return &cpuFreqCollector{
logger: logger,
}, nil


@ -19,8 +19,8 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -98,7 +98,7 @@ type devstatCollector struct {
bytesDesc *prometheus.Desc
transfersDesc *prometheus.Desc
blocksDesc *prometheus.Desc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -106,7 +106,7 @@ func init() {
}
// NewDevstatCollector returns a new Collector exposing Device stats.
func NewDevstatCollector(logger log.Logger) (Collector, error) {
func NewDevstatCollector(logger *slog.Logger) (Collector, error) {
return &devstatCollector{
bytesDesc: prometheus.NewDesc(
prometheus.BuildFQName(namespace, devstatSubsystem, "bytes_total"),


@ -19,10 +19,10 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"sync"
"unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -43,7 +43,7 @@ type devstatCollector struct {
duration typedDesc
busyTime typedDesc
blocks typedDesc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -51,7 +51,7 @@ func init() {
}
// NewDevstatCollector returns a new Collector exposing Device stats.
func NewDevstatCollector(logger log.Logger) (Collector, error) {
func NewDevstatCollector(logger *slog.Logger) (Collector, error) {
return &devstatCollector{
devinfo: &C.struct_devinfo{},
bytes: typedDesc{prometheus.NewDesc(


@ -0,0 +1,82 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nodiskstats
// +build !nodiskstats
package collector
import (
"fmt"
"log/slog"
"github.com/power-devops/perfstat"
"github.com/prometheus/client_golang/prometheus"
)
const diskstatsDefaultIgnoredDevices = ""
type diskstatsCollector struct {
rbytes typedDesc
wbytes typedDesc
time typedDesc
deviceFilter deviceFilter
logger *slog.Logger
tickPerSecond int64
}
func init() {
registerCollector("diskstats", defaultEnabled, NewDiskstatsCollector)
}
// NewDiskstatsCollector returns a new Collector exposing disk device stats.
func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) {
ticks, err := tickPerSecond()
if err != nil {
return nil, err
}
deviceFilter, err := newDiskstatsDeviceFilter(logger)
if err != nil {
return nil, fmt.Errorf("failed to parse device filter flags: %w", err)
}
return &diskstatsCollector{
rbytes: typedDesc{readBytesDesc, prometheus.CounterValue},
wbytes: typedDesc{writtenBytesDesc, prometheus.CounterValue},
time: typedDesc{ioTimeSecondsDesc, prometheus.CounterValue},
deviceFilter: deviceFilter,
logger: logger,
tickPerSecond: ticks,
}, nil
}
func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
stats, err := perfstat.DiskStat()
if err != nil {
return err
}
for _, stat := range stats {
if c.deviceFilter.ignored(stat.Name) {
continue
}
ch <- c.rbytes.mustNewConstMetric(float64(stat.Rblks*512), stat.Name)
ch <- c.wbytes.mustNewConstMetric(float64(stat.Wblks*512), stat.Name)
ch <- c.time.mustNewConstMetric(float64(stat.Time/c.tickPerSecond), stat.Name)
}
return nil
}


@ -11,18 +11,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nodiskstats && (openbsd || linux || darwin)
//go:build !nodiskstats && (openbsd || linux || darwin || aix)
// +build !nodiskstats
// +build openbsd linux darwin
// +build openbsd linux darwin aix
package collector
import (
"errors"
"log/slog"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
@ -93,10 +92,10 @@ var (
)
)
func newDiskstatsDeviceFilter(logger log.Logger) (deviceFilter, error) {
func newDiskstatsDeviceFilter(logger *slog.Logger) (deviceFilter, error) {
if *oldDiskstatsDeviceExclude != "" {
if !diskstatsDeviceExcludeSet {
level.Warn(logger).Log("msg", "--collector.diskstats.ignored-devices is DEPRECATED and will be removed in 2.0.0, use --collector.diskstats.device-exclude")
logger.Warn("--collector.diskstats.ignored-devices is DEPRECATED and will be removed in 2.0.0, use --collector.diskstats.device-exclude")
*diskstatsDeviceExclude = *oldDiskstatsDeviceExclude
} else {
return deviceFilter{}, errors.New("--collector.diskstats.ignored-devices and --collector.diskstats.device-exclude are mutually exclusive")
@ -108,11 +107,11 @@ func newDiskstatsDeviceFilter(logger log.Logger) (deviceFilter, error) {
}
if *diskstatsDeviceExclude != "" {
level.Info(logger).Log("msg", "Parsed flag --collector.diskstats.device-exclude", "flag", *diskstatsDeviceExclude)
logger.Info("Parsed flag --collector.diskstats.device-exclude", "flag", *diskstatsDeviceExclude)
}
if *diskstatsDeviceInclude != "" {
level.Info(logger).Log("msg", "Parsed Flag --collector.diskstats.device-include", "flag", *diskstatsDeviceInclude)
logger.Info("Parsed Flag --collector.diskstats.device-include", "flag", *diskstatsDeviceInclude)
}
return newDeviceFilter(*diskstatsDeviceExclude, *diskstatsDeviceInclude), nil


@ -18,8 +18,8 @@ package collector
import (
"fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/lufia/iostat"
"github.com/prometheus/client_golang/prometheus"
)
@ -35,7 +35,7 @@ type diskstatsCollector struct {
descs []typedDescFunc
deviceFilter deviceFilter
logger log.Logger
logger *slog.Logger
}
func init() {
@ -43,7 +43,7 @@ func init() {
}
// NewDiskstatsCollector returns a new Collector exposing disk device stats.
func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) {
var diskLabelNames = []string{"device"}
deviceFilter, err := newDiskstatsDeviceFilter(logger)


@ -19,12 +19,11 @@ package collector
import (
"bufio"
"fmt"
"log/slog"
"os"
"strconv"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/blockdevice"
)
@ -85,7 +84,7 @@ type diskstatsCollector struct {
filesystemInfoDesc typedFactorDesc
deviceMapperInfoDesc typedFactorDesc
ataDescs map[string]typedFactorDesc
logger log.Logger
logger *slog.Logger
getUdevDeviceProperties func(uint32, uint32) (udevInfo, error)
}
@ -95,7 +94,7 @@ func init() {
// NewDiskstatsCollector returns a new Collector exposing disk device stats.
// Docs from https://www.kernel.org/doc/Documentation/iostats.txt
func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) {
var diskLabelNames = []string{"device"}
fs, err := blockdevice.NewFS(*procPath, *sysPath)
if err != nil {
@ -262,7 +261,7 @@ func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
// Only enable getting device properties from udev if the directory is readable.
if stat, err := os.Stat(*udevDataPath); err != nil || !stat.IsDir() {
level.Error(logger).Log("msg", "Failed to open directory, disabling udev device properties", "path", *udevDataPath)
logger.Error("Failed to open directory, disabling udev device properties", "path", *udevDataPath)
} else {
collector.getUdevDeviceProperties = getUdevDeviceProperties
}
@ -284,7 +283,7 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
info, err := getUdevDeviceProperties(stats.MajorNumber, stats.MinorNumber)
if err != nil {
level.Debug(c.logger).Log("msg", "Failed to parse udev info", "err", err)
c.logger.Debug("Failed to parse udev info", "err", err)
}
// This is usually the serial printed on the disk label.
@ -355,14 +354,14 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
for attr, desc := range c.ataDescs {
str, ok := info[attr]
if !ok {
level.Debug(c.logger).Log("msg", "Udev attribute does not exist", "attribute", attr)
c.logger.Debug("Udev attribute does not exist", "attribute", attr)
continue
}
if value, err := strconv.ParseFloat(str, 64); err == nil {
ch <- desc.mustNewConstMetric(value, dev)
} else {
level.Error(c.logger).Log("msg", "Failed to parse ATA value", "err", err)
c.logger.Error("Failed to parse ATA value", "err", err)
}
}
}


@ -18,11 +18,11 @@ package collector
import (
"fmt"
"os"
"io"
"log/slog"
"strings"
"testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
)
@ -39,7 +39,7 @@ func (c testDiskStatsCollector) Describe(ch chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(c, ch)
}
func NewTestDiskStatsCollector(logger log.Logger) (prometheus.Collector, error) {
func NewTestDiskStatsCollector(logger *slog.Logger) (prometheus.Collector, error) {
dsc, err := NewDiskstatsCollector(logger)
if err != nil {
return testDiskStatsCollector{}, err
@ -317,10 +317,10 @@ node_disk_written_bytes_total{device="sr0"} 0
node_disk_written_bytes_total{device="vda"} 1.0938236928e+11
`
logger := log.NewLogfmtLogger(os.Stderr)
logger := slog.New(slog.NewTextHandler(io.Discard, nil))
collector, err := NewDiskstatsCollector(logger)
if err != nil {
panic(err)
t.Fatal(err)
}
c, err := NewTestDiskStatsCollector(logger)
if err != nil {


@ -18,9 +18,9 @@ package collector
import (
"fmt"
"log/slog"
"unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
)
@ -41,7 +41,7 @@ type diskstatsCollector struct {
time typedDesc
deviceFilter deviceFilter
logger log.Logger
logger *slog.Logger
}
func init() {
@ -49,7 +49,7 @@ func init() {
}
// NewDiskstatsCollector returns a new Collector exposing disk device stats.
func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) {
deviceFilter, err := newDiskstatsDeviceFilter(logger)
if err != nil {
return nil, fmt.Errorf("failed to parse device filter flags: %w", err)


@ -18,9 +18,9 @@ package collector
import (
"fmt"
"log/slog"
"unsafe"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
)
@ -52,7 +52,7 @@ type diskstatsCollector struct {
time typedDesc
deviceFilter deviceFilter
logger log.Logger
logger *slog.Logger
}
func init() {
@ -60,7 +60,7 @@ func init() {
}
// NewDiskstatsCollector returns a new Collector exposing disk device stats.
func NewDiskstatsCollector(logger log.Logger) (Collector, error) {
func NewDiskstatsCollector(logger *slog.Logger) (Collector, error) {
deviceFilter, err := newDiskstatsDeviceFilter(logger)
if err != nil {
return nil, fmt.Errorf("failed to parse device filter flags: %w", err)


@ -19,11 +19,10 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"os"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
)
@ -38,7 +37,7 @@ func init() {
}
// NewDMICollector returns a new Collector exposing DMI information.
func NewDMICollector(logger log.Logger) (Collector, error) {
func NewDMICollector(logger *slog.Logger) (Collector, error) {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)
@ -47,7 +46,7 @@ func NewDMICollector(logger log.Logger) (Collector, error) {
dmi, err := fs.DMIClass()
if err != nil {
if errors.Is(err, os.ErrNotExist) {
level.Debug(logger).Log("msg", "Platform does not support Desktop Management Interface (DMI) information", "err", err)
logger.Debug("Platform does not support Desktop Management Interface (DMI) information", "err", err)
dmi = &sysfs.DMIClass{}
} else {
return nil, fmt.Errorf("failed to read Desktop Management Interface (DMI) information: %w", err)


@ -20,12 +20,11 @@ import (
"bufio"
"errors"
"fmt"
"log/slog"
"os"
"strconv"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
@ -79,14 +78,14 @@ type drbdCollector struct {
numerical map[string]drbdNumericalMetric
stringPair map[string]drbdStringPairMetric
connected *prometheus.Desc
logger log.Logger
logger *slog.Logger
}
func init() {
registerCollector("drbd", defaultDisabled, newDRBDCollector)
}
func newDRBDCollector(logger log.Logger) (Collector, error) {
func newDRBDCollector(logger *slog.Logger) (Collector, error) {
return &drbdCollector{
numerical: map[string]drbdNumericalMetric{
"ns": newDRBDNumericalMetric(
@ -191,7 +190,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error {
file, err := os.Open(statsFile)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
level.Debug(c.logger).Log("msg", "stats file does not exist, skipping", "file", statsFile, "err", err)
c.logger.Debug("stats file does not exist, skipping", "file", statsFile, "err", err)
return ErrNoData
}
@ -208,7 +207,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error {
kv := strings.Split(field, ":")
if len(kv) != 2 {
level.Debug(c.logger).Log("msg", "skipping invalid key:value pair", "field", field)
c.logger.Debug("skipping invalid key:value pair", "field", field)
continue
}
@ -274,7 +273,7 @@ func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error {
continue
}
level.Debug(c.logger).Log("msg", "unhandled key-value pair", "key", kv[0], "value", kv[1])
c.logger.Debug("unhandled key-value pair", "key", kv[0], "value", kv[1])
}
return scanner.Err()

View File

@ -18,8 +18,8 @@ package collector
import (
"fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
)
@ -30,7 +30,7 @@ const (
type drmCollector struct {
fs sysfs.FS
logger log.Logger
logger *slog.Logger
CardInfo *prometheus.Desc
GPUBusyPercent *prometheus.Desc
MemoryGTTSize *prometheus.Desc
@ -46,7 +46,7 @@ func init() {
}
// NewDrmCollector returns a new Collector exposing /sys/class/drm/card?/device stats.
func NewDrmCollector(logger log.Logger) (Collector, error) {
func NewDrmCollector(logger *slog.Logger) (Collector, error) {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)

View File

@ -18,10 +18,10 @@ package collector
import (
"fmt"
"log/slog"
"path/filepath"
"regexp"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -39,7 +39,7 @@ type edacCollector struct {
ueCount *prometheus.Desc
csRowCECount *prometheus.Desc
csRowUECount *prometheus.Desc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -47,7 +47,7 @@ func init() {
}
// NewEdacCollector returns a new Collector exposing edac stats.
func NewEdacCollector(logger log.Logger) (Collector, error) {
func NewEdacCollector(logger *slog.Logger) (Collector, error) {
return &edacCollector{
ceCount: prometheus.NewDesc(
prometheus.BuildFQName(namespace, edacSubsystem, "correctable_errors_total"),

View File

@ -18,8 +18,8 @@ package collector
import (
"fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
)
@ -28,7 +28,7 @@ type entropyCollector struct {
fs procfs.FS
entropyAvail *prometheus.Desc
entropyPoolSize *prometheus.Desc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -36,7 +36,7 @@ func init() {
}
// NewEntropyCollector returns a new Collector exposing entropy stats.
func NewEntropyCollector(logger log.Logger) (Collector, error) {
func NewEntropyCollector(logger *slog.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)

View File

@ -23,6 +23,7 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"os"
"regexp"
"sort"
@ -31,8 +32,6 @@ import (
"syscall"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
"github.com/safchain/ethtool"
@ -79,13 +78,13 @@ type ethtoolCollector struct {
deviceFilter deviceFilter
infoDesc *prometheus.Desc
metricsPattern *regexp.Regexp
logger log.Logger
logger *slog.Logger
}
// makeEthtoolCollector is the internal constructor for EthtoolCollector.
// This allows NewEthtoolTestCollector to override its .ethtool interface
// for testing.
func makeEthtoolCollector(logger log.Logger) (*ethtoolCollector, error) {
func makeEthtoolCollector(logger *slog.Logger) (*ethtoolCollector, error) {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %w", err)
@ -96,6 +95,16 @@ func makeEthtoolCollector(logger log.Logger) (*ethtoolCollector, error) {
return nil, fmt.Errorf("failed to initialize ethtool library: %w", err)
}
if *ethtoolDeviceInclude != "" {
logger.Info("Parsed flag --collector.ethtool.device-include", "flag", *ethtoolDeviceInclude)
}
if *ethtoolDeviceExclude != "" {
logger.Info("Parsed flag --collector.ethtool.device-exclude", "flag", *ethtoolDeviceExclude)
}
if *ethtoolIncludedMetrics != "" {
logger.Info("Parsed flag --collector.ethtool.metrics-include", "flag", *ethtoolIncludedMetrics)
}
// Pre-populate some common ethtool metrics.
return &ethtoolCollector{
fs: fs,
@ -213,7 +222,7 @@ func buildEthtoolFQName(metric string) string {
}
// NewEthtoolCollector returns a new Collector exposing ethtool stats.
func NewEthtoolCollector(logger log.Logger) (Collector, error) {
func NewEthtoolCollector(logger *slog.Logger) (Collector, error) {
return makeEthtoolCollector(logger)
}
@ -366,7 +375,7 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
netClass, err := c.fs.NetClass()
if err != nil {
if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) {
level.Debug(c.logger).Log("msg", "Could not read netclass file", "err", err)
c.logger.Debug("Could not read netclass file", "err", err)
return ErrNoData
}
return fmt.Errorf("could not get net class info: %w", err)
@ -395,12 +404,12 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
} else {
if errno, ok := err.(syscall.Errno); ok {
if err == unix.EOPNOTSUPP {
level.Debug(c.logger).Log("msg", "ethtool link info error", "err", err, "device", device, "errno", uint(errno))
c.logger.Debug("ethtool link info error", "err", err, "device", device, "errno", uint(errno))
} else if errno != 0 {
level.Error(c.logger).Log("msg", "ethtool link info error", "err", err, "device", device, "errno", uint(errno))
c.logger.Error("ethtool link info error", "err", err, "device", device, "errno", uint(errno))
}
} else {
level.Error(c.logger).Log("msg", "ethtool link info error", "err", err, "device", device)
c.logger.Error("ethtool link info error", "err", err, "device", device)
}
}
@ -412,12 +421,12 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
} else {
if errno, ok := err.(syscall.Errno); ok {
if err == unix.EOPNOTSUPP {
level.Debug(c.logger).Log("msg", "ethtool driver info error", "err", err, "device", device, "errno", uint(errno))
c.logger.Debug("ethtool driver info error", "err", err, "device", device, "errno", uint(errno))
} else if errno != 0 {
level.Error(c.logger).Log("msg", "ethtool driver info error", "err", err, "device", device, "errno", uint(errno))
c.logger.Error("ethtool driver info error", "err", err, "device", device, "errno", uint(errno))
}
} else {
level.Error(c.logger).Log("msg", "ethtool driver info error", "err", err, "device", device)
c.logger.Error("ethtool driver info error", "err", err, "device", device)
}
}
@ -428,12 +437,12 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
if errno, ok := err.(syscall.Errno); ok {
if err == unix.EOPNOTSUPP {
level.Debug(c.logger).Log("msg", "ethtool stats error", "err", err, "device", device, "errno", uint(errno))
c.logger.Debug("ethtool stats error", "err", err, "device", device, "errno", uint(errno))
} else if errno != 0 {
level.Error(c.logger).Log("msg", "ethtool stats error", "err", err, "device", device, "errno", uint(errno))
c.logger.Error("ethtool stats error", "err", err, "device", device, "errno", uint(errno))
}
} else {
level.Error(c.logger).Log("msg", "ethtool stats error", "err", err, "device", device)
c.logger.Error("ethtool stats error", "err", err, "device", device)
}
}
@ -452,7 +461,7 @@ func (c *ethtoolCollector) Update(ch chan<- prometheus.Metric) error {
metricFQName := buildEthtoolFQName(metricName)
existingMetric, exists := metricFQNames[metricFQName]
if exists {
level.Debug(c.logger).Log("msg", "dropping duplicate metric name", "device", device,
c.logger.Debug("dropping duplicate metric name", "device", device,
"metricFQName", metricFQName, "metric1", existingMetric, "metric2", metricName)
// Keep the metricName marked as "deleted" in the map in case there are three or more duplicates.
metricFQNames[metricFQName] = ""

View File

@ -19,6 +19,8 @@ package collector
import (
"bufio"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"strconv"
@ -26,7 +28,6 @@ import (
"syscall"
"testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/safchain/ethtool"
@ -49,7 +50,7 @@ func (c testEthtoolCollector) Describe(ch chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(c, ch)
}
func NewTestEthtoolCollector(logger log.Logger) (prometheus.Collector, error) {
func NewTestEthtoolCollector(logger *slog.Logger) (prometheus.Collector, error) {
dsc, err := NewEthtoolTestCollector(logger)
if err != nil {
return testEthtoolCollector{}, err
@ -255,14 +256,14 @@ func (e *EthtoolFixture) LinkInfo(intf string) (ethtool.EthtoolCmd, error) {
return res, err
}
func NewEthtoolTestCollector(logger log.Logger) (Collector, error) {
func NewEthtoolTestCollector(logger *slog.Logger) (Collector, error) {
collector, err := makeEthtoolCollector(logger)
collector.ethtool = &EthtoolFixture{
fixturePath: "fixtures/ethtool/",
}
if err != nil {
return nil, err
}
collector.ethtool = &EthtoolFixture{
fixturePath: "fixtures/ethtool/",
}
return collector, nil
}
@ -370,10 +371,10 @@ node_network_supported_speed_bytes{device="eth0",duplex="half",mode="10baseT"} 1
`
*sysPath = "fixtures/sys"
logger := log.NewLogfmtLogger(os.Stderr)
logger := slog.New(slog.NewTextHandler(io.Discard, nil))
collector, err := NewEthtoolTestCollector(logger)
if err != nil {
panic(err)
t.Fatal(err)
}
c, err := NewTestEthtoolCollector(logger)
if err != nil {

View File

@ -18,13 +18,13 @@
package collector
import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"log/slog"
)
type execCollector struct {
sysctls []bsdSysctl
logger log.Logger
logger *slog.Logger
}
func init() {
@ -32,7 +32,7 @@ func init() {
}
// NewExecCollector returns a new Collector exposing system execution statistics.
func NewExecCollector(logger log.Logger) (Collector, error) {
func NewExecCollector(logger *slog.Logger) (Collector, error) {
// From sys/vm/vm_meter.c:
// All are of type CTLTYPE_UINT.
//

View File

@ -18,11 +18,11 @@ package collector
import (
"fmt"
"log/slog"
"os"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/node_exporter/collector/utils"
"github.com/prometheus/procfs/sysfs"
)
@ -31,7 +31,7 @@ const maxUint64 = ^uint64(0)
type fibrechannelCollector struct {
fs sysfs.FS
metricDescs map[string]*prometheus.Desc
logger log.Logger
logger *slog.Logger
subsystem string
}
@ -40,7 +40,7 @@ func init() {
}
// NewFibreChannelCollector returns a new Collector exposing FibreChannel stats.
func NewFibreChannelCollector(logger log.Logger) (Collector, error) {
func NewFibreChannelCollector(logger *slog.Logger) (Collector, error) {
var i fibrechannelCollector
var err error
@ -66,18 +66,6 @@ func NewFibreChannelCollector(logger log.Logger) (Collector, error) {
"rx_words_total": "Number of words received by host port",
"tx_frames_total": "Number of frames transmitted by host port",
"link_failure_total": "Number of times the host port link has failed",
"name": "Name of Fibre Channel HBA",
"speed": "Current operating speed",
"port_state": "Current port state",
"port_type": "Port type, what the port is connected to",
"symbolic_name": "Symbolic Name",
"node_name": "Node Name as hexadecimal string",
"port_id": "Port ID as string",
"port_name": "Port Name as hexadecimal string",
"fabric_name": "Fabric Name; 0 if PTP",
"dev_loss_tmo": "Device Loss Timeout in seconds",
"supported_classes": "The FC classes supported",
"supported_speeds": "The FC speeds supported",
}
i.metricDescs = make(map[string]*prometheus.Desc)
@ -110,7 +98,7 @@ func (c *fibrechannelCollector) Update(ch chan<- prometheus.Metric) error {
hosts, err := c.fs.FibreChannelClass()
if err != nil {
if os.IsNotExist(err) {
level.Debug(c.logger).Log("msg", "fibrechannel statistics not found, skipping")
c.logger.Debug("fibrechannel statistics not found, skipping")
return ErrNoData
}
return fmt.Errorf("error obtaining FibreChannel class info: %s", err)
@ -126,23 +114,36 @@ func (c *fibrechannelCollector) Update(ch chan<- prometheus.Metric) error {
infoValue := 1.0
// First push the Host values
ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, host.Name, host.Speed, host.PortState, host.PortType, host.PortID, host.PortName, host.FabricName, host.SymbolicName, host.SupportedClasses, host.SupportedSpeeds, host.DevLossTMO)
ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, utils.SafeDereference(
host.Name,
host.Speed,
host.PortState,
host.PortType,
host.PortID,
host.PortName,
host.FabricName,
host.SymbolicName,
host.SupportedClasses,
host.SupportedSpeeds,
host.DevLossTMO,
)...)
// Then the counters
c.pushCounter(ch, "dumped_frames_total", host.Counters.DumpedFrames, host.Name)
c.pushCounter(ch, "error_frames_total", host.Counters.ErrorFrames, host.Name)
c.pushCounter(ch, "invalid_crc_total", host.Counters.InvalidCRCCount, host.Name)
c.pushCounter(ch, "rx_frames_total", host.Counters.RXFrames, host.Name)
c.pushCounter(ch, "rx_words_total", host.Counters.RXWords, host.Name)
c.pushCounter(ch, "tx_frames_total", host.Counters.TXFrames, host.Name)
c.pushCounter(ch, "tx_words_total", host.Counters.TXWords, host.Name)
c.pushCounter(ch, "seconds_since_last_reset_total", host.Counters.SecondsSinceLastReset, host.Name)
c.pushCounter(ch, "invalid_tx_words_total", host.Counters.InvalidTXWordCount, host.Name)
c.pushCounter(ch, "link_failure_total", host.Counters.LinkFailureCount, host.Name)
c.pushCounter(ch, "loss_of_sync_total", host.Counters.LossOfSyncCount, host.Name)
c.pushCounter(ch, "loss_of_signal_total", host.Counters.LossOfSignalCount, host.Name)
c.pushCounter(ch, "nos_total", host.Counters.NosCount, host.Name)
c.pushCounter(ch, "fcp_packet_aborts_total", host.Counters.FCPPacketAborts, host.Name)
// Note: `procfs` guarantees a safe dereference for these counters.
c.pushCounter(ch, "dumped_frames_total", *host.Counters.DumpedFrames, *host.Name)
c.pushCounter(ch, "error_frames_total", *host.Counters.ErrorFrames, *host.Name)
c.pushCounter(ch, "invalid_crc_total", *host.Counters.InvalidCRCCount, *host.Name)
c.pushCounter(ch, "rx_frames_total", *host.Counters.RXFrames, *host.Name)
c.pushCounter(ch, "rx_words_total", *host.Counters.RXWords, *host.Name)
c.pushCounter(ch, "tx_frames_total", *host.Counters.TXFrames, *host.Name)
c.pushCounter(ch, "tx_words_total", *host.Counters.TXWords, *host.Name)
c.pushCounter(ch, "seconds_since_last_reset_total", *host.Counters.SecondsSinceLastReset, *host.Name)
c.pushCounter(ch, "invalid_tx_words_total", *host.Counters.InvalidTXWordCount, *host.Name)
c.pushCounter(ch, "link_failure_total", *host.Counters.LinkFailureCount, *host.Name)
c.pushCounter(ch, "loss_of_sync_total", *host.Counters.LossOfSyncCount, *host.Name)
c.pushCounter(ch, "loss_of_signal_total", *host.Counters.LossOfSignalCount, *host.Name)
c.pushCounter(ch, "nos_total", *host.Counters.NosCount, *host.Name)
c.pushCounter(ch, "fcp_packet_aborts_total", *host.Counters.FCPPacketAborts, *host.Name)
}
return nil
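Note: the info metric above now passes the sysfs string pointers through utils.SafeDereference instead of dereferencing them directly. The helper's implementation is not part of this diff, so the following is only a hedged sketch of what such a variadic helper could look like (assumed behaviour: nil *string pointers become empty label values):

package utils

// SafeDereference is an illustrative sketch, not the actual node_exporter
// implementation: it turns a list of *string into label values, mapping
// nil pointers to the empty string.
func SafeDereference(ptrs ...*string) []string {
	values := make([]string, 0, len(ptrs))
	for _, p := range ptrs {
		if p == nil {
			values = append(values, "")
			continue
		}
		values = append(values, *p)
	}
	return values
}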

View File

@ -20,10 +20,10 @@ import (
"bytes"
"fmt"
"io"
"log/slog"
"os"
"strconv"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -32,7 +32,7 @@ const (
)
type fileFDStatCollector struct {
logger log.Logger
logger *slog.Logger
}
func init() {
@ -40,7 +40,7 @@ func init() {
}
// NewFileFDStatCollector returns a new Collector exposing file-nr stats.
func NewFileFDStatCollector(logger log.Logger) (Collector, error) {
func NewFileFDStatCollector(logger *slog.Logger) (Collector, error) {
return &fileFDStatCollector{logger}, nil
}

View File

@ -0,0 +1,65 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nofilesystem
// +build !nofilesystem
package collector
import (
"github.com/power-devops/perfstat"
)
const (
defMountPointsExcluded = "^/(dev|aha)($|/)"
defFSTypesExcluded = "^procfs$"
)
// Expose filesystem fullness.
func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
fsStat, err := perfstat.FileSystemStat()
if err != nil {
return nil, err
}
for _, stat := range fsStat {
if c.excludedMountPointsPattern.MatchString(stat.MountPoint) {
c.logger.Debug("Ignoring mount point", "mountpoint", stat.MountPoint)
continue
}
fstype := stat.TypeString()
if c.excludedFSTypesPattern.MatchString(fstype) {
c.logger.Debug("Ignoring fs type", "type", fstype)
continue
}
ro := 0.0
if stat.Flags&perfstat.VFS_READONLY != 0 {
ro = 1.0
}
stats = append(stats, filesystemStats{
labels: filesystemLabels{
device: stat.Device,
mountPoint: stat.MountPoint,
fsType: fstype,
},
size: float64(stat.TotalBlocks / 512.0),
free: float64(stat.FreeBlocks / 512.0),
avail: float64(stat.FreeBlocks / 512.0), // AIX doesn't distinguish between free and available blocks.
files: float64(stat.TotalInodes),
filesFree: float64(stat.FreeInodes),
ro: ro,
})
}
return stats, nil
}

View File

@ -20,8 +20,6 @@ package collector
import (
"errors"
"unsafe"
"github.com/go-kit/log/level"
)
/*
@ -51,14 +49,14 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
for i := 0; i < int(count); i++ {
mountpoint := C.GoString(&mnt[i].f_mntonname[0])
if c.excludedMountPointsPattern.MatchString(mountpoint) {
level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint)
c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := C.GoString(&mnt[i].f_mntfromname[0])
fstype := C.GoString(&mnt[i].f_fstypename[0])
if c.excludedFSTypesPattern.MatchString(fstype) {
level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype)
c.logger.Debug("Ignoring fs type", "type", fstype)
continue
}

View File

@ -11,19 +11,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nofilesystem && (linux || freebsd || openbsd || darwin || dragonfly)
//go:build !nofilesystem && (linux || freebsd || netbsd || openbsd || darwin || dragonfly || aix)
// +build !nofilesystem
// +build linux freebsd openbsd darwin dragonfly
// +build linux freebsd netbsd openbsd darwin dragonfly aix
package collector
import (
"errors"
"log/slog"
"regexp"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
@ -69,11 +68,12 @@ type filesystemCollector struct {
sizeDesc, freeDesc, availDesc *prometheus.Desc
filesDesc, filesFreeDesc *prometheus.Desc
roDesc, deviceErrorDesc *prometheus.Desc
logger log.Logger
mountInfoDesc *prometheus.Desc
logger *slog.Logger
}
type filesystemLabels struct {
device, mountPoint, fsType, options, deviceError string
device, mountPoint, fsType, options, deviceError, major, minor string
}
type filesystemStats struct {
@ -88,10 +88,10 @@ func init() {
}
// NewFilesystemCollector returns a new Collector exposing filesystems stats.
func NewFilesystemCollector(logger log.Logger) (Collector, error) {
func NewFilesystemCollector(logger *slog.Logger) (Collector, error) {
if *oldMountPointsExcluded != "" {
if !mountPointsExcludeSet {
level.Warn(logger).Log("msg", "--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude")
logger.Warn("--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude")
*mountPointsExclude = *oldMountPointsExcluded
} else {
return nil, errors.New("--collector.filesystem.ignored-mount-points and --collector.filesystem.mount-points-exclude are mutually exclusive")
@ -100,7 +100,7 @@ func NewFilesystemCollector(logger log.Logger) (Collector, error) {
if *oldFSTypesExcluded != "" {
if !fsTypesExcludeSet {
level.Warn(logger).Log("msg", "--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude")
logger.Warn("--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude")
*fsTypesExclude = *oldFSTypesExcluded
} else {
return nil, errors.New("--collector.filesystem.ignored-fs-types and --collector.filesystem.fs-types-exclude are mutually exclusive")
@ -108,9 +108,9 @@ func NewFilesystemCollector(logger log.Logger) (Collector, error) {
}
subsystem := "filesystem"
level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.mount-points-exclude", "flag", *mountPointsExclude)
logger.Info("Parsed flag --collector.filesystem.mount-points-exclude", "flag", *mountPointsExclude)
mountPointPattern := regexp.MustCompile(*mountPointsExclude)
level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.fs-types-exclude", "flag", *fsTypesExclude)
logger.Info("Parsed flag --collector.filesystem.fs-types-exclude", "flag", *fsTypesExclude)
filesystemsTypesPattern := regexp.MustCompile(*fsTypesExclude)
sizeDesc := prometheus.NewDesc(
@ -155,6 +155,13 @@ func NewFilesystemCollector(logger log.Logger) (Collector, error) {
filesystemLabelNames, nil,
)
mountInfoDesc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "mount_info"),
"Filesystem mount information.",
[]string{"device", "major", "minor", "mountpoint"},
nil,
)
return &filesystemCollector{
excludedMountPointsPattern: mountPointPattern,
excludedFSTypesPattern: filesystemsTypesPattern,
@ -165,6 +172,7 @@ func NewFilesystemCollector(logger log.Logger) (Collector, error) {
filesFreeDesc: filesFreeDesc,
roDesc: roDesc,
deviceErrorDesc: deviceErrorDesc,
mountInfoDesc: mountInfoDesc,
logger: logger,
}, nil
}
@ -215,6 +223,10 @@ func (c *filesystemCollector) Update(ch chan<- prometheus.Metric) error {
c.filesFreeDesc, prometheus.GaugeValue,
s.filesFree, s.labels.device, s.labels.mountPoint, s.labels.fsType, s.labels.deviceError,
)
ch <- prometheus.MustNewConstMetric(
c.mountInfoDesc, prometheus.GaugeValue,
1.0, s.labels.device, s.labels.major, s.labels.minor, s.labels.mountPoint,
)
}
return nil
}
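Note: with the "node" namespace and the "filesystem" subsystem, the new descriptor above is exported as node_filesystem_mount_info. A hypothetical scrape line (the device, major, minor and mountpoint values here are made up for illustration) would look like:

# HELP node_filesystem_mount_info Filesystem mount information.
# TYPE node_filesystem_mount_info gauge
node_filesystem_mount_info{device="/dev/sda1",major="8",minor="1",mountpoint="/"} 1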

View File

@ -17,7 +17,6 @@
package collector
import (
"github.com/go-kit/log/level"
"golang.org/x/sys/unix"
)
@ -41,19 +40,19 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
for _, fs := range buf {
mountpoint := unix.ByteSliceToString(fs.Mntonname[:])
if c.excludedMountPointsPattern.MatchString(mountpoint) {
level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint)
c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := unix.ByteSliceToString(fs.Mntfromname[:])
fstype := unix.ByteSliceToString(fs.Fstypename[:])
if c.excludedFSTypesPattern.MatchString(fstype) {
level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype)
c.logger.Debug("Ignoring fs type", "type", fstype)
continue
}
if (fs.Flags & unix.MNT_IGNORE) != 0 {
level.Debug(c.logger).Log("msg", "Ignoring mount flagged as ignore", "mountpoint", mountpoint)
c.logger.Debug("Ignoring mount flagged as ignore", "mountpoint", mountpoint)
continue
}

View File

@ -21,14 +21,13 @@ import (
"errors"
"fmt"
"io"
"log/slog"
"os"
"strings"
"sync"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"golang.org/x/sys/unix"
)
@ -75,11 +74,11 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
go func() {
for _, labels := range mps {
if c.excludedMountPointsPattern.MatchString(labels.mountPoint) {
level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", labels.mountPoint)
c.logger.Debug("Ignoring mount point", "mountpoint", labels.mountPoint)
continue
}
if c.excludedFSTypesPattern.MatchString(labels.fsType) {
level.Debug(c.logger).Log("msg", "Ignoring fs", "type", labels.fsType)
c.logger.Debug("Ignoring fs", "type", labels.fsType)
continue
}
@ -90,7 +89,7 @@ func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
labels: labels,
deviceError: 1,
})
level.Debug(c.logger).Log("msg", "Mount point is in an unresponsive state", "mountpoint", labels.mountPoint)
c.logger.Debug("Mount point is in an unresponsive state", "mountpoint", labels.mountPoint)
stuckMountsMtx.Unlock()
continue
}
@ -128,14 +127,14 @@ func (c *filesystemCollector) processStat(labels filesystemLabels) filesystemSta
// If the mount has been marked as stuck, unmark it and log its recovery.
if _, ok := stuckMounts[labels.mountPoint]; ok {
level.Debug(c.logger).Log("msg", "Mount point has recovered, monitoring will resume", "mountpoint", labels.mountPoint)
c.logger.Debug("Mount point has recovered, monitoring will resume", "mountpoint", labels.mountPoint)
delete(stuckMounts, labels.mountPoint)
}
stuckMountsMtx.Unlock()
if err != nil {
labels.deviceError = err.Error()
level.Debug(c.logger).Log("msg", "Error on statfs() system call", "rootfs", rootfsFilePath(labels.mountPoint), "err", err)
c.logger.Debug("Error on statfs() system call", "rootfs", rootfsFilePath(labels.mountPoint), "err", err)
return filesystemStats{
labels: labels,
deviceError: 1,
@ -157,7 +156,7 @@ func (c *filesystemCollector) processStat(labels filesystemLabels) filesystemSta
// stuckMountWatcher listens on the given success channel and if the channel closes
// then the watcher does nothing. If instead the timeout is reached, the
// mount point that is being watched is marked as stuck.
func stuckMountWatcher(mountPoint string, success chan struct{}, logger log.Logger) {
func stuckMountWatcher(mountPoint string, success chan struct{}, logger *slog.Logger) {
mountCheckTimer := time.NewTimer(*mountTimeout)
defer mountCheckTimer.Stop()
select {
@ -170,19 +169,19 @@ func stuckMountWatcher(mountPoint string, success chan struct{}, logger log.Logg
case <-success:
// Success came in just after the timeout was reached, don't label the mount as stuck
default:
level.Debug(logger).Log("msg", "Mount point timed out, it is being labeled as stuck and will not be monitored", "mountpoint", mountPoint)
logger.Debug("Mount point timed out, it is being labeled as stuck and will not be monitored", "mountpoint", mountPoint)
stuckMounts[mountPoint] = struct{}{}
}
stuckMountsMtx.Unlock()
}
}
func mountPointDetails(logger log.Logger) ([]filesystemLabels, error) {
file, err := os.Open(procFilePath("1/mounts"))
func mountPointDetails(logger *slog.Logger) ([]filesystemLabels, error) {
file, err := os.Open(procFilePath("1/mountinfo"))
if errors.Is(err, os.ErrNotExist) {
// Fallback to `/proc/mounts` if `/proc/1/mounts` is missing due hidepid.
level.Debug(logger).Log("msg", "Reading root mounts failed, falling back to system mounts", "err", err)
file, err = os.Open(procFilePath("mounts"))
// Fall back to `/proc/self/mountinfo` if `/proc/1/mountinfo` is missing due to hidepid.
logger.Debug("Reading root mounts failed, falling back to self mounts", "err", err)
file, err = os.Open(procFilePath("self/mountinfo"))
}
if err != nil {
return nil, err
@ -199,20 +198,33 @@ func parseFilesystemLabels(r io.Reader) ([]filesystemLabels, error) {
for scanner.Scan() {
parts := strings.Fields(scanner.Text())
if len(parts) < 4 {
if len(parts) < 10 {
return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text())
}
major, minor := 0, 0
_, err := fmt.Sscanf(parts[2], "%d:%d", &major, &minor)
if err != nil {
return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text())
}
m := 5
for parts[m+1] != "-" {
m++
}
// Ensure we handle the translation of \040 and \011
// as per fstab(5).
parts[1] = strings.Replace(parts[1], "\\040", " ", -1)
parts[1] = strings.Replace(parts[1], "\\011", "\t", -1)
parts[4] = strings.Replace(parts[4], "\\040", " ", -1)
parts[4] = strings.Replace(parts[4], "\\011", "\t", -1)
filesystems = append(filesystems, filesystemLabels{
device: parts[0],
mountPoint: rootfsStripPrefix(parts[1]),
fsType: parts[2],
options: parts[3],
device: parts[m+3],
mountPoint: rootfsStripPrefix(parts[4]),
fsType: parts[m+2],
options: parts[5],
major: fmt.Sprint(major),
minor: fmt.Sprint(minor),
deviceError: "",
})
}
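Note: the switch from /proc/1/mounts to /proc/1/mountinfo is what makes the major and minor labels possible. Per proc(5), field 3 of each mountinfo line is the major:minor device number, the optional fields after the mount options are terminated by a lone "-", and the filesystem type and source follow that separator. A small standalone sketch of the same parsing logic, run against a single made-up mountinfo line:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical /proc/self/mountinfo line; see proc(5) for the field layout.
	line := `36 35 8:1 / /mnt rw,noatime shared:1 - ext4 /dev/sda1 rw`
	parts := strings.Fields(line)

	var major, minor int
	if _, err := fmt.Sscanf(parts[2], "%d:%d", &major, &minor); err != nil {
		panic(err)
	}

	// Skip the variable-length optional fields until the "-" separator.
	m := 5
	for parts[m+1] != "-" {
		m++
	}

	fmt.Println("device:", parts[m+3])   // /dev/sda1
	fmt.Println("mountpoint:", parts[4]) // /mnt
	fmt.Println("fstype:", parts[m+2])   // ext4
	fmt.Println("options:", parts[5])    // rw,noatime
	fmt.Printf("major:minor = %d:%d\n", major, minor)
}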

View File

@ -17,11 +17,12 @@
package collector
import (
"io"
"log/slog"
"strings"
"testing"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
)
func Test_parseFilesystemLabelsError(t *testing.T) {
@ -82,15 +83,23 @@ func TestMountPointDetails(t *testing.T) {
"/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore] bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk": "",
}
filesystems, err := mountPointDetails(log.NewNopLogger())
filesystems, err := mountPointDetails(slog.New(slog.NewTextHandler(io.Discard, nil)))
if err != nil {
t.Log(err)
}
foundSet := map[string]bool{}
for _, fs := range filesystems {
if _, ok := expected[fs.mountPoint]; !ok {
t.Errorf("Got unexpected %s", fs.mountPoint)
}
foundSet[fs.mountPoint] = true
}
for mountPoint := range expected {
if _, ok := foundSet[mountPoint]; !ok {
t.Errorf("Expected %s, got nothing", mountPoint)
}
}
}
@ -103,7 +112,7 @@ func TestMountsFallback(t *testing.T) {
"/": "",
}
filesystems, err := mountPointDetails(log.NewNopLogger())
filesystems, err := mountPointDetails(slog.New(slog.NewTextHandler(io.Discard, nil)))
if err != nil {
t.Log(err)
}
@ -131,7 +140,7 @@ func TestPathRootfs(t *testing.T) {
"/sys/fs/cgroup": "",
}
filesystems, err := mountPointDetails(log.NewNopLogger())
filesystems, err := mountPointDetails(slog.New(slog.NewTextHandler(io.Discard, nil)))
if err != nil {
t.Log(err)
}

View File

@ -0,0 +1,132 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nofilesystem
// +build !nofilesystem
package collector
import (
"fmt"
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
const (
defMountPointsExcluded = "^/(dev)($|/)"
defFSTypesExcluded = "^(kernfs|procfs|ptyfs|fdesc)$"
_VFS_NAMELEN = 32
_VFS_MNAMELEN = 1024
)
/*
* Go uses the NetBSD 9 ABI and thus syscall.SYS_GETVFSSTAT is compat_90_getvfsstat.
* We have to declare struct statvfs90 because it is not included in the unix package.
* See NetBSD/src/sys/compat/sys/statvfs.h.
*/
type statvfs90 struct {
F_flag uint
F_bsize uint
F_frsize uint
F_iosize uint
F_blocks uint64
F_bfree uint64
F_bavail uint64
F_bresvd uint64
F_files uint64
F_ffree uint64
F_favail uint64
F_fresvd uint64
F_syncreads uint64
F_syncwrites uint64
F_asyncreads uint64
F_asyncwrites uint64
F_fsidx [2]uint32
F_fsid uint32
F_namemax uint
F_owner uint32
F_spare [4]uint32
F_fstypename [_VFS_NAMELEN]byte
F_mntonname [_VFS_MNAMELEN]byte
F_mntfromname [_VFS_MNAMELEN]byte
cgo_pad [4]byte
}
func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
var mnt []statvfs90
if syscall.SYS_GETVFSSTAT != 356 /* compat_90_getvfsstat */ {
/*
* Catch the case where Go ever moves to a newer ABI and bail out.
*/
return nil, fmt.Errorf("getvfsstat: ABI mismatch")
}
for {
r1, _, errno := syscall.Syscall(syscall.SYS_GETVFSSTAT, uintptr(0), 0, unix.ST_NOWAIT)
if errno != 0 {
return nil, fmt.Errorf("getvfsstat: %s", string(errno))
}
mnt = make([]statvfs90, r1, r1)
r2, _, errno := syscall.Syscall(syscall.SYS_GETVFSSTAT, uintptr(unsafe.Pointer(&mnt[0])), unsafe.Sizeof(mnt[0])*r1, unix.ST_NOWAIT /* ST_NOWAIT */)
if errno != 0 {
return nil, fmt.Errorf("getvfsstat: %s", string(errno))
}
if r1 == r2 {
break
}
}
stats = []filesystemStats{}
for _, v := range mnt {
mountpoint := unix.ByteSliceToString(v.F_mntonname[:])
if c.excludedMountPointsPattern.MatchString(mountpoint) {
c.logger.Debug("msg", "Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := unix.ByteSliceToString(v.F_mntfromname[:])
fstype := unix.ByteSliceToString(v.F_fstypename[:])
if c.excludedFSTypesPattern.MatchString(fstype) {
c.logger.Debug("msg", "Ignoring fs type", "type", fstype)
continue
}
var ro float64
if (v.F_flag & unix.MNT_RDONLY) != 0 {
ro = 1
}
stats = append(stats, filesystemStats{
labels: filesystemLabels{
device: device,
mountPoint: mountpoint,
fsType: fstype,
},
size: float64(v.F_blocks) * float64(v.F_bsize),
free: float64(v.F_bfree) * float64(v.F_bsize),
avail: float64(v.F_bavail) * float64(v.F_bsize),
files: float64(v.F_files),
filesFree: float64(v.F_ffree),
ro: ro,
})
}
return stats, nil
}

View File

@ -17,7 +17,6 @@
package collector
import (
"github.com/go-kit/log/level"
"golang.org/x/sys/unix"
)
@ -43,14 +42,14 @@ func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
for _, v := range mnt {
mountpoint := unix.ByteSliceToString(v.F_mntonname[:])
if c.excludedMountPointsPattern.MatchString(mountpoint) {
level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint)
c.logger.Debug("Ignoring mount point", "mountpoint", mountpoint)
continue
}
device := unix.ByteSliceToString(v.F_mntfromname[:])
fstype := unix.ByteSliceToString(v.F_fstypename[:])
if c.excludedFSTypesPattern.MatchString(fstype) {
level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype)
c.logger.Debug("Ignoring fs type", "type", fstype)
continue
}

View File

@ -1,55 +1,59 @@
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of frees.
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of allocated objects.
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
# HELP go_memstats_mallocs_total Total number of mallocs.
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_arp_entries ARP entries by device
@ -161,6 +165,14 @@ node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid1",uuid="0abb2
node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2
node_btrfs_allocation_ratio{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2
node_btrfs_allocation_ratio{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2
# HELP node_btrfs_commit_seconds_total Sum of the duration of all commits, in seconds.
# TYPE node_btrfs_commit_seconds_total counter
node_btrfs_commit_seconds_total{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 47836.09
node_btrfs_commit_seconds_total{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0
# HELP node_btrfs_commits_total The total number of commits that have occurred.
# TYPE node_btrfs_commits_total counter
node_btrfs_commits_total{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 258051
node_btrfs_commits_total{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0
# HELP node_btrfs_device_size_bytes Size of a device that is part of the filesystem.
# TYPE node_btrfs_device_size_bytes gauge
node_btrfs_device_size_bytes{device="loop22",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10
@ -177,6 +189,14 @@ node_btrfs_global_rsv_size_bytes{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.
# TYPE node_btrfs_info gauge
node_btrfs_info{label="",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1
node_btrfs_info{label="fixture",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1
# HELP node_btrfs_last_commit_seconds Duration of the most recent commit, in seconds.
# TYPE node_btrfs_last_commit_seconds gauge
node_btrfs_last_commit_seconds{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1
node_btrfs_last_commit_seconds{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0
# HELP node_btrfs_max_commit_seconds Duration of the slowest commit, in seconds.
# TYPE node_btrfs_max_commit_seconds gauge
node_btrfs_max_commit_seconds{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 51.462
node_btrfs_max_commit_seconds{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0
# HELP node_btrfs_reserved_bytes Amount of space reserved for a data type
# TYPE node_btrfs_reserved_bytes gauge
node_btrfs_reserved_bytes{block_group_type="data",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0
@ -742,48 +762,64 @@ node_entropy_available_bits 1337
node_entropy_pool_size_bits 4096
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_fibrechannel_dumped_frames_total Number of dumped frames
# TYPE node_fibrechannel_dumped_frames_total counter
node_fibrechannel_dumped_frames_total{fc_host="host1"} 0
# HELP node_fibrechannel_error_frames_total Number of errors in frames
# TYPE node_fibrechannel_error_frames_total counter
node_fibrechannel_error_frames_total{fc_host="host0"} 0
node_fibrechannel_error_frames_total{fc_host="host1"} 19
# HELP node_fibrechannel_fcp_packet_aborts_total Number of aborted packets
# TYPE node_fibrechannel_fcp_packet_aborts_total counter
node_fibrechannel_fcp_packet_aborts_total{fc_host="host0"} 19
# HELP node_fibrechannel_info Non-numeric data from /sys/class/fc_host/<host>, value is always 1.
# TYPE node_fibrechannel_info gauge
node_fibrechannel_info{dev_loss_tmo="",fabric_name="",fc_host="host1",port_id="",port_name="",port_state="",port_type="",speed="8 Gbit",supported_classes="",supported_speeds="",symbolic_name=""} 1
node_fibrechannel_info{dev_loss_tmo="30",fabric_name="0",fc_host="host0",port_id="000002",port_name="1000e0071bce95f2",port_state="Online",port_type="Point-To-Point (direct nport connection)",speed="16 Gbit",supported_classes="Class 3",supported_speeds="4 Gbit, 8 Gbit, 16 Gbit",symbolic_name="Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux"} 1
# HELP node_fibrechannel_invalid_crc_total Invalid Cyclic Redundancy Check count
# TYPE node_fibrechannel_invalid_crc_total counter
node_fibrechannel_invalid_crc_total{fc_host="host0"} 2
node_fibrechannel_invalid_crc_total{fc_host="host1"} 32
# HELP node_fibrechannel_invalid_tx_words_total Number of invalid words transmitted by host port
# TYPE node_fibrechannel_invalid_tx_words_total counter
node_fibrechannel_invalid_tx_words_total{fc_host="host0"} 8
node_fibrechannel_invalid_tx_words_total{fc_host="host1"} 128
# HELP node_fibrechannel_link_failure_total Number of times the host port link has failed
# TYPE node_fibrechannel_link_failure_total counter
node_fibrechannel_link_failure_total{fc_host="host0"} 9
node_fibrechannel_link_failure_total{fc_host="host1"} 144
# HELP node_fibrechannel_loss_of_signal_total Number of times signal has been lost
# TYPE node_fibrechannel_loss_of_signal_total counter
node_fibrechannel_loss_of_signal_total{fc_host="host0"} 17
node_fibrechannel_loss_of_signal_total{fc_host="host1"} 272
# HELP node_fibrechannel_loss_of_sync_total Number of failures on either bit or transmission word boundaries
# TYPE node_fibrechannel_loss_of_sync_total counter
node_fibrechannel_loss_of_sync_total{fc_host="host0"} 16
node_fibrechannel_loss_of_sync_total{fc_host="host1"} 256
# HELP node_fibrechannel_nos_total Number Not_Operational Primitive Sequence received by host port
# TYPE node_fibrechannel_nos_total counter
node_fibrechannel_nos_total{fc_host="host0"} 18
node_fibrechannel_nos_total{fc_host="host1"} 288
# HELP node_fibrechannel_rx_frames_total Number of frames received
# TYPE node_fibrechannel_rx_frames_total counter
node_fibrechannel_rx_frames_total{fc_host="host0"} 3
node_fibrechannel_rx_frames_total{fc_host="host1"} 48
# HELP node_fibrechannel_rx_words_total Number of words received by host port
# TYPE node_fibrechannel_rx_words_total counter
node_fibrechannel_rx_words_total{fc_host="host0"} 4
node_fibrechannel_rx_words_total{fc_host="host1"} 64
# HELP node_fibrechannel_seconds_since_last_reset_total Number of seconds since last host port reset
# TYPE node_fibrechannel_seconds_since_last_reset_total counter
node_fibrechannel_seconds_since_last_reset_total{fc_host="host0"} 7
node_fibrechannel_seconds_since_last_reset_total{fc_host="host1"} 112
# HELP node_fibrechannel_tx_frames_total Number of frames transmitted by host port
# TYPE node_fibrechannel_tx_frames_total counter
node_fibrechannel_tx_frames_total{fc_host="host0"} 5
node_fibrechannel_tx_frames_total{fc_host="host1"} 80
# HELP node_fibrechannel_tx_words_total Number of words transmitted by host port
# TYPE node_fibrechannel_tx_words_total counter
node_fibrechannel_tx_words_total{fc_host="host0"} 6
node_fibrechannel_tx_words_total{fc_host="host1"} 96
# HELP node_filefd_allocated File descriptor statistics: allocated.
# TYPE node_filefd_allocated gauge
node_filefd_allocated 1024
@ -2306,6 +2342,9 @@ node_netstat_TcpExt_SyncookiesSent 0
# HELP node_netstat_TcpExt_TCPOFOQueue Statistic TcpExtTCPOFOQueue.
# TYPE node_netstat_TcpExt_TCPOFOQueue untyped
node_netstat_TcpExt_TCPOFOQueue 42
# HELP node_netstat_TcpExt_TCPRcvQDrop Statistic TcpExtTCPRcvQDrop.
# TYPE node_netstat_TcpExt_TCPRcvQDrop untyped
node_netstat_TcpExt_TCPRcvQDrop 131
# HELP node_netstat_TcpExt_TCPTimeouts Statistic TcpExtTCPTimeouts.
# TYPE node_netstat_TcpExt_TCPTimeouts untyped
node_netstat_TcpExt_TCPTimeouts 115
@ -2814,6 +2853,9 @@ node_pressure_io_stalled_seconds_total 159.229614
# HELP node_pressure_io_waiting_seconds_total Total time in seconds that processes have waited due to IO congestion
# TYPE node_pressure_io_waiting_seconds_total counter
node_pressure_io_waiting_seconds_total 159.886802
# HELP node_pressure_irq_stalled_seconds_total Total time in seconds no process could make progress due to IRQ congestion
# TYPE node_pressure_irq_stalled_seconds_total counter
node_pressure_irq_stalled_seconds_total 0.008494
# HELP node_pressure_memory_stalled_seconds_total Total time in seconds no process could make progress due to memory congestion
# TYPE node_pressure_memory_stalled_seconds_total counter
node_pressure_memory_stalled_seconds_total 0
@ -3741,6 +3783,9 @@ node_zfs_arc_l2_writes_lock_retry 0
# HELP node_zfs_arc_l2_writes_sent kstat.zfs.misc.arcstats.l2_writes_sent
# TYPE node_zfs_arc_l2_writes_sent untyped
node_zfs_arc_l2_writes_sent 0
# HELP node_zfs_arc_memory_available_bytes kstat.zfs.misc.arcstats.memory_available_bytes
# TYPE node_zfs_arc_memory_available_bytes untyped
node_zfs_arc_memory_available_bytes -9.223372036854776e+17
# HELP node_zfs_arc_memory_direct_count kstat.zfs.misc.arcstats.memory_direct_count
# TYPE node_zfs_arc_memory_direct_count untyped
node_zfs_arc_memory_direct_count 542
@ -4548,6 +4593,10 @@ node_zoneinfo_spanned_pages{node="0",zone="Normal"} 7.806976e+06
# TYPE process_cpu_seconds_total counter
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
# HELP process_network_receive_bytes_total Number of bytes received by the process over the network.
# TYPE process_network_receive_bytes_total counter
# HELP process_network_transmit_bytes_total Number of bytes sent by the process over the network.
# TYPE process_network_transmit_bytes_total counter
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
# HELP process_resident_memory_bytes Resident memory size in bytes.

View File

@ -1,55 +1,59 @@
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent
# TYPE go_gc_gogc_percent gauge
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes
# TYPE go_gc_gomemlimit_bytes gauge
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
# HELP go_memstats_frees_total Total number of frees.
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
# HELP go_memstats_heap_objects Number of allocated objects.
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
# HELP go_memstats_mallocs_total Total number of mallocs.
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads
# TYPE go_sched_gomaxprocs_threads gauge
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
# HELP node_arp_entries ARP entries by device
@ -161,6 +165,14 @@ node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid1",uuid="0abb2
node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2
node_btrfs_allocation_ratio{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2
node_btrfs_allocation_ratio{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2
# HELP node_btrfs_commit_seconds_total Sum of the duration of all commits, in seconds.
# TYPE node_btrfs_commit_seconds_total counter
node_btrfs_commit_seconds_total{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 47836.09
node_btrfs_commit_seconds_total{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0
# HELP node_btrfs_commits_total The total number of commits that have occurred.
# TYPE node_btrfs_commits_total counter
node_btrfs_commits_total{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 258051
node_btrfs_commits_total{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0
# HELP node_btrfs_device_size_bytes Size of a device that is part of the filesystem.
# TYPE node_btrfs_device_size_bytes gauge
node_btrfs_device_size_bytes{device="loop22",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10
@ -177,6 +189,14 @@ node_btrfs_global_rsv_size_bytes{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.
# TYPE node_btrfs_info gauge
node_btrfs_info{label="",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1
node_btrfs_info{label="fixture",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1
# HELP node_btrfs_last_commit_seconds Duration of the most recent commit, in seconds.
# TYPE node_btrfs_last_commit_seconds gauge
node_btrfs_last_commit_seconds{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1
node_btrfs_last_commit_seconds{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0
# HELP node_btrfs_max_commit_seconds Duration of the slowest commit, in seconds.
# TYPE node_btrfs_max_commit_seconds gauge
node_btrfs_max_commit_seconds{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 51.462
node_btrfs_max_commit_seconds{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0
# HELP node_btrfs_reserved_bytes Amount of space reserved for a data type
# TYPE node_btrfs_reserved_bytes gauge
node_btrfs_reserved_bytes{block_group_type="data",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0
@ -764,48 +784,64 @@ node_entropy_available_bits 1337
node_entropy_pool_size_bits 4096
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build.
# TYPE node_exporter_build_info gauge
# HELP node_fibrechannel_dumped_frames_total Number of dumped frames
# TYPE node_fibrechannel_dumped_frames_total counter
node_fibrechannel_dumped_frames_total{fc_host="host1"} 0
# HELP node_fibrechannel_error_frames_total Number of errors in frames
# TYPE node_fibrechannel_error_frames_total counter
node_fibrechannel_error_frames_total{fc_host="host0"} 0
node_fibrechannel_error_frames_total{fc_host="host1"} 19
# HELP node_fibrechannel_fcp_packet_aborts_total Number of aborted packets
# TYPE node_fibrechannel_fcp_packet_aborts_total counter
node_fibrechannel_fcp_packet_aborts_total{fc_host="host0"} 19
# HELP node_fibrechannel_info Non-numeric data from /sys/class/fc_host/<host>, value is always 1.
# TYPE node_fibrechannel_info gauge
node_fibrechannel_info{dev_loss_tmo="",fabric_name="",fc_host="host1",port_id="",port_name="",port_state="",port_type="",speed="8 Gbit",supported_classes="",supported_speeds="",symbolic_name=""} 1
node_fibrechannel_info{dev_loss_tmo="30",fabric_name="0",fc_host="host0",port_id="000002",port_name="1000e0071bce95f2",port_state="Online",port_type="Point-To-Point (direct nport connection)",speed="16 Gbit",supported_classes="Class 3",supported_speeds="4 Gbit, 8 Gbit, 16 Gbit",symbolic_name="Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux"} 1
# HELP node_fibrechannel_invalid_crc_total Invalid Cyclic Redundancy Check count
# TYPE node_fibrechannel_invalid_crc_total counter
node_fibrechannel_invalid_crc_total{fc_host="host0"} 2
node_fibrechannel_invalid_crc_total{fc_host="host1"} 32
# HELP node_fibrechannel_invalid_tx_words_total Number of invalid words transmitted by host port
# TYPE node_fibrechannel_invalid_tx_words_total counter
node_fibrechannel_invalid_tx_words_total{fc_host="host0"} 8
node_fibrechannel_invalid_tx_words_total{fc_host="host1"} 128
# HELP node_fibrechannel_link_failure_total Number of times the host port link has failed
# TYPE node_fibrechannel_link_failure_total counter
node_fibrechannel_link_failure_total{fc_host="host0"} 9
node_fibrechannel_link_failure_total{fc_host="host1"} 144
# HELP node_fibrechannel_loss_of_signal_total Number of times signal has been lost
# TYPE node_fibrechannel_loss_of_signal_total counter
node_fibrechannel_loss_of_signal_total{fc_host="host0"} 17
node_fibrechannel_loss_of_signal_total{fc_host="host1"} 272
# HELP node_fibrechannel_loss_of_sync_total Number of failures on either bit or transmission word boundaries
# TYPE node_fibrechannel_loss_of_sync_total counter
node_fibrechannel_loss_of_sync_total{fc_host="host0"} 16
node_fibrechannel_loss_of_sync_total{fc_host="host1"} 256
# HELP node_fibrechannel_nos_total Number Not_Operational Primitive Sequence received by host port
# TYPE node_fibrechannel_nos_total counter
node_fibrechannel_nos_total{fc_host="host0"} 18
node_fibrechannel_nos_total{fc_host="host1"} 288
# HELP node_fibrechannel_rx_frames_total Number of frames received
# TYPE node_fibrechannel_rx_frames_total counter
node_fibrechannel_rx_frames_total{fc_host="host0"} 3
node_fibrechannel_rx_frames_total{fc_host="host1"} 48
# HELP node_fibrechannel_rx_words_total Number of words received by host port
# TYPE node_fibrechannel_rx_words_total counter
node_fibrechannel_rx_words_total{fc_host="host0"} 4
node_fibrechannel_rx_words_total{fc_host="host1"} 64
# HELP node_fibrechannel_seconds_since_last_reset_total Number of seconds since last host port reset
# TYPE node_fibrechannel_seconds_since_last_reset_total counter
node_fibrechannel_seconds_since_last_reset_total{fc_host="host0"} 7
node_fibrechannel_seconds_since_last_reset_total{fc_host="host1"} 112
# HELP node_fibrechannel_tx_frames_total Number of frames transmitted by host port
# TYPE node_fibrechannel_tx_frames_total counter
node_fibrechannel_tx_frames_total{fc_host="host0"} 5
node_fibrechannel_tx_frames_total{fc_host="host1"} 80
# HELP node_fibrechannel_tx_words_total Number of words transmitted by host port
# TYPE node_fibrechannel_tx_words_total counter
node_fibrechannel_tx_words_total{fc_host="host0"} 6
node_fibrechannel_tx_words_total{fc_host="host1"} 96
# HELP node_filefd_allocated File descriptor statistics: allocated.
# TYPE node_filefd_allocated gauge
node_filefd_allocated 1024
@ -2328,6 +2364,9 @@ node_netstat_TcpExt_SyncookiesSent 0
# HELP node_netstat_TcpExt_TCPOFOQueue Statistic TcpExtTCPOFOQueue.
# TYPE node_netstat_TcpExt_TCPOFOQueue untyped
node_netstat_TcpExt_TCPOFOQueue 42
# HELP node_netstat_TcpExt_TCPRcvQDrop Statistic TcpExtTCPRcvQDrop.
# TYPE node_netstat_TcpExt_TCPRcvQDrop untyped
node_netstat_TcpExt_TCPRcvQDrop 131
# HELP node_netstat_TcpExt_TCPTimeouts Statistic TcpExtTCPTimeouts.
# TYPE node_netstat_TcpExt_TCPTimeouts untyped
node_netstat_TcpExt_TCPTimeouts 115
@ -2836,6 +2875,9 @@ node_pressure_io_stalled_seconds_total 159.229614
# HELP node_pressure_io_waiting_seconds_total Total time in seconds that processes have waited due to IO congestion
# TYPE node_pressure_io_waiting_seconds_total counter
node_pressure_io_waiting_seconds_total 159.886802
# HELP node_pressure_irq_stalled_seconds_total Total time in seconds no process could make progress due to IRQ congestion
# TYPE node_pressure_irq_stalled_seconds_total counter
node_pressure_irq_stalled_seconds_total 0.008494
# HELP node_pressure_memory_stalled_seconds_total Total time in seconds no process could make progress due to memory congestion
# TYPE node_pressure_memory_stalled_seconds_total counter
node_pressure_memory_stalled_seconds_total 0
@ -3763,6 +3805,9 @@ node_zfs_arc_l2_writes_lock_retry 0
# HELP node_zfs_arc_l2_writes_sent kstat.zfs.misc.arcstats.l2_writes_sent
# TYPE node_zfs_arc_l2_writes_sent untyped
node_zfs_arc_l2_writes_sent 0
# HELP node_zfs_arc_memory_available_bytes kstat.zfs.misc.arcstats.memory_available_bytes
# TYPE node_zfs_arc_memory_available_bytes untyped
node_zfs_arc_memory_available_bytes -9.223372036854776e+17
# HELP node_zfs_arc_memory_direct_count kstat.zfs.misc.arcstats.memory_direct_count
# TYPE node_zfs_arc_memory_direct_count untyped
node_zfs_arc_memory_direct_count 542
@ -4570,6 +4615,10 @@ node_zoneinfo_spanned_pages{node="0",zone="Normal"} 7.806976e+06
# TYPE process_cpu_seconds_total counter
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
# HELP process_network_receive_bytes_total Number of bytes received by the process over the network.
# TYPE process_network_receive_bytes_total counter
# HELP process_network_transmit_bytes_total Number of bytes sent by the process over the network.
# TYPE process_network_transmit_bytes_total counter
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
# HELP process_resident_memory_bytes Resident memory size in bytes.

View File

@ -0,0 +1,31 @@
24 29 0:22 / /sys rw,nosuid,nodev,noexec,relatime shared:7 - sysfs sysfs rw
25 29 0:23 / /proc rw,nosuid,nodev,noexec,relatime shared:13 - proc proc rw
26 29 0:5 / /dev rw,nosuid,relatime shared:2 - devtmpfs udev rw,size=7978892k,nr_inodes=1994723,mode=755
27 26 0:24 / /dev/pts rw,nosuid,noexec,relatime shared:3 - devpts devpts rw,gid=5,mode=620,ptmxmode=000
28 29 0:25 / /run rw,nosuid,relatime shared:5 - tmpfs tmpfs rw,size=1617716k,mode=755
29 1 259:2 / / rw,relatime shared:1 - ext4 /dev/dm-2 errors=remount-ro,data=ordered
30 24 0:6 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:8 - securityfs securityfs rw
31 26 0:26 / /dev/shm rw,nosuid,nodev shared:4 - tmpfs tmpfs rw,inode64
32 28 0:27 / /run/lock rw,nosuid,nodev,noexec,relatime shared:6 - tmpfs tmpfs rw,size=5120k
33 24 0:28 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:9 - tmpfs tmpfs ro,mode=755
34 31 0:24 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd
35 32 0:25 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:11 - pstore pstore rw
36 33 0:26 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuset
37 34 0:27 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,cpu,cpuacct
38 35 0:28 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,devices
39 36 0:29 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,freezer
40 37 0:30 / /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,net_cls,net_prio
41 38 0:31 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,blkio
42 39 0:32 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:20 - cgroup cgroup rw,perf_event
43 40 0:33 / /proc/sys/fs/binfmt_misc rw,relatime shared:21 - systemd-1 autofs rw,fd=22,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
44 41 0:34 / /dev/mqueue rw,relatime shared:22 - mqueue mqueue rw
45 42 0:35 / /sys/kernel/debug rw,relatime shared:23 - debugfs debugfs rw
46 43 0:36 / /dev/hugepages rw,relatime shared:24 - hugetlbfs hugetlbfs rw
47 44 0:37 / /sys/fs/fuse/connections rw,relatime shared:25 - fusectl fusectl rw
48 45 260:3 / /boot rw,relatime shared:92 - ext2 /dev/sda3 rw
49 46 0:39 / /run/rpc_pipefs rw,relatime shared:27 - rpc_pipefs rpc_pipefs rw
265 37 0:41 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime shared:94 - binfmt_misc binfmt_misc rw
3002 28 0:79 / /run/user/1000 rw,nosuid,nodev,relatime shared:1225 - tmpfs tmpfs rw,size=1603436k,nr_inodes=400859,mode=700,uid=1000,gid=1000
3147 3002 0:81 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:1290 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
3148 3003 260:0 / /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\040bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk rw,relatime shared:31 - ext4 /dev/sda rw,data=ordered
3149 3004 260:0 / /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\011bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk rw,relatime shared:32 - ext4 /dev/sda rw,data=ordered

View File

@ -1,32 +0,0 @@
rootfs / rootfs rw 0 0
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
udev /dev devtmpfs rw,relatime,size=10240k,nr_inodes=1008585,mode=755 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,nosuid,relatime,size=1617716k,mode=755 0 0
/dev/dm-2 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=22,pgrp=1,timeout=300,minproto=5,maxproto=5,direct 0 0
mqueue /dev/mqueue mqueue rw,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0
fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
/dev/sda3 /boot ext2 rw,relatime 0 0
rpc_pipefs /run/rpc_pipefs rpc_pipefs rw,relatime 0 0
binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,relatime 0 0
tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=808860k,mode=700,uid=1000,gid=1000 0 0
gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
/dev/sda /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\040bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk ext4 rw,relatime,data=ordered 0 0
/dev/sda /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\011bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk ext4 rw,relatime,data=ordered 0 0

View File

@ -1,4 +1,4 @@
TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSPassive PAWSActive PAWSEstab DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPPrequeued TCPDirectCopyFromBacklog TCPDirectCopyFromPrequeue TCPPrequeueDropped TCPHPHits TCPHPHitsToUser TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPFACKReorder TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLoss TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans TCPTimeouts TCPRenoRecoveryFail TCPSackRecoveryFail TCPSchedulerFailed TCPRcvCollapsed TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPSackShifted TCPSackMerged TCPSackShiftFallback TCPBacklogDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPChallengeACK TCPSYNChallenge TCPOFOQueue
TcpExt: 0 0 2 0 0 0 0 0 0 0 388812 0 0 0 0 6 102471 17 9 0 0 80568 0 168808 0 4471289 26 1433940 3744565 0 1 0 0 0 0 0 0 0 0 48 0 0 0 1 0 1 0 1 115 0 0 0 0 9 0 5 0 41 4 0 0 0 0 0 0 0 1 0 0 0 0 2 5 0 0 0 0 0 0 0 2 2 42
TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSPassive PAWSActive PAWSEstab DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPPrequeued TCPDirectCopyFromBacklog TCPDirectCopyFromPrequeue TCPPrequeueDropped TCPHPHits TCPHPHitsToUser TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPFACKReorder TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLoss TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans TCPTimeouts TCPRenoRecoveryFail TCPSackRecoveryFail TCPSchedulerFailed TCPRcvCollapsed TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPSackShifted TCPSackMerged TCPSackShiftFallback TCPBacklogDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPChallengeACK TCPSYNChallenge TCPOFOQueue TCPRcvQDrop
TcpExt: 0 0 2 0 0 0 0 0 0 0 388812 0 0 0 0 6 102471 17 9 0 0 80568 0 168808 0 4471289 26 1433940 3744565 0 1 0 0 0 0 0 0 0 0 48 0 0 0 1 0 1 0 1 115 0 0 0 0 9 0 5 0 41 4 0 0 0 0 0 0 0 1 0 0 0 0 2 5 0 0 0 0 0 0 0 2 2 42 131
IpExt: InNoRoutes InTruncatedPkts InMcastPkts OutMcastPkts InBcastPkts OutBcastPkts InOctets OutOctets InMcastOctets OutMcastOctets InBcastOctets OutBcastOctets
IpExt: 0 0 0 0 0 0 6286396970 2786264347 0 0 0 0

View File

@ -1 +1,2 @@
some avg10=0.00 avg60=0.00 avg300=0.00 total=14036781
full avg10=0.00 avg60=0.00 avg300=0.00 total=0

View File

@ -0,0 +1 @@
full avg10=0.00 avg60=0.00 avg300=0.00 total=8494
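This one-line fixture backs the new node_pressure_irq_stalled_seconds_total sample shown earlier: pressure-stall totals are reported by the kernel in microseconds, so total=8494 is exported as 0.008494 seconds. IRQ pressure only has a "full" line, which is why no "some" entry appears here.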

View File

@ -1,93 +1,94 @@
6 1 0x01 91 4368 5266997922 97951858082072
name type data
hits 4 8772612
misses 4 604635
anon_evictable_data 4 0
anon_evictable_metadata 4 0
anon_size 4 1917440
arc_loaned_bytes 4 0
arc_meta_limit 4 6275982336
arc_meta_max 4 449286096
arc_meta_min 4 16777216
arc_meta_used 4 308103632
arc_need_free 4 0
arc_no_grow 4 0
arc_prune 4 0
arc_sys_free 4 261496832
arc_tempreserve 4 0
c 4 1643208777
c_max 4 8367976448
c_min 4 33554432
data_size 4 1295836160
deleted 4 60403
demand_data_hits 4 7221032
demand_data_misses 4 73300
demand_metadata_hits 4 1464353
demand_metadata_misses 4 498170
prefetch_data_hits 4 3615
prefetch_data_misses 4 17094
prefetch_metadata_hits 4 83612
prefetch_metadata_misses 4 16071
mru_hits 4 855535
mru_ghost_hits 4 21100
mfu_hits 4 7829854
mfu_ghost_hits 4 821
deleted 4 60403
mutex_miss 4 2
evict_skip 4 2265729
evict_not_enough 4 680
duplicate_buffers 4 0
duplicate_buffers_size 4 0
duplicate_reads 4 0
evict_l2_cached 4 0
evict_l2_eligible 4 8992514560
evict_l2_ineligible 4 992552448
evict_l2_skip 4 0
evict_not_enough 4 680
evict_skip 4 2265729
hash_chain_max 4 3
hash_chains 4 412
hash_collisions 4 50564
hash_elements 4 42359
hash_elements_max 4 88245
hash_collisions 4 50564
hash_chains 4 412
hash_chain_max 4 3
p 4 516395305
c 4 1643208777
c_min 4 33554432
c_max 4 8367976448
size 4 1603939792
hdr_size 4 16361080
data_size 4 1295836160
metadata_size 4 175298560
other_size 4 116443992
anon_size 4 1917440
anon_evictable_data 4 0
anon_evictable_metadata 4 0
mru_size 4 402593792
mru_evictable_data 4 278091264
mru_evictable_metadata 4 18606592
mru_ghost_size 4 999728128
mru_ghost_evictable_data 4 883765248
mru_ghost_evictable_metadata 4 115962880
mfu_size 4 1066623488
mfu_evictable_data 4 1017613824
mfu_evictable_metadata 4 9163776
mfu_ghost_size 4 104936448
mfu_ghost_evictable_data 4 96731136
mfu_ghost_evictable_metadata 4 8205312
l2_hits 4 0
l2_misses 4 0
hits 4 8772612
l2_abort_lowmem 4 0
l2_asize 4 0
l2_cdata_free_on_write 4 0
l2_cksum_bad 4 0
l2_compress_failures 4 0
l2_compress_successes 4 0
l2_compress_zeros 4 0
l2_evict_l1cached 4 0
l2_evict_lock_retry 4 0
l2_evict_reading 4 0
l2_feeds 4 0
l2_rw_clash 4 0
l2_free_on_write 4 0
l2_hdr_size 4 0
l2_hits 4 0
l2_io_error 4 0
l2_misses 4 0
l2_read_bytes 4 0
l2_rw_clash 4 0
l2_size 4 0
l2_write_bytes 4 0
l2_writes_sent 4 0
l2_writes_done 4 0
l2_writes_error 4 0
l2_writes_lock_retry 4 0
l2_evict_lock_retry 4 0
l2_evict_reading 4 0
l2_evict_l1cached 4 0
l2_free_on_write 4 0
l2_cdata_free_on_write 4 0
l2_abort_lowmem 4 0
l2_cksum_bad 4 0
l2_io_error 4 0
l2_size 4 0
l2_asize 4 0
l2_hdr_size 4 0
l2_compress_successes 4 0
l2_compress_zeros 4 0
l2_compress_failures 4 0
memory_throttle_count 4 0
duplicate_buffers 4 0
duplicate_buffers_size 4 0
duplicate_reads 4 0
l2_writes_sent 4 0
memory_available_bytes 3 -922337203685477580
memory_direct_count 4 542
memory_indirect_count 4 3006
arc_no_grow 4 0
arc_tempreserve 4 0
arc_loaned_bytes 4 0
arc_prune 4 0
arc_meta_used 4 308103632
arc_meta_limit 4 6275982336
arc_meta_max 4 449286096
arc_meta_min 4 16777216
arc_need_free 4 0
arc_sys_free 4 261496832
memory_throttle_count 4 0
metadata_size 4 175298560
mfu_evictable_data 4 1017613824
mfu_evictable_metadata 4 9163776
mfu_ghost_evictable_data 4 96731136
mfu_ghost_evictable_metadata 4 8205312
mfu_ghost_hits 4 821
mfu_ghost_size 4 104936448
mfu_hits 4 7829854
mfu_size 4 1066623488
misses 4 604635
mru_evictable_data 4 278091264
mru_evictable_metadata 4 18606592
mru_ghost_evictable_data 4 883765248
mru_ghost_evictable_metadata 4 115962880
mru_ghost_hits 4 21100
mru_ghost_size 4 999728128
mru_hits 4 855535
mru_size 4 402593792
mutex_miss 4 2
other_size 4 116443992
p 4 516395305
prefetch_data_hits 4 3615
prefetch_data_misses 4 17094
prefetch_metadata_hits 4 83612
prefetch_metadata_misses 4 16071
size 4 1603939792

View File

@ -288,6 +288,87 @@ Lines: 1
Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/class/fc_host/host1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/speed
Lines: 1
8 Gbit
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/class/fc_host/host1/statistics
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/dumped_frames
Lines: 1
0x0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/error_frames
Lines: 1
0x13
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/fcp_packet_aborts
Lines: 1
0xffffffffffffffff
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/invalid_crc_count
Lines: 1
0x20
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/invalid_tx_word_count
Lines: 1
0x80
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/link_failure_count
Lines: 1
0x90
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/loss_of_signal_count
Lines: 1
0x110
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/loss_of_sync_count
Lines: 1
0x100
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/nos_count
Lines: 1
0x120
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/rx_frames
Lines: 1
0x30
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/rx_words
Lines: 1
0x40
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/seconds_since_last_reset
Lines: 1
0x70
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/tx_frames
Lines: 1
0x50
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/class/fc_host/host1/statistics/tx_words
Lines: 1
0x60
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/class/hwmon
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@ -4408,6 +4489,14 @@ Lines: 1
4096
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/commit_stats
Lines: 4
commits 258051
last_commit_ms 1000
max_commit_ms 51462
total_commit_ms 47836090EOF
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
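The commit_stats fixture above feeds the new btrfs commit metrics in the e2e output earlier in this change. The millisecond fields are converted to seconds on export: total_commit_ms 47836090 appears as node_btrfs_commit_seconds_total 47836.09, max_commit_ms 51462 as node_btrfs_max_commit_seconds 51.462, last_commit_ms 1000 as node_btrfs_last_commit_seconds 1, and commits 258051 is exported unchanged as node_btrfs_commits_total.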

View File

@ -8,4 +8,4 @@ node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_different_help
node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_different_help/b.prom"} 1
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
node_textfile_scrape_error 1

View File

@ -0,0 +1,6 @@
29 1 259:0 / /host rw,seclabel,relatime,data=ordered shared:1 - ext4 /dev/nvme1n0 rw
30 1 260:0 / /host/media/volume1 rw,seclabel,relatime,data=ordered shared:1 - ext4 /dev/nvme1n1 rw
31 1 261:0 / /host/media/volume2 rw,seclabel,relatime,data=ordered shared:1 - ext4 /dev/nvme1n2 rw
31 26 0:26 / /dev/shm rw,nosuid,nodev shared:4 - tmpfs tmpfs rw,inode64
32 28 0:27 / /run/lock rw,nosuid,nodev,noexec,relatime shared:6 - tmpfs tmpfs rw,size=5120k,inode64
33 24 0:28 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime shared:9 - cgroup2 cgroup2 rw

View File

@ -1,6 +0,0 @@
/dev/nvme1n0 /host ext4 rw,seclabel,relatime,data=ordered 0 0
/dev/nvme1n1 /host/media/volume1 ext4 rw,seclabel,relatime,data=ordered 0 0
/dev/nvme1n2 /host/media/volume2 ext4 rw,seclabel,relatime,data=ordered 0 0
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0

View File

@ -1 +0,0 @@
rootfs / rootfs rw 0 0

View File

@ -0,0 +1 @@
29 1 259:2 / / rw,relatime shared:1 - ext4 /dev/nvme0n1p2 rw,errors=remount-ro

View File

@ -18,6 +18,8 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"os"
"path/filepath"
"regexp"
@ -25,15 +27,15 @@ import (
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
)
var (
collectorHWmonChipInclude = kingpin.Flag("collector.hwmon.chip-include", "Regexp of hwmon chip to include (mutually exclusive to device-exclude).").String()
collectorHWmonChipExclude = kingpin.Flag("collector.hwmon.chip-exclude", "Regexp of hwmon chip to exclude (mutually exclusive to device-include).").String()
collectorHWmonChipInclude = kingpin.Flag("collector.hwmon.chip-include", "Regexp of hwmon chip to include (mutually exclusive to device-exclude).").String()
collectorHWmonChipExclude = kingpin.Flag("collector.hwmon.chip-exclude", "Regexp of hwmon chip to exclude (mutually exclusive to device-include).").String()
collectorHWmonSensorInclude = kingpin.Flag("collector.hwmon.sensor-include", "Regexp of hwmon sensor to include (mutually exclusive to sensor-exclude).").String()
collectorHWmonSensorExclude = kingpin.Flag("collector.hwmon.sensor-exclude", "Regexp of hwmon sensor to exclude (mutually exclusive to sensor-include).").String()
hwmonInvalidMetricChars = regexp.MustCompile("[^a-z0-9:_]")
hwmonFilenameFormat = regexp.MustCompile(`^(?P<type>[^0-9]+)(?P<id>[0-9]*)?(_(?P<property>.+))?$`)
@ -52,16 +54,18 @@ func init() {
type hwMonCollector struct {
deviceFilter deviceFilter
logger log.Logger
sensorFilter deviceFilter
logger *slog.Logger
}
// NewHwMonCollector returns a new Collector exposing /sys/class/hwmon stats
// (similar to lm-sensors).
func NewHwMonCollector(logger log.Logger) (Collector, error) {
func NewHwMonCollector(logger *slog.Logger) (Collector, error) {
return &hwMonCollector{
logger: logger,
deviceFilter: newDeviceFilter(*collectorHWmonChipExclude, *collectorHWmonChipInclude),
sensorFilter: newDeviceFilter(*collectorHWmonSensorExclude, *collectorHWmonSensorInclude),
}, nil
}
@ -104,6 +108,9 @@ func sysReadFile(file string) ([]byte, error) {
if err != nil {
return nil, err
}
if n < 0 {
return nil, fmt.Errorf("failed to read file: %q, read returned negative bytes value: %d", file, n)
}
return b[:n], nil
}
@ -164,7 +171,7 @@ func (c *hwMonCollector) updateHwmon(ch chan<- prometheus.Metric, dir string) er
}
if c.deviceFilter.ignored(hwmonName) {
level.Debug(c.logger).Log("msg", "ignoring hwmon chip", "chip", hwmonName)
c.logger.Debug("ignoring hwmon chip", "chip", hwmonName)
return nil
}
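The pair of lines above shows the logging migration this change applies across every collector: go-kit's level.Debug(logger).Log("msg", ...) calls become structured calls on the standard library's *slog.Logger. A minimal, self-contained sketch of the new pattern follows; the handler setup and the example chip name are illustrative only, not code from node_exporter.

package main

import (
	"io"
	"log/slog"
	"os"
)

func main() {
	// Debug-level text handler on stderr; message plus key/value pairs
	// replaces the old "msg" key convention from go-kit/log.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	logger.Debug("ignoring hwmon chip", "chip", "platform_coretemp_0")

	// Tests use the same constructor with a discarding writer, as seen in the
	// ipvs test changes further down.
	nop := slog.New(slog.NewTextHandler(io.Discard, nil))
	nop.Debug("this line is dropped")
}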
@ -202,6 +209,15 @@ func (c *hwMonCollector) updateHwmon(ch chan<- prometheus.Metric, dir string) er
// Format all sensors.
for sensor, sensorData := range data {
// Filtering for sensors is done on concatenated device name and sensor name
// separated by a semicolon. This allows excluding or including specific
// sensors on specific devices. For example, to exclude the sensor "temp3" on
// the device "platform_coretemp_0", use "platform_coretemp_0;temp3"
if c.sensorFilter.ignored(hwmonName + ";" + sensor) {
c.logger.Debug("ignoring sensor", "sensor", sensor)
continue
}
_, sensorType, _, _ := explodeSensorFilename(sensor)
labels := []string{hwmonName, sensor}
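The new sensor flags act on the chip and sensor name joined by a semicolon, exactly as the comment above describes. A rough, self-contained sketch of that matching follows; it assumes the in-repo newDeviceFilter helper behaves like a plain regexp match on the combined key, and the chip and sensor names are made-up examples.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Assumed equivalent of --collector.hwmon.sensor-exclude="platform_coretemp_0;temp3".
	exclude := regexp.MustCompile(`platform_coretemp_0;temp3`)

	for _, key := range []string{
		"platform_coretemp_0;temp1",
		"platform_coretemp_0;temp3", // matches the exclude pattern, skipped
		"platform_coretemp_1;temp3", // same sensor on another chip, still collected
	} {
		if exclude.MatchString(key) {
			fmt.Println("ignoring sensor:", key)
			continue
		}
		fmt.Println("collecting sensor:", key)
	}
}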
@ -437,7 +453,7 @@ func (c *hwMonCollector) Update(ch chan<- prometheus.Metric) error {
hwmonFiles, err := os.ReadDir(hwmonPathName)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
level.Debug(c.logger).Log("msg", "hwmon collector metrics are not available for this system")
c.logger.Debug("hwmon collector metrics are not available for this system")
return ErrNoData
}

View File

@ -19,11 +19,10 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"os"
"strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs/sysfs"
)
@ -31,7 +30,7 @@ import (
type infinibandCollector struct {
fs sysfs.FS
metricDescs map[string]*prometheus.Desc
logger log.Logger
logger *slog.Logger
subsystem string
}
@ -40,7 +39,7 @@ func init() {
}
// NewInfiniBandCollector returns a new Collector exposing InfiniBand stats.
func NewInfiniBandCollector(logger log.Logger) (Collector, error) {
func NewInfiniBandCollector(logger *slog.Logger) (Collector, error) {
var i infinibandCollector
var err error
@ -148,7 +147,7 @@ func (c *infinibandCollector) Update(ch chan<- prometheus.Metric) error {
devices, err := c.fs.InfiniBandClass()
if err != nil {
if errors.Is(err, os.ErrNotExist) {
level.Debug(c.logger).Log("msg", "infiniband statistics not found, skipping")
c.logger.Debug("infiniband statistics not found, skipping")
return ErrNoData
}
return fmt.Errorf("error obtaining InfiniBand class info: %w", err)

View File

@ -18,27 +18,38 @@
package collector
import (
"github.com/go-kit/log"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus/client_golang/prometheus"
"log/slog"
)
type interruptsCollector struct {
desc typedDesc
logger log.Logger
desc typedDesc
logger *slog.Logger
nameFilter deviceFilter
includeZeros bool
}
func init() {
registerCollector("interrupts", defaultDisabled, NewInterruptsCollector)
}
var (
interruptsInclude = kingpin.Flag("collector.interrupts.name-include", "Regexp of interrupts name to include (mutually exclusive to --collector.interrupts.name-exclude).").String()
interruptsExclude = kingpin.Flag("collector.interrupts.name-exclude", "Regexp of interrupts name to exclude (mutually exclusive to --collector.interrupts.name-include).").String()
interruptsIncludeZeros = kingpin.Flag("collector.interrupts.include-zeros", "Include interrupts that have a zero value").Default("true").Bool()
)
// NewInterruptsCollector returns a new Collector exposing interrupts stats.
func NewInterruptsCollector(logger log.Logger) (Collector, error) {
func NewInterruptsCollector(logger *slog.Logger) (Collector, error) {
return &interruptsCollector{
desc: typedDesc{prometheus.NewDesc(
namespace+"_interrupts_total",
"Interrupt details.",
interruptLabelNames, nil,
), prometheus.CounterValue},
logger: logger,
logger: logger,
nameFilter: newDeviceFilter(*interruptsExclude, *interruptsInclude),
includeZeros: *interruptsIncludeZeros,
}, nil
}

View File

@ -39,10 +39,19 @@ func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) (err error) {
}
for name, interrupt := range interrupts {
for cpuNo, value := range interrupt.values {
filterName := name + ";" + interrupt.info + ";" + interrupt.devices
if c.nameFilter.ignored(filterName) {
c.logger.Debug("ignoring interrupt name", "filter_name", filterName)
continue
}
fv, err := strconv.ParseFloat(value, 64)
if err != nil {
return fmt.Errorf("invalid value %s in interrupts: %w", value, err)
}
if !c.includeZeros && fv == 0.0 {
c.logger.Debug("ignoring interrupt with zero value", "filter_name", filterName, "cpu", cpuNo)
continue
}
ch <- c.desc.mustNewConstMetric(fv, strconv.Itoa(cpuNo), name, interrupt.info, interrupt.devices)
}
}
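For the interrupts collector the filter key depends on the platform: the Linux implementation above matches against name;info;devices, while the two variants that follow build it from the interrupt vector and device. Note that --collector.interrupts.include-zeros defaults to true, so zero-valued counters are only dropped when the flag is explicitly turned off.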

View File

@ -106,10 +106,20 @@ func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) error {
}
for dev, interrupt := range interrupts {
for cpuNo, value := range interrupt.values {
interruptType := fmt.Sprintf("%d", interrupt.vector)
filterName := interruptType + ";" + dev
if c.nameFilter.ignored(filterName) {
c.logger.Debug("ignoring interrupt name", "filter_name", filterName)
continue
}
if !c.includeZeros && value == 0.0 {
c.logger.Debug("ignoring interrupt with zero value", "filter_name", filterName, "cpu", cpuNo)
continue
}
ch <- c.desc.mustNewConstMetric(
value,
strconv.Itoa(cpuNo),
strconv.Itoa(interrupt.vector),
interruptType,
dev,
)
}

View File

@ -77,10 +77,20 @@ func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) error {
}
for dev, interrupt := range interrupts {
for cpuNo, value := range interrupt.values {
interruptType := fmt.Sprintf("%d", interrupt.vector)
filterName := interruptType + ";" + dev
if c.nameFilter.ignored(filterName) {
c.logger.Debug("ignoring interrupt name", "filter_name", filterName)
continue
}
if !c.includeZeros && value == 0.0 {
c.logger.Debug("ignoring interrupt with zero value", "filter_name", filterName, "cpu", cpuNo)
continue
}
ch <- c.desc.mustNewConstMetric(
value,
strconv.Itoa(cpuNo),
fmt.Sprintf("%d", interrupt.vector),
interruptType,
dev,
)
}

View File

@ -19,14 +19,13 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"os"
"sort"
"strconv"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
)
@ -37,7 +36,7 @@ type ipvsCollector struct {
backendLabels []string
backendConnectionsActive, backendConnectionsInact, backendWeight typedDesc
connections, incomingPackets, outgoingPackets, incomingBytes, outgoingBytes typedDesc
logger log.Logger
logger *slog.Logger
}
type ipvsBackendStatus struct {
@ -73,11 +72,11 @@ func init() {
// NewIPVSCollector sets up a new collector for IPVS metrics. It accepts the
// "procfs" config parameter to override the default proc location (/proc).
func NewIPVSCollector(logger log.Logger) (Collector, error) {
func NewIPVSCollector(logger *slog.Logger) (Collector, error) {
return newIPVSCollector(logger)
}
func newIPVSCollector(logger log.Logger) (*ipvsCollector, error) {
func newIPVSCollector(logger *slog.Logger) (*ipvsCollector, error) {
var (
c ipvsCollector
err error
@ -143,7 +142,7 @@ func (c *ipvsCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
// Cannot access ipvs metrics, report no error.
if errors.Is(err, os.ErrNotExist) {
level.Debug(c.logger).Log("msg", "ipvs collector metrics are not available for this system")
c.logger.Debug("ipvs collector metrics are not available for this system")
return ErrNoData
}
return fmt.Errorf("could not get IPVS stats: %w", err)

View File

@ -19,14 +19,14 @@ package collector
import (
"errors"
"fmt"
"io"
"log/slog"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"github.com/go-kit/log"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
@ -114,7 +114,7 @@ func TestIPVSCollector(t *testing.T) {
if _, err := kingpin.CommandLine.Parse(args); err != nil {
t.Fatal(err)
}
collector, err := newIPVSCollector(log.NewNopLogger())
collector, err := newIPVSCollector(slog.New(slog.NewTextHandler(io.Discard, nil)))
if err != nil {
if test.err == nil {
t.Fatal(err)
@ -182,7 +182,7 @@ func TestIPVSCollectorResponse(t *testing.T) {
if _, err := kingpin.CommandLine.Parse(args); err != nil {
t.Fatal(err)
}
collector, err := NewIPVSCollector(log.NewNopLogger())
collector, err := NewIPVSCollector(slog.New(slog.NewTextHandler(io.Discard, nil)))
if err != nil {
t.Fatal(err)
}

View File

@ -18,9 +18,9 @@ package collector
import (
"fmt"
"log/slog"
"path/filepath"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -31,7 +31,7 @@ var (
type ksmdCollector struct {
metricDescs map[string]*prometheus.Desc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -50,7 +50,7 @@ func getCanonicalMetricName(filename string) string {
}
// NewKsmdCollector returns a new Collector exposing kernel/system statistics.
func NewKsmdCollector(logger log.Logger) (Collector, error) {
func NewKsmdCollector(logger *slog.Logger) (Collector, error) {
subsystem := "ksmd"
descs := make(map[string]*prometheus.Desc)

View File

@ -18,22 +18,22 @@ package collector
import (
"fmt"
"log/slog"
"strconv"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
)
type lnstatCollector struct {
logger log.Logger
logger *slog.Logger
}
func init() {
registerCollector("lnstat", defaultDisabled, NewLnstatCollector)
}
func NewLnstatCollector(logger log.Logger) (Collector, error) {
func NewLnstatCollector(logger *slog.Logger) (Collector, error) {
return &lnstatCollector{logger}, nil
}

View File

@ -11,23 +11,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !noloadavg
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || aix) && !noloadavg
// +build darwin dragonfly freebsd linux netbsd openbsd solaris aix
// +build !noloadavg
package collector
import (
"fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
type loadavgCollector struct {
metric []typedDesc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -35,7 +34,7 @@ func init() {
}
// NewLoadavgCollector returns a new Collector exposing load average stats.
func NewLoadavgCollector(logger log.Logger) (Collector, error) {
func NewLoadavgCollector(logger *slog.Logger) (Collector, error) {
return &loadavgCollector{
metric: []typedDesc{
{prometheus.NewDesc(namespace+"_load1", "1m load average.", nil, nil), prometheus.GaugeValue},
@ -52,7 +51,7 @@ func (c *loadavgCollector) Update(ch chan<- prometheus.Metric) error {
return fmt.Errorf("couldn't get load: %w", err)
}
for i, load := range loads {
level.Debug(c.logger).Log("msg", "return load", "index", i, "load", load)
c.logger.Debug("return load", "index", i, "load", load)
ch <- c.metric[i].mustNewConstMetric(load)
}
return err

collector/loadavg_aix.go Normal file (30 lines)
View File

@ -0,0 +1,30 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !noloadavg
// +build !noloadavg
package collector
import (
"github.com/power-devops/perfstat"
)
func getLoad() ([]float64, error) {
stat, err := perfstat.CpuTotalStat()
if err != nil {
return nil, err
}
return []float64{float64(stat.LoadAvg1), float64(stat.LoadAvg5), float64(stat.LoadAvg15)}, nil
}
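This new file supplies getLoad for AIX via the power-devops/perfstat bindings; together with the aix build tag added to the shared loadavg collector above, this should be all that is needed for node_load1/5/15 on that platform, since the shared collector simply consumes whatever the per-platform getLoad returns.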

View File

@ -18,10 +18,10 @@ package collector
import (
"fmt"
"log/slog"
"os"
"strconv"
"github.com/go-kit/log"
"github.com/godbus/dbus/v5"
"github.com/prometheus/client_golang/prometheus"
)
@ -46,7 +46,7 @@ var (
)
type logindCollector struct {
logger log.Logger
logger *slog.Logger
}
type logindDbus struct {
@ -86,7 +86,7 @@ func init() {
}
// NewLogindCollector returns a new Collector exposing logind statistics.
func NewLogindCollector(logger log.Logger) (Collector, error) {
func NewLogindCollector(logger *slog.Logger) (Collector, error) {
return &logindCollector{logger}, nil
}

View File

@ -19,16 +19,15 @@ package collector
import (
"errors"
"fmt"
"log/slog"
"os"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
)
type mdadmCollector struct {
logger log.Logger
logger *slog.Logger
}
func init() {
@ -36,7 +35,7 @@ func init() {
}
// NewMdadmCollector returns a new Collector exposing raid statistics.
func NewMdadmCollector(logger log.Logger) (Collector, error) {
func NewMdadmCollector(logger *slog.Logger) (Collector, error) {
return &mdadmCollector{logger}, nil
}
@ -112,7 +111,7 @@ func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
if errors.Is(err, os.ErrNotExist) {
level.Debug(c.logger).Log("msg", "Not collecting mdstat, file does not exist", "file", *procPath)
c.logger.Debug("Not collecting mdstat, file does not exist", "file", *procPath)
return ErrNoData
}
@ -120,7 +119,7 @@ func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error {
}
for _, mdStat := range mdStats {
level.Debug(c.logger).Log("msg", "collecting metrics for device", "device", mdStat.Name)
c.logger.Debug("collecting metrics for device", "device", mdStat.Name)
stateVals := make(map[string]float64)
stateVals[mdStat.ActivityState] = 1

View File

@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build (darwin || linux || openbsd || netbsd) && !nomeminfo
// +build darwin linux openbsd netbsd
//go:build (darwin || linux || openbsd || netbsd || aix) && !nomeminfo
// +build darwin linux openbsd netbsd aix
// +build !nomeminfo
package collector
@ -21,8 +21,6 @@ import (
"fmt"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
)
@ -30,19 +28,10 @@ const (
memInfoSubsystem = "memory"
)
type meminfoCollector struct {
logger log.Logger
}
func init() {
registerCollector("meminfo", defaultEnabled, NewMeminfoCollector)
}
// NewMeminfoCollector returns a new Collector exposing memory stats.
func NewMeminfoCollector(logger log.Logger) (Collector, error) {
return &meminfoCollector{logger}, nil
}
// Update calls (*meminfoCollector).getMemInfo to get the platform specific
// memory metrics.
func (c *meminfoCollector) Update(ch chan<- prometheus.Metric) error {
@ -51,7 +40,7 @@ func (c *meminfoCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
return fmt.Errorf("couldn't get meminfo: %w", err)
}
level.Debug(c.logger).Log("msg", "Set node_mem", "memInfo", memInfo)
c.logger.Debug("Set node_mem", "memInfo", fmt.Sprintf("%v", memInfo))
for k, v := range memInfo {
if strings.HasSuffix(k, "_total") {
metricType = prometheus.CounterValue

collector/meminfo_aix.go Normal file (47 lines)
View File

@ -0,0 +1,47 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !nomeminfo
// +build !nomeminfo
package collector
import (
"log/slog"
"github.com/power-devops/perfstat"
)
type meminfoCollector struct {
logger *slog.Logger
}
// NewMeminfoCollector returns a new Collector exposing memory stats.
func NewMeminfoCollector(logger *slog.Logger) (Collector, error) {
return &meminfoCollector{
logger: logger,
}, nil
}
func (c *meminfoCollector) getMemInfo() (map[string]float64, error) {
stats, err := perfstat.MemoryTotalStat()
if err != nil {
return nil, err
}
return map[string]float64{
"total_bytes": float64(stats.RealTotal * 4096),
"free_bytes": float64(stats.RealFree * 4096),
"available_bytes": float64(stats.RealAvailable * 4096),
}, nil
}
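perfstat reports real memory in 4 KiB frames, which is why each value above is multiplied by 4096 to produce bytes. As a made-up example, a RealTotal of 1048576 frames would be exported as total_bytes 4294967296 (4 GiB).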

View File

@ -24,11 +24,23 @@ import "C"
import (
"encoding/binary"
"fmt"
"log/slog"
"unsafe"
"golang.org/x/sys/unix"
)
type meminfoCollector struct {
logger *slog.Logger
}
// NewMeminfoCollector returns a new Collector exposing memory stats.
func NewMeminfoCollector(logger *slog.Logger) (Collector, error) {
return &meminfoCollector{
logger: logger,
}, nil
}
func (c *meminfoCollector) getMemInfo() (map[string]float64, error) {
host := C.mach_host_self()
infoCount := C.mach_msg_type_number_t(C.HOST_VM_INFO64_COUNT)

View File

@ -17,59 +17,189 @@
package collector
import (
"bufio"
"fmt"
"io"
"os"
"regexp"
"strconv"
"strings"
"log/slog"
"github.com/prometheus/procfs"
)
var (
reParens = regexp.MustCompile(`\((.*)\)`)
)
type meminfoCollector struct {
fs procfs.FS
logger *slog.Logger
}
// NewMeminfoCollector returns a new Collector exposing memory stats.
func NewMeminfoCollector(logger *slog.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
}
return &meminfoCollector{
logger: logger,
fs: fs,
}, nil
}
func (c *meminfoCollector) getMemInfo() (map[string]float64, error) {
file, err := os.Open(procFilePath("meminfo"))
meminfo, err := c.fs.Meminfo()
if err != nil {
return nil, err
}
defer file.Close()
return parseMemInfo(file)
}
func parseMemInfo(r io.Reader) (map[string]float64, error) {
var (
memInfo = map[string]float64{}
scanner = bufio.NewScanner(r)
)
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(line)
// Workaround for empty lines occasionally occur in CentOS 6.2 kernel 3.10.90.
if len(parts) == 0 {
continue
}
fv, err := strconv.ParseFloat(parts[1], 64)
if err != nil {
return nil, fmt.Errorf("invalid value in meminfo: %w", err)
}
key := parts[0][:len(parts[0])-1] // remove trailing : from key
// Active(anon) -> Active_anon
key = reParens.ReplaceAllString(key, "_${1}")
switch len(parts) {
case 2: // no unit
case 3: // has unit, we presume kB
fv *= 1024
key = key + "_bytes"
default:
return nil, fmt.Errorf("invalid line in meminfo: %s", line)
}
memInfo[key] = fv
return nil, fmt.Errorf("Failed to get memory info: %s", err)
}
return memInfo, scanner.Err()
metrics := make(map[string]float64)
if meminfo.ActiveBytes != nil {
metrics["Active_bytes"] = float64(*meminfo.ActiveBytes)
}
if meminfo.ActiveAnonBytes != nil {
metrics["Active_anon_bytes"] = float64(*meminfo.ActiveAnonBytes)
}
if meminfo.ActiveFileBytes != nil {
metrics["Active_file_bytes"] = float64(*meminfo.ActiveFileBytes)
}
if meminfo.AnonHugePagesBytes != nil {
metrics["AnonHugePages_bytes"] = float64(*meminfo.AnonHugePagesBytes)
}
if meminfo.AnonPagesBytes != nil {
metrics["AnonPages_bytes"] = float64(*meminfo.AnonPagesBytes)
}
if meminfo.BounceBytes != nil {
metrics["Bounce_bytes"] = float64(*meminfo.BounceBytes)
}
if meminfo.BuffersBytes != nil {
metrics["Buffers_bytes"] = float64(*meminfo.BuffersBytes)
}
if meminfo.CachedBytes != nil {
metrics["Cached_bytes"] = float64(*meminfo.CachedBytes)
}
if meminfo.CmaFreeBytes != nil {
metrics["CmaFree_bytes"] = float64(*meminfo.CmaFreeBytes)
}
if meminfo.CmaTotalBytes != nil {
metrics["CmaTotal_bytes"] = float64(*meminfo.CmaTotalBytes)
}
if meminfo.CommitLimitBytes != nil {
metrics["CommitLimit_bytes"] = float64(*meminfo.CommitLimitBytes)
}
if meminfo.CommittedASBytes != nil {
metrics["Committed_AS_bytes"] = float64(*meminfo.CommittedASBytes)
}
if meminfo.DirectMap1GBytes != nil {
metrics["DirectMap1G_bytes"] = float64(*meminfo.DirectMap1GBytes)
}
if meminfo.DirectMap2MBytes != nil {
metrics["DirectMap2M_bytes"] = float64(*meminfo.DirectMap2MBytes)
}
if meminfo.DirectMap4kBytes != nil {
metrics["DirectMap4k_bytes"] = float64(*meminfo.DirectMap4kBytes)
}
if meminfo.DirtyBytes != nil {
metrics["Dirty_bytes"] = float64(*meminfo.DirtyBytes)
}
if meminfo.HardwareCorruptedBytes != nil {
metrics["HardwareCorrupted_bytes"] = float64(*meminfo.HardwareCorruptedBytes)
}
if meminfo.HugepagesizeBytes != nil {
metrics["Hugepagesize_bytes"] = float64(*meminfo.HugepagesizeBytes)
}
if meminfo.InactiveBytes != nil {
metrics["Inactive_bytes"] = float64(*meminfo.InactiveBytes)
}
if meminfo.InactiveAnonBytes != nil {
metrics["Inactive_anon_bytes"] = float64(*meminfo.InactiveAnonBytes)
}
if meminfo.InactiveFileBytes != nil {
metrics["Inactive_file_bytes"] = float64(*meminfo.InactiveFileBytes)
}
if meminfo.KernelStackBytes != nil {
metrics["KernelStack_bytes"] = float64(*meminfo.KernelStackBytes)
}
if meminfo.MappedBytes != nil {
metrics["Mapped_bytes"] = float64(*meminfo.MappedBytes)
}
if meminfo.MemAvailableBytes != nil {
metrics["MemAvailable_bytes"] = float64(*meminfo.MemAvailableBytes)
}
if meminfo.MemFreeBytes != nil {
metrics["MemFree_bytes"] = float64(*meminfo.MemFreeBytes)
}
if meminfo.MemTotalBytes != nil {
metrics["MemTotal_bytes"] = float64(*meminfo.MemTotalBytes)
}
if meminfo.MlockedBytes != nil {
metrics["Mlocked_bytes"] = float64(*meminfo.MlockedBytes)
}
if meminfo.NFSUnstableBytes != nil {
metrics["NFS_Unstable_bytes"] = float64(*meminfo.NFSUnstableBytes)
}
if meminfo.PageTablesBytes != nil {
metrics["PageTables_bytes"] = float64(*meminfo.PageTablesBytes)
}
if meminfo.PercpuBytes != nil {
metrics["Percpu_bytes"] = float64(*meminfo.PercpuBytes)
}
if meminfo.SReclaimableBytes != nil {
metrics["SReclaimable_bytes"] = float64(*meminfo.SReclaimableBytes)
}
if meminfo.SUnreclaimBytes != nil {
metrics["SUnreclaim_bytes"] = float64(*meminfo.SUnreclaimBytes)
}
if meminfo.ShmemBytes != nil {
metrics["Shmem_bytes"] = float64(*meminfo.ShmemBytes)
}
if meminfo.ShmemHugePagesBytes != nil {
metrics["ShmemHugePages_bytes"] = float64(*meminfo.ShmemHugePagesBytes)
}
if meminfo.ShmemPmdMappedBytes != nil {
metrics["ShmemPmdMapped_bytes"] = float64(*meminfo.ShmemPmdMappedBytes)
}
if meminfo.SlabBytes != nil {
metrics["Slab_bytes"] = float64(*meminfo.SlabBytes)
}
if meminfo.SwapCachedBytes != nil {
metrics["SwapCached_bytes"] = float64(*meminfo.SwapCachedBytes)
}
if meminfo.SwapFreeBytes != nil {
metrics["SwapFree_bytes"] = float64(*meminfo.SwapFreeBytes)
}
if meminfo.SwapTotalBytes != nil {
metrics["SwapTotal_bytes"] = float64(*meminfo.SwapTotalBytes)
}
if meminfo.UnevictableBytes != nil {
metrics["Unevictable_bytes"] = float64(*meminfo.UnevictableBytes)
}
if meminfo.VmallocChunkBytes != nil {
metrics["VmallocChunk_bytes"] = float64(*meminfo.VmallocChunkBytes)
}
if meminfo.VmallocTotalBytes != nil {
metrics["VmallocTotal_bytes"] = float64(*meminfo.VmallocTotalBytes)
}
if meminfo.VmallocUsedBytes != nil {
metrics["VmallocUsed_bytes"] = float64(*meminfo.VmallocUsedBytes)
}
if meminfo.WritebackBytes != nil {
metrics["Writeback_bytes"] = float64(*meminfo.WritebackBytes)
}
if meminfo.WritebackTmpBytes != nil {
metrics["WritebackTmp_bytes"] = float64(*meminfo.WritebackTmpBytes)
}
// These fields are plain counts rather than byte quantities, so they
// have no `Bytes`-suffixed counterparts in the procfs.Meminfo struct
// and no `_bytes` suffix on the metric names.
if meminfo.HugePagesFree != nil {
metrics["HugePages_Free"] = float64(*meminfo.HugePagesFree)
}
if meminfo.HugePagesRsvd != nil {
metrics["HugePages_Rsvd"] = float64(*meminfo.HugePagesRsvd)
}
if meminfo.HugePagesSurp != nil {
metrics["HugePages_Surp"] = float64(*meminfo.HugePagesSurp)
}
if meminfo.HugePagesTotal != nil {
metrics["HugePages_Total"] = float64(*meminfo.HugePagesTotal)
}
return metrics, nil
}
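The rewrite above replaces the hand-rolled /proc/meminfo parser with procfs.Meminfo, whose fields are pointers that remain nil when the running kernel does not report a value, hence the per-field nil checks. A minimal sketch of that API as the new code uses it (error handling shortened, output format illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	mi, err := fs.Meminfo()
	if err != nil {
		panic(err)
	}
	// A nil field means the kernel did not expose it, so skip rather than emit 0.
	if mi.MemAvailableBytes != nil {
		fmt.Printf("MemAvailable_bytes %d\n", *mi.MemAvailableBytes)
	}
	if mi.HugePagesTotal != nil {
		fmt.Printf("HugePages_Total %d\n", *mi.HugePagesTotal) // a page count, not bytes
	}
}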

View File

@ -17,20 +17,23 @@
package collector
import (
"os"
"io"
"log/slog"
"testing"
)
func TestMemInfo(t *testing.T) {
file, err := os.Open("fixtures/proc/meminfo")
if err != nil {
t.Fatal(err)
}
defer file.Close()
*procPath = "fixtures/proc"
logger := slog.New(slog.NewTextHandler(io.Discard, nil))
memInfo, err := parseMemInfo(file)
collector, err := NewMeminfoCollector(logger)
if err != nil {
t.Fatal(err)
panic(err)
}
memInfo, err := collector.(*meminfoCollector).getMemInfo()
if err != nil {
panic(err)
}
if want, got := 3831959552.0, memInfo["MemTotal_bytes"]; want != got {

View File

@ -18,8 +18,20 @@ package collector
import (
"golang.org/x/sys/unix"
"log/slog"
)
type meminfoCollector struct {
logger *slog.Logger
}
// NewMeminfoCollector returns a new Collector exposing memory stats.
func NewMeminfoCollector(logger *slog.Logger) (Collector, error) {
return &meminfoCollector{
logger: logger,
}, nil
}
func (c *meminfoCollector) getMemInfo() (map[string]float64, error) {
uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2")
if err != nil {

View File

@ -20,13 +20,13 @@ import (
"bufio"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
)
@ -45,7 +45,7 @@ type meminfoMetric struct {
type meminfoNumaCollector struct {
metricDescs map[string]*prometheus.Desc
logger log.Logger
logger *slog.Logger
}
func init() {
@ -53,7 +53,7 @@ func init() {
}
// NewMeminfoNumaCollector returns a new Collector exposing memory stats.
func NewMeminfoNumaCollector(logger log.Logger) (Collector, error) {
func NewMeminfoNumaCollector(logger *slog.Logger) (Collector, error) {
return &meminfoNumaCollector{
metricDescs: map[string]*prometheus.Desc{},
logger: logger,

View File

@ -18,6 +18,7 @@ package collector
import (
"fmt"
"log/slog"
)
/*
@ -53,6 +54,17 @@ sysctl_bcstats(struct bcachestats *bcstats)
*/
import "C"
type meminfoCollector struct {
logger *slog.Logger
}
// NewMeminfoCollector returns a new Collector exposing memory stats.
func NewMeminfoCollector(logger *slog.Logger) (Collector, error) {
return &meminfoCollector{
logger: logger,
}, nil
}
func (c *meminfoCollector) getMemInfo() (map[string]float64, error) {
var uvmexp C.struct_uvmexp
var bcstats C.struct_bcachestats

View File

@ -17,8 +17,10 @@
package collector
import (
"golang.org/x/sys/unix"
"log/slog"
"unsafe"
"golang.org/x/sys/unix"
)
const (
@ -48,6 +50,17 @@ type bcachestats struct {
Dmaflips int64
}
type meminfoCollector struct {
logger *slog.Logger
}
// NewMeminfoCollector returns a new Collector exposing memory stats.
func NewMeminfoCollector(logger *slog.Logger) (Collector, error) {
return &meminfoCollector{
logger: logger,
}, nil
}
func (c *meminfoCollector) getMemInfo() (map[string]float64, error) {
uvmexpb, err := unix.SysctlRaw("vm.uvmexp")
if err != nil {

View File

@ -19,8 +19,8 @@ package collector
import (
"fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/unix"
)
@ -33,7 +33,7 @@ type memoryCollector struct {
pageSize uint64
sysctls []bsdSysctl
kvm kvm
logger log.Logger
logger *slog.Logger
}
func init() {
@ -41,7 +41,7 @@ func init() {
}
// NewMemoryCollector returns a new Collector exposing memory stats.
func NewMemoryCollector(logger log.Logger) (Collector, error) {
func NewMemoryCollector(logger *slog.Logger) (Collector, error) {
tmp32, err := unix.SysctlUint32("vm.stats.vm.v_page_size")
if err != nil {
return nil, fmt.Errorf("sysctl(vm.stats.vm.v_page_size) failed: %w", err)

View File

@ -18,9 +18,8 @@ package collector
import (
"fmt"
"log/slog"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
)
@ -96,7 +95,7 @@ type mountStatsCollector struct {
proc procfs.Proc
logger log.Logger
logger *slog.Logger
}
// used to uniquely identify an NFS mount to prevent duplicates
@ -111,7 +110,7 @@ func init() {
}
// NewMountStatsCollector returns a new Collector exposing NFS statistics.
func NewMountStatsCollector(logger log.Logger) (Collector, error) {
func NewMountStatsCollector(logger *slog.Logger) (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %w", err)
@ -538,15 +537,16 @@ func (c *mountStatsCollector) Update(ch chan<- prometheus.Metric) error {
mountAddress = miStats.SuperOptions["addr"]
}
deviceIdentifier := nfsDeviceIdentifier{m.Device, stats.Transport.Protocol, mountAddress}
i := deviceList[deviceIdentifier]
if i {
level.Debug(c.logger).Log("msg", "Skipping duplicate device entry", "device", deviceIdentifier)
continue
for k := range stats.Transport {
deviceIdentifier := nfsDeviceIdentifier{m.Device, stats.Transport[k].Protocol, mountAddress}
i := deviceList[deviceIdentifier]
if i {
c.logger.Debug("Skipping duplicate device entry", "device", deviceIdentifier)
break
}
deviceList[deviceIdentifier] = true
c.updateNFSStats(ch, stats, m.Device, stats.Transport[k].Protocol, mountAddress)
}
deviceList[deviceIdentifier] = true
c.updateNFSStats(ch, stats, m.Device, stats.Transport.Protocol, mountAddress)
}
return nil
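
The reworked loop above deduplicates mounts by a (device, protocol, address) tuple, one entry per transport. A standalone sketch of that guard pattern, using a simplified stand-in for nfsDeviceIdentifier (the field names are placeholders, since the struct definition is not part of this hunk):

package collector

// nfsDeviceKey is an illustrative stand-in for nfsDeviceIdentifier.
type nfsDeviceKey struct {
	Device   string
	Protocol string
	Address  string
}

// markSeen reports whether key is new, recording it in seen on first sight.
// This mirrors the duplicate-device guard used in the Update loop above.
func markSeen(seen map[nfsDeviceKey]bool, key nfsDeviceKey) bool {
	if seen[key] {
		return false
	}
	seen[key] = true
	return true
}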
@ -617,75 +617,77 @@ func (c *mountStatsCollector) updateNFSStats(ch chan<- prometheus.Metric, s *pro
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportBindTotal,
prometheus.CounterValue,
float64(s.Transport.Bind),
labelValues...,
)
for i := range s.Transport {
ch <- prometheus.MustNewConstMetric(
c.NFSTransportBindTotal,
prometheus.CounterValue,
float64(s.Transport[i].Bind),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportConnectTotal,
prometheus.CounterValue,
float64(s.Transport.Connect),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportConnectTotal,
prometheus.CounterValue,
float64(s.Transport[i].Connect),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportIdleTimeSeconds,
prometheus.GaugeValue,
float64(s.Transport.IdleTimeSeconds%float64Mantissa),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportIdleTimeSeconds,
prometheus.GaugeValue,
float64(s.Transport[i].IdleTimeSeconds%float64Mantissa),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportSendsTotal,
prometheus.CounterValue,
float64(s.Transport.Sends),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportSendsTotal,
prometheus.CounterValue,
float64(s.Transport[i].Sends),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportReceivesTotal,
prometheus.CounterValue,
float64(s.Transport.Receives),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportReceivesTotal,
prometheus.CounterValue,
float64(s.Transport[i].Receives),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportBadTransactionIDsTotal,
prometheus.CounterValue,
float64(s.Transport.BadTransactionIDs),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportBadTransactionIDsTotal,
prometheus.CounterValue,
float64(s.Transport[i].BadTransactionIDs),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportBacklogQueueTotal,
prometheus.CounterValue,
float64(s.Transport.CumulativeBacklog),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportBacklogQueueTotal,
prometheus.CounterValue,
float64(s.Transport[i].CumulativeBacklog),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportMaximumRPCSlots,
prometheus.GaugeValue,
float64(s.Transport.MaximumRPCSlotsUsed),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportMaximumRPCSlots,
prometheus.GaugeValue,
float64(s.Transport[i].MaximumRPCSlotsUsed),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportSendingQueueTotal,
prometheus.CounterValue,
float64(s.Transport.CumulativeSendingQueue),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportSendingQueueTotal,
prometheus.CounterValue,
float64(s.Transport[i].CumulativeSendingQueue),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportPendingQueueTotal,
prometheus.CounterValue,
float64(s.Transport.CumulativePendingQueue),
labelValues...,
)
ch <- prometheus.MustNewConstMetric(
c.NFSTransportPendingQueueTotal,
prometheus.CounterValue,
float64(s.Transport[i].CumulativePendingQueue),
labelValues...,
)
}
for _, op := range s.Operations {
opLabelValues := []string{export, protocol, mountAddress, op.Operation}

Some files were not shown because too many files have changed in this diff.