
Refactor node_exporter to support collectors.

A collector is a type implementing the 'Collector' interface (a minimal sketch is shown below the list of added collectors).

The following collectors were added:
- NativeCollector wrapping the original functionality (attributes, load)
- GmondCollector scraping ganglia's gmond (based on gmond_exporter)
- MuninCollector scraping munin (based on munin_exporter)
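
For illustration only (not part of this commit), a minimal sketch of one more collector following the same pattern; the type, metric name, and value below are made up:

package exporter

import "github.com/prometheus/client_golang/prometheus"

// diskCollector is a hypothetical example and not part of this commit.
type diskCollector struct {
    name  string
    usage prometheus.Gauge
}

// NewDiskCollector follows the same constructor pattern as the collectors in
// this commit: take the shared config and registry and register the gauges.
func NewDiskCollector(config config, registry prometheus.Registry) (c diskCollector, err error) {
    c = diskCollector{
        name:  "disk_collector",
        usage: prometheus.NewGauge(),
    }
    registry.Register("node_disk_usage", "node_exporter: disk usage (made-up metric).", prometheus.NilLabels, c.usage)
    return c, nil
}

func (c *diskCollector) Name() string { return c.name }

func (c *diskCollector) Update() (updates int, err error) {
    // A real collector would read the value from the system here;
    // a constant stands in for that.
    c.usage.Set(map[string]string{"device": "sda"}, 42)
    updates++
    return updates, nil
}

Such a collector would then be constructed in exporter.New() and appended to e.collectors, as the three collectors below are.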
Branch: pull/1/head
Author: Johannes 'fish' Ziemke
Commit: 588ef8b62a
Files changed (9):

  exporter/exporter.go         | 177
  exporter/ganglia/format.go   |  61
  exporter/gmond_collector.go  | 103
  exporter/helper.go           |  26
  exporter/munin_collector.go  | 233
  exporter/native_collector.go | 154
  main.go                      |  22
  node_exporter.conf           |   6
  node_exporter.go             | 224

exporter/exporter.go (new file, 177 lines)

@@ -0,0 +1,177 @@
// Exporter is a prometheus exporter using multiple collectors to collect and export system metrics.
package exporter

import (
    "encoding/json"
    "flag"
    "fmt"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/exp"
    "io/ioutil"
    "log"
    "net/http"
    "os"
    "os/signal"
    "runtime/pprof"
    "sync"
    "syscall"
    "time"
)

var verbose = flag.Bool("verbose", false, "Verbose output.")

// Interface a collector has to implement.
type Collector interface {
    // Get new metrics and expose them via prometheus registry.
    Update() (n int, err error)

    // Returns the name of the collector.
    Name() string
}

type config struct {
    Attributes       map[string]string `json:"attributes"`
    ListeningAddress string            `json:"listeningAddress"`
    ScrapeInterval   int               `json:"scrapeInterval"`
    Collectors       []string          `json:"collectors"`
}

func (e *exporter) loadConfig() (err error) {
    log.Printf("Reading config %s", e.configFile)
    bytes, err := ioutil.ReadFile(e.configFile)
    if err != nil {
        return
    }
    return json.Unmarshal(bytes, &e.config) // Make sure this is safe
}

type exporter struct {
    configFile       string
    listeningAddress string
    scrapeInterval   time.Duration
    scrapeDurations  prometheus.Histogram
    metricsUpdated   prometheus.Gauge
    config           config
    registry         prometheus.Registry
    collectors       []Collector
    MemProfile       string
}

// New takes the path to a config file and returns an exporter instance.
func New(configFile string) (e exporter, err error) {
    registry := prometheus.NewRegistry()
    e = exporter{
        configFile:       configFile,
        scrapeDurations:  prometheus.NewDefaultHistogram(),
        metricsUpdated:   prometheus.NewGauge(),
        listeningAddress: ":8080",
        scrapeInterval:   60 * time.Second,
        registry:         registry,
    }

    err = e.loadConfig()
    if err != nil {
        return e, fmt.Errorf("Couldn't read config: %s", err)
    }

    cn, err := NewNativeCollector(e.config, e.registry)
    if err != nil {
        log.Fatalf("Couldn't attach collector: %s", err)
    }

    cg, err := NewGmondCollector(e.config, e.registry)
    if err != nil {
        log.Fatalf("Couldn't attach collector: %s", err)
    }

    cm, err := NewMuninCollector(e.config, e.registry)
    if err != nil {
        log.Fatalf("Couldn't attach collector: %s", err)
    }

    e.collectors = []Collector{&cn, &cg, &cm}

    if e.config.ListeningAddress != "" {
        e.listeningAddress = e.config.ListeningAddress
    }
    if e.config.ScrapeInterval != 0 {
        e.scrapeInterval = time.Duration(e.config.ScrapeInterval) * time.Second
    }

    registry.Register("node_exporter_scrape_duration_seconds", "node_exporter: Duration of a scrape job.", prometheus.NilLabels, e.scrapeDurations)
    registry.Register("node_exporter_metrics_updated", "node_exporter: Number of metrics updated.", prometheus.NilLabels, e.metricsUpdated)

    return e, nil
}

func (e *exporter) serveStatus() {
    exp.Handle(prometheus.ExpositionResource, e.registry.Handler())
    http.ListenAndServe(e.listeningAddress, exp.DefaultCoarseMux)
}

func (e *exporter) Execute(c Collector) {
    begin := time.Now()
    updates, err := c.Update()
    duration := time.Since(begin)

    label := map[string]string{
        "collector": c.Name(),
    }
    if err != nil {
        log.Printf("ERROR: %s failed after %fs: %s", c.Name(), duration.Seconds(), err)
        label["result"] = "error"
    } else {
        log.Printf("OK: %s success after %fs.", c.Name(), duration.Seconds())
        label["result"] = "success"
    }
    e.scrapeDurations.Add(label, duration.Seconds())
    e.metricsUpdated.Set(label, float64(updates))
}

func (e *exporter) Loop() {
    sigHup := make(chan os.Signal)
    sigUsr1 := make(chan os.Signal)
    signal.Notify(sigHup, syscall.SIGHUP)
    signal.Notify(sigUsr1, syscall.SIGUSR1)

    go e.serveStatus()

    tick := time.Tick(e.scrapeInterval)
    for {
        select {
        case <-sigHup:
            err := e.loadConfig()
            if err != nil {
                log.Printf("Couldn't reload config: %s", err)
                continue
            }
            log.Printf("Got new config")
            tick = time.Tick(e.scrapeInterval)

        case <-tick:
            log.Printf("Starting new scrape interval")
            wg := sync.WaitGroup{}
            wg.Add(len(e.collectors))
            for _, c := range e.collectors {
                go func(c Collector) {
                    e.Execute(c)
                    wg.Done()
                }(c)
            }
            wg.Wait()

        case <-sigUsr1:
            log.Printf("got signal")
            if e.MemProfile != "" {
                log.Printf("Writing memory profile to %s", e.MemProfile)
                f, err := os.Create(e.MemProfile)
                if err != nil {
                    log.Fatal(err)
                }
                pprof.WriteHeapProfile(f)
                f.Close()
            }
        }
    }
}

exporter/ganglia/format.go (new file, 61 lines)

@@ -0,0 +1,61 @@
// Types for unmarshalling gmond's XML output.
//
// Unused elements in gmond's XML output are commented out.
// In case you want to use them, please change the names so that one
// can understand them without needing to know what the acronym stands for.
package ganglia

import "encoding/xml"

type ExtraElement struct {
    Name string `xml:"NAME,attr"`
    Val  string `xml:"VAL,attr"`
}

type ExtraData struct {
    ExtraElements []ExtraElement `xml:"EXTRA_ELEMENT"`
}

type Metric struct {
    Name  string  `xml:"NAME,attr"`
    Value float64 `xml:"VAL,attr"`
    /*
        Unit  string `xml:"UNITS,attr"`
        Slope string `xml:"SLOPE,attr"`
        Tn    int    `xml:"TN,attr"`
        Tmax  int    `xml:"TMAX,attr"`
        Dmax  int    `xml:"DMAX,attr"`
    */
    ExtraData ExtraData `xml:"EXTRA_DATA"`
}

type Host struct {
    Name string `xml:"NAME,attr"`
    /*
        Ip           string `xml:"IP,attr"`
        Tags         string `xml:"TAGS,attr"`
        Reported     int    `xml:"REPORTED,attr"`
        Tn           int    `xml:"TN,attr"`
        Tmax         int    `xml:"TMAX,attr"`
        Dmax         int    `xml:"DMAX,attr"`
        Location     string `xml:"LOCATION,attr"`
        GmondStarted int    `xml:"GMOND_STARTED,attr"`
    */
    Metrics []Metric `xml:"METRIC"`
}

type Cluster struct {
    Name string `xml:"NAME,attr"`
    /*
        Owner     string `xml:"OWNER,attr"`
        LatLong   string `xml:"LATLONG,attr"`
        Url       string `xml:"URL,attr"`
        Localtime int    `xml:"LOCALTIME,attr"`
    */
    Hosts []Host `xml:"HOST"`
}

type Ganglia struct {
    XMLNAME  xml.Name  `xml:"GANGLIA_XML"`
    Clusters []Cluster `xml:"CLUSTER"`
}
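
To show how these types are consumed (illustrative snippet, not part of the commit), a self-contained program that unmarshals a small, made-up gmond-style XML document into the Ganglia struct:

package main

import (
    "encoding/xml"
    "fmt"

    "github.com/prometheus/node_exporter/exporter/ganglia"
)

// A minimal, made-up gmond XML report matching the struct tags above.
const sample = `<GANGLIA_XML>
  <CLUSTER NAME="test">
    <HOST NAME="host1">
      <METRIC NAME="load_one" VAL="0.25">
        <EXTRA_DATA>
          <EXTRA_ELEMENT NAME="DESC" VAL="One minute load average"/>
        </EXTRA_DATA>
      </METRIC>
    </HOST>
  </CLUSTER>
</GANGLIA_XML>`

func main() {
    var g ganglia.Ganglia
    if err := xml.Unmarshal([]byte(sample), &g); err != nil {
        panic(err)
    }
    m := g.Clusters[0].Hosts[0].Metrics[0]
    fmt.Println(m.Name, m.Value) // load_one 0.25
}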

exporter/gmond_collector.go (new file, 103 lines)

@@ -0,0 +1,103 @@
package exporter

import (
    "bufio"
    "encoding/xml"
    "fmt"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/node_exporter/exporter/ganglia"
    "io"
    "net"
    "strings"
    "time"
)

const (
    gangliaAddress = "127.0.0.1:8649"
    gangliaProto   = "tcp"
    gangliaTimeout = 30 * time.Second
)

type gmondCollector struct {
    name     string
    Metrics  map[string]prometheus.Gauge
    config   config
    registry prometheus.Registry
}

// Takes a config struct and prometheus registry and returns a new Collector scraping ganglia.
func NewGmondCollector(config config, registry prometheus.Registry) (collector gmondCollector, err error) {
    collector = gmondCollector{
        name:     "gmond_collector",
        config:   config,
        Metrics:  make(map[string]prometheus.Gauge),
        registry: registry,
    }
    return collector, nil
}

func (c *gmondCollector) Name() string { return c.name }

func (c *gmondCollector) setMetric(name string, labels map[string]string, metric ganglia.Metric) {
    if _, ok := c.Metrics[name]; !ok {
        var desc string
        var title string
        for _, element := range metric.ExtraData.ExtraElements {
            switch element.Name {
            case "DESC":
                desc = element.Val
            case "TITLE":
                title = element.Val
            }
            if title != "" && desc != "" {
                break
            }
        }
        debug(c.Name(), "Register %s: %s", name, desc)
        gauge := prometheus.NewGauge()
        c.Metrics[name] = gauge
        c.registry.Register(name, desc, prometheus.NilLabels, gauge) // one gauge per metric!
    }
    debug(c.Name(), "Set %s{%s}: %f", name, labels, metric.Value)
    c.Metrics[name].Set(labels, metric.Value)
}

func (c *gmondCollector) Update() (updates int, err error) {
    conn, err := net.Dial(gangliaProto, gangliaAddress)
    debug(c.Name(), "gmondCollector Update")
    if err != nil {
        return updates, fmt.Errorf("Can't connect to gmond: %s", err)
    }
    conn.SetDeadline(time.Now().Add(gangliaTimeout))

    ganglia := ganglia.Ganglia{}
    decoder := xml.NewDecoder(bufio.NewReader(conn))
    decoder.CharsetReader = toUtf8

    err = decoder.Decode(&ganglia)
    if err != nil {
        return updates, fmt.Errorf("Couldn't parse xml: %s", err)
    }

    for _, cluster := range ganglia.Clusters {
        for _, host := range cluster.Hosts {
            for _, metric := range host.Metrics {
                name := strings.ToLower(metric.Name)

                var labels = map[string]string{
                    "hostname": host.Name,
                    "cluster":  cluster.Name,
                }
                c.setMetric(name, labels, metric)
                updates++
            }
        }
    }
    return updates, err
}

func toUtf8(charset string, input io.Reader) (io.Reader, error) {
    return input, nil // FIXME
}

exporter/helper.go (new file, 26 lines)

@@ -0,0 +1,26 @@
package exporter

import (
    "fmt"
    "log"
    "strconv"
    "strings"
)

func debug(name string, format string, a ...interface{}) {
    if *verbose {
        f := fmt.Sprintf("%s: %s", name, format)
        log.Printf(f, a...)
    }
}

// Takes a string, splits it, converts each element to int and returns them as a new list.
// It will return an error in case any element isn't an int.
func splitToInts(str string, sep string) (ints []int, err error) {
    for _, part := range strings.Split(str, sep) {
        i, err := strconv.Atoi(part)
        if err != nil {
            return nil, fmt.Errorf("Could not split '%s' because %s is no int: %s", str, part, err)
        }
        ints = append(ints, i)
    }
    return ints, nil
}

exporter/munin_collector.go (new file, 233 lines)

@@ -0,0 +1,233 @@
package exporter

import (
    "bufio"
    "fmt"
    "github.com/prometheus/client_golang/prometheus"
    "io"
    "net"
    "regexp"
    "strconv"
    "strings"
)

const (
    muninAddress = "127.0.0.1:4949"
    muninProto   = "tcp"
)

var muninBanner = regexp.MustCompile(`# munin node at (.*)`)

type muninCollector struct {
    name           string
    hostname       string
    graphs         []string
    gaugePerMetric map[string]prometheus.Gauge
    config         config
    registry       prometheus.Registry
    connection     net.Conn
}

// Takes a config struct and prometheus registry and returns a new Collector scraping munin.
func NewMuninCollector(config config, registry prometheus.Registry) (c muninCollector, err error) {
    c = muninCollector{
        name:           "munin_collector",
        config:         config,
        registry:       registry,
        gaugePerMetric: make(map[string]prometheus.Gauge),
    }
    return c, err
}

func (c *muninCollector) Name() string { return c.name }

func (c *muninCollector) connect() (err error) {
    c.connection, err = net.Dial(muninProto, muninAddress)
    if err != nil {
        return err
    }
    debug(c.Name(), "Connected.")

    reader := bufio.NewReader(c.connection)
    head, err := reader.ReadString('\n')
    if err != nil {
        return err
    }

    matches := muninBanner.FindStringSubmatch(head)
    if len(matches) != 2 { // expect: # munin node at <hostname>
        return fmt.Errorf("Unexpected line: %s", head)
    }
    c.hostname = matches[1]
    debug(c.Name(), "Found hostname: %s", c.hostname)
    return err
}

func (c *muninCollector) muninCommand(cmd string) (reader *bufio.Reader, err error) {
    if c.connection == nil {
        err := c.connect()
        if err != nil {
            return reader, fmt.Errorf("Couldn't connect to munin: %s", err)
        }
    }
    reader = bufio.NewReader(c.connection)

    fmt.Fprintf(c.connection, cmd+"\n")

    _, err = reader.Peek(1)
    switch err {
    case io.EOF:
        debug(c.Name(), "not connected anymore, closing connection and reconnecting.")
        c.connection.Close()
        err = c.connect()
        if err != nil {
            return reader, fmt.Errorf("Couldn't connect to %s: %s", muninAddress, err)
        }
        return c.muninCommand(cmd)
    case nil: // no error
        break
    default:
        return reader, fmt.Errorf("Unexpected error: %s", err)
    }

    return reader, err
}

func (c *muninCollector) muninList() (items []string, err error) {
    munin, err := c.muninCommand("list")
    if err != nil {
        return items, fmt.Errorf("Couldn't get list: %s", err)
    }

    response, err := munin.ReadString('\n') // we are only interested in the first line
    if err != nil {
        return items, fmt.Errorf("Couldn't read response: %s", err)
    }

    if response[0] == '#' { // # not expected here
        return items, fmt.Errorf("Error getting items: %s", response)
    }
    items = strings.Fields(strings.TrimRight(response, "\n"))
    return items, err
}

func (c *muninCollector) muninConfig(name string) (config map[string]map[string]string, graphConfig map[string]string, err error) {
    graphConfig = make(map[string]string)
    config = make(map[string]map[string]string)

    resp, err := c.muninCommand("config " + name)
    if err != nil {
        return config, graphConfig, fmt.Errorf("Couldn't get config for %s: %s", name, err)
    }

    for {
        line, err := resp.ReadString('\n')
        if err == io.EOF {
            debug(c.Name(), "EOF, retrying")
            return c.muninConfig(name)
        }
        if err != nil {
            return nil, nil, err
        }
        if line == ".\n" { // munin end marker
            break
        }
        if line[0] == '#' { // here it's just a comment, so ignore it
            continue
        }
        parts := strings.Fields(line)
        if len(parts) < 2 {
            return nil, nil, fmt.Errorf("Line unexpected: %s", line)
        }
        key, value := parts[0], strings.TrimRight(strings.Join(parts[1:], " "), "\n")

        key_parts := strings.Split(key, ".")
        if len(key_parts) > 1 { // it's a metric config (metric.label etc)
            if _, ok := config[key_parts[0]]; !ok {
                config[key_parts[0]] = make(map[string]string)
            }
            config[key_parts[0]][key_parts[1]] = value
        } else {
            graphConfig[key_parts[0]] = value
        }
    }
    return config, graphConfig, err
}

func (c *muninCollector) registerMetrics() (err error) {
    items, err := c.muninList()
    if err != nil {
        return fmt.Errorf("Couldn't get graph list: %s", err)
    }

    for _, name := range items {
        c.graphs = append(c.graphs, name)
        configs, graphConfig, err := c.muninConfig(name)
        if err != nil {
            return fmt.Errorf("Couldn't get config for graph %s: %s", name, err)
        }

        for metric, config := range configs {
            metricName := name + "-" + metric
            desc := graphConfig["graph_title"] + ": " + config["label"]
            if config["info"] != "" {
                desc = desc + ", " + config["info"]
            }
            gauge := prometheus.NewGauge()
            debug(c.Name(), "Register %s: %s", metricName, desc)
            c.gaugePerMetric[metricName] = gauge
            c.registry.Register(metricName, desc, prometheus.NilLabels, gauge)
        }
    }
    return err
}

func (c *muninCollector) Update() (updates int, err error) {
    err = c.registerMetrics()
    if err != nil {
        return updates, fmt.Errorf("Couldn't register metrics: %s", err)
    }

    for _, graph := range c.graphs {
        munin, err := c.muninCommand("fetch " + graph)
        if err != nil {
            return updates, err
        }

        for {
            line, err := munin.ReadString('\n')
            line = strings.TrimRight(line, "\n")
            if err == io.EOF {
                debug(c.Name(), "unexpected EOF, retrying")
                return c.Update()
            }
            if err != nil {
                return updates, err
            }
            if len(line) == 1 && line[0] == '.' {
                break // end of list
            }

            parts := strings.Fields(line)
            if len(parts) != 2 {
                debug(c.Name(), "unexpected line: %s", line)
                continue
            }
            key, value_s := strings.Split(parts[0], ".")[0], parts[1]
            value, err := strconv.ParseFloat(value_s, 64)
            if err != nil {
                debug(c.Name(), "Couldn't parse value in line %s, malformed?", line)
                continue
            }
            labels := map[string]string{
                "hostname": c.hostname,
            }
            name := graph + "-" + key
            debug(c.Name(), "Set %s{%s}: %f\n", name, labels, value)
            c.gaugePerMetric[name].Set(labels, value)
            updates++
        }
    }
    return updates, err
}

exporter/native_collector.go (new file, 154 lines)

@@ -0,0 +1,154 @@
package exporter

import (
    "bufio"
    "fmt"
    "github.com/prometheus/client_golang/prometheus"
    "io"
    "io/ioutil"
    "os"
    "os/exec"
    "strconv"
    "strings"
    "time"
)

const (
    procLoad = "/proc/loadavg"
)

type nativeCollector struct {
    loadAvg    prometheus.Gauge
    attributes prometheus.Gauge
    lastSeen   prometheus.Gauge
    name       string
    config     config
}

// Takes a config struct and prometheus registry and returns a new Collector exposing
// load, seconds since last login and a list of tags as specified by config.
func NewNativeCollector(config config, registry prometheus.Registry) (collector nativeCollector, err error) {
    hostname, err := os.Hostname()
    if err != nil {
        return nativeCollector{}, fmt.Errorf("Couldn't get hostname: %s", err)
    }

    collector = nativeCollector{
        name:       "native_collector",
        config:     config,
        loadAvg:    prometheus.NewGauge(),
        attributes: prometheus.NewGauge(),
        lastSeen:   prometheus.NewGauge(),
    }

    registry.Register(
        "node_load",
        "node_exporter: system load.",
        map[string]string{"hostname": hostname},
        collector.loadAvg,
    )
    registry.Register(
        "node_last_login_seconds",
        "node_exporter: seconds since last login.",
        map[string]string{"hostname": hostname},
        collector.lastSeen,
    )
    registry.Register(
        "node_attributes",
        "node_exporter: system attributes.",
        map[string]string{"hostname": hostname},
        collector.attributes,
    )
    return collector, nil
}

func (c *nativeCollector) Name() string { return c.name }

func (c *nativeCollector) Update() (updates int, err error) {
    last, err := getSecondsSinceLastLogin()
    if err != nil {
        return updates, fmt.Errorf("Couldn't get last seen: %s", err)
    } else {
        updates++
        debug(c.Name(), "Set node_last_login_seconds: %f", last)
        c.lastSeen.Set(nil, last)
    }

    load, err := getLoad()
    if err != nil {
        return updates, fmt.Errorf("Couldn't get load: %s", err)
    } else {
        updates++
        debug(c.Name(), "Set node_load: %f", load)
        c.loadAvg.Set(nil, load)
    }

    debug(c.Name(), "Set node_attributes{%v}: 1", c.config.Attributes)
    c.attributes.Set(c.config.Attributes, 1)
    return updates, err
}

func getLoad() (float64, error) {
    data, err := ioutil.ReadFile(procLoad)
    if err != nil {
        return 0, err
    }
    parts := strings.Fields(string(data))
    load, err := strconv.ParseFloat(parts[0], 64)
    if err != nil {
        return 0, fmt.Errorf("Could not parse load '%s': %s", parts[0], err)
    }
    return load, nil
}

func getSecondsSinceLastLogin() (float64, error) {
    who := exec.Command("who", "/var/log/wtmp", "-l", "-u", "-s")

    output, err := who.StdoutPipe()
    if err != nil {
        return 0, err
    }

    err = who.Start()
    if err != nil {
        return 0, err
    }

    reader := bufio.NewReader(output)

    var last time.Time
    for {
        line, isPrefix, err := reader.ReadLine()
        if err == io.EOF {
            break
        }
        if isPrefix {
            return 0, fmt.Errorf("line too long: %s(...)", line)
        }
        fields := strings.Fields(string(line))
        lastDate := fields[2]
        lastTime := fields[3]

        dateParts, err := splitToInts(lastDate, "-") // 2013-04-16
        if err != nil {
            return 0, fmt.Errorf("Couldn't parse date in line '%s': %s", fields, err)
        }

        timeParts, err := splitToInts(lastTime, ":") // 11:33
        if err != nil {
            return 0, fmt.Errorf("Couldn't parse time in line '%s': %s", fields, err)
        }

        last_t := time.Date(dateParts[0], time.Month(dateParts[1]), dateParts[2], timeParts[0], timeParts[1], 0, 0, time.UTC)
        last = last_t
    }
    err = who.Wait()
    if err != nil {
        return 0, err
    }

    return float64(time.Now().Sub(last).Seconds()), nil
}

main.go (new file, 22 lines)

@@ -0,0 +1,22 @@
package main

import (
    "flag"
    "github.com/prometheus/node_exporter/exporter"
    "log"
)

var (
    configFile = flag.String("config", "node_exporter.conf", "config file.")
    memprofile = flag.String("memprofile", "", "write memory profile to this file")
)

func main() {
    flag.Parse()

    exporter, err := exporter.New(*configFile)
    if err != nil {
        log.Fatalf("Couldn't instantiate exporter: %s", err)
    }

    exporter.Loop()
}

node_exporter.conf (6 lines changed)

@@ -1,4 +1,10 @@
{
    "scrapeInterval": 10,
    "collectors": [
        "NativeCollector",
        "GmondCollector",
        "MuninCollector"
    ],
    "attributes" : {
        "web-server" : "1",
        "zone" : "a",

node_exporter.go (deleted, 224 lines)

@@ -1,224 +0,0 @@
package main

import (
    "bufio"
    "encoding/json"
    "flag"
    "fmt"
    "github.com/prometheus/client_golang"
    "github.com/prometheus/client_golang/metrics"
    "io"
    "io/ioutil"
    "log"
    "net/http"
    _ "net/http/pprof"
    "os"
    "os/exec"
    "os/signal"
    "strconv"
    "strings"
    "syscall"
    "time"
)

const (
    proto    = "tcp"
    procLoad = "/proc/loadavg"
)

var (
    verbose          = flag.Bool("verbose", false, "Verbose output.")
    listeningAddress = flag.String("listeningAddress", ":8080", "Address on which to expose JSON metrics.")
    metricsEndpoint  = flag.String("metricsEndpoint", "/metrics.json", "Path under which to expose JSON metrics.")
    configFile       = flag.String("config", "node_exporter.conf", "Config file.")
    scrapeInterval   = flag.Int("interval", 60, "Scrape interval.")

    loadAvg    = metrics.NewGauge()
    attributes = metrics.NewGauge()
    lastSeen   = metrics.NewGauge()
)

type config struct {
    Attributes map[string]string `json:"attributes"`
}

func init() {
    hostname, err := os.Hostname()
    if err != nil {
        log.Fatalf("Couldn't get hostname: %s", err)
    }

    registry.DefaultRegistry.Register(
        "node_load",
        "node_exporter: system load.",
        map[string]string{"hostname": hostname},
        loadAvg,
    )
    registry.DefaultRegistry.Register(
        "node_last_login_seconds",
        "node_exporter: seconds since last login.",
        map[string]string{"hostname": hostname},
        lastSeen,
    )
    registry.DefaultRegistry.Register(
        "node_attributes",
        "node_exporter: system attributes.",
        map[string]string{"hostname": hostname},
        attributes,
    )
}

func debug(format string, a ...interface{}) {
    if *verbose {
        log.Printf(format, a...)
    }
}

func newConfig(filename string) (conf config, err error) {
    log.Printf("Reading config %s", filename)
    bytes, err := ioutil.ReadFile(filename)
    if err != nil {
        return
    }
    err = json.Unmarshal(bytes, &conf)
    return
}

func serveStatus() {
    exporter := registry.DefaultRegistry.Handler()
    http.Handle(*metricsEndpoint, exporter)
    http.ListenAndServe(*listeningAddress, nil)
}

// Takes a string, splits it, converts each element to int and returns them as new list.
// It will return an error in case any element isn't an int.
func splitToInts(str string, sep string) (ints []int, err error) {
    for _, part := range strings.Split(str, sep) {
        i, err := strconv.Atoi(part)
        if err != nil {
            return nil, fmt.Errorf("Could not split '%s' because %s is no int: %s", str, part, err)
        }
        ints = append(ints, i)
    }
    return ints, nil
}

func main() {
    flag.Parse()

    sig := make(chan os.Signal, 1)
    signal.Notify(sig, syscall.SIGHUP)
    configChan := make(chan config)
    go func() {
        for _ = range sig {
            config, err := newConfig(*configFile)
            if err != nil {
                log.Printf("Couldn't reload config: %s", err)
                continue
            }
            configChan <- config
        }
    }()

    conf, err := newConfig(*configFile)
    if err != nil {
        log.Fatalf("Couldn't read config: %s", err)
    }

    go serveStatus()

    tick := time.Tick(time.Duration(*scrapeInterval) * time.Second)
    for {
        select {
        case conf = <-configChan:
            log.Printf("Got new config")
        case <-tick:
            log.Printf("Starting new scrape interval")

            last, err := getSecondsSinceLastLogin()
            if err != nil {
                log.Printf("Couldn't get last seen: %s", err)
            } else {
                debug("last: %f", last)
                lastSeen.Set(nil, last)
            }

            load, err := getLoad()
            if err != nil {
                log.Printf("Couldn't get load: %s", err)
            } else {
                debug("load: %f", load)
                loadAvg.Set(nil, load)
            }

            debug("attributes: %s", conf.Attributes)
            attributes.Set(conf.Attributes, 1)
        }
    }
}

func getLoad() (float64, error) {
    data, err := ioutil.ReadFile(procLoad)
    if err != nil {
        return 0, err
    }
    parts := strings.Fields(string(data))
    load, err := strconv.ParseFloat(parts[0], 64)
    if err != nil {
        return 0, fmt.Errorf("Could not parse load '%s': %s", parts[0], err)
    }
    return load, nil
}

func getSecondsSinceLastLogin() (float64, error) {
    who := exec.Command("who", "/var/log/wtmp", "-l", "-u", "-s")

    output, err := who.StdoutPipe()
    if err != nil {
        return 0, err
    }

    err = who.Start()
    if err != nil {
        return 0, err
    }

    reader := bufio.NewReader(output)

    var last time.Time
    for {
        line, isPrefix, err := reader.ReadLine()
        if err == io.EOF {
            break
        }
        if isPrefix {
            return 0, fmt.Errorf("line too long: %s(...)", line)
        }
        fields := strings.Fields(string(line))
        lastDate := fields[2]
        lastTime := fields[3]

        dateParts, err := splitToInts(lastDate, "-") // 2013-04-16
        if err != nil {
            return 0, err
        }

        timeParts, err := splitToInts(lastTime, ":") // 11:33
        if err != nil {
            return 0, err
        }

        last_t := time.Date(dateParts[0], time.Month(dateParts[1]), dateParts[2], timeParts[0], timeParts[1], 0, 0, time.UTC)
        last = last_t
    }
    err = who.Wait()
    if err != nil {
        return 0, err
    }

    return float64(time.Now().Sub(last).Seconds()), nil
}