mirror of https://github.com/prometheus/prometheus
Update prometheus/client_golang to v1.0.0 (#5682)
Signed-off-by: beorn7 <beorn@grafana.com>

Branch: pull/5693/head
Parent: 4cd81dfa5d
Commit: 372b3438e5
|
@@ -348,7 +348,7 @@ func QueryInstant(url, query string, p printer) int {
     api := v1.NewAPI(c)

     ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
-    val, err := api.Query(ctx, query, time.Now())
+    val, _, err := api.Query(ctx, query, time.Now()) // Ignoring warnings for now.
     cancel()
     if err != nil {
         fmt.Fprintln(os.Stderr, "query error:", err)
@@ -408,7 +408,7 @@ func QueryRange(url, query, start, end string, step time.Duration, p printer) int {
     api := v1.NewAPI(c)
     r := v1.Range{Start: stime, End: etime, Step: step}
     ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
-    val, err := api.QueryRange(ctx, query, r)
+    val, _, err := api.QueryRange(ctx, query, r) // Ignoring warnings for now.
     cancel()

     if err != nil {
@@ -462,7 +462,7 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer) int {
     // Run query against client.
     api := v1.NewAPI(c)
     ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
-    val, err := api.Series(ctx, matchers, stime, etime)
+    val, _, err := api.Series(ctx, matchers, stime, etime) // Ignoring warnings for now.
     cancel()

     if err != nil {
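The promtool call sites above simply discard the new warnings value with `_`. For reference, a minimal stand-alone sketch (not part of this commit; the address and query are placeholders) of how a caller of client_golang v1.0.0 could surface those warnings instead:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	// Placeholder address; point this at a real Prometheus server.
	c, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		fmt.Fprintln(os.Stderr, "client error:", err)
		os.Exit(1)
	}
	promAPI := v1.NewAPI(c)

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	val, warnings, err := promAPI.Query(ctx, "up", time.Now())
	if err != nil {
		fmt.Fprintln(os.Stderr, "query error:", err)
		os.Exit(1)
	}
	// Surface the warnings instead of discarding them with `_`.
	for _, w := range warnings {
		fmt.Fprintln(os.Stderr, "query warning:", w)
	}
	fmt.Println(val)
}
```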
go.mod (8 changes)
@@ -48,7 +48,7 @@ require (
     github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
     github.com/jackc/pgx v3.2.0+incompatible // indirect
     github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect
-    github.com/json-iterator/go v1.1.5
+    github.com/json-iterator/go v1.1.6
     github.com/jtolds/gls v4.2.1+incompatible // indirect
     github.com/knz/strtime v0.0.0-20181018220328-af2256ee352c // indirect
     github.com/lib/pq v1.0.0 // indirect
@@ -56,8 +56,6 @@ require (
     github.com/mattn/go-runewidth v0.0.4 // indirect
     github.com/miekg/dns v1.1.10
     github.com/mitchellh/reflectwalk v1.0.1 // indirect
-    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-    github.com/modern-go/reflect2 v1.0.1 // indirect
     github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 // indirect
     github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223
     github.com/oklog/run v1.0.0
@@ -68,9 +66,9 @@ require (
     github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea // indirect
     github.com/petermattis/goid v0.0.0-20170504144140-0ded85884ba5 // indirect
     github.com/pkg/errors v0.8.1
-    github.com/prometheus/client_golang v0.9.3
+    github.com/prometheus/client_golang v1.0.0
     github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
-    github.com/prometheus/common v0.4.0
+    github.com/prometheus/common v0.4.1
     github.com/prometheus/tsdb v0.8.0
     github.com/rlmcpherson/s3gof3r v0.5.0 // indirect
     github.com/rubyist/circuitbreaker v2.2.1+incompatible // indirect
go.sum (18 changes)
@@ -181,8 +181,8 @@ github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGk
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE=
-github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
@@ -269,8 +269,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
-github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU=
@@ -279,15 +279,13 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.8.0 h1:w1tAGxsBMLkuGrFMhqgcCeBkM5d1YI24udArs+aASuQ=
github.com/prometheus/tsdb v0.8.0/go.mod h1:fSI0j+IUQrDd7+ZtR9WKIGtoYAYAJUKcKhYLG25tN4g=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
@@ -10,10 +10,6 @@ A high-performance 100% compatible drop-in replacement of "encoding/json"

 You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)

-```
-Go开发者们请加入我们，滴滴出行平台技术部 taowen@didichuxing.com
-```
-
 # Benchmark

 ![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
@@ -312,6 +312,10 @@ func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {

 func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
     any := *(*Any)(ptr)
+    if any == nil {
+        stream.WriteNil()
+        return
+    }
     any.WriteTo(stream)
 }

@@ -77,14 +77,12 @@ func (iter *Iterator) ReadFloat32() (ret float32) {
 }

 func (iter *Iterator) readPositiveFloat32() (ret float32) {
-    value := uint64(0)
-    c := byte(' ')
     i := iter.head
     // first char
     if i == iter.tail {
         return iter.readFloat32SlowPath()
     }
-    c = iter.buf[i]
+    c := iter.buf[i]
     i++
     ind := floatDigits[c]
     switch ind {
@@ -107,7 +105,7 @@ func (iter *Iterator) readPositiveFloat32() (ret float32) {
             return
         }
     }
-    value = uint64(ind)
+    value := uint64(ind)
     // chars before dot
 non_decimal_loop:
     for ; i < iter.tail; i++ {
@@ -145,9 +143,7 @@ non_decimal_loop:
             }
             // too many decimal places
             return iter.readFloat32SlowPath()
-        case invalidCharForNumber:
-            fallthrough
-        case dotInNumber:
+        case invalidCharForNumber, dotInNumber:
             return iter.readFloat32SlowPath()
         }
         decimalPlaces++
@@ -218,14 +214,12 @@ func (iter *Iterator) ReadFloat64() (ret float64) {
 }

 func (iter *Iterator) readPositiveFloat64() (ret float64) {
-    value := uint64(0)
-    c := byte(' ')
     i := iter.head
     // first char
     if i == iter.tail {
         return iter.readFloat64SlowPath()
     }
-    c = iter.buf[i]
+    c := iter.buf[i]
     i++
     ind := floatDigits[c]
     switch ind {
@@ -248,7 +242,7 @@ func (iter *Iterator) readPositiveFloat64() (ret float64) {
             return
         }
     }
-    value = uint64(ind)
+    value := uint64(ind)
     // chars before dot
 non_decimal_loop:
     for ; i < iter.tail; i++ {
@@ -286,9 +280,7 @@ non_decimal_loop:
             }
             // too many decimal places
             return iter.readFloat64SlowPath()
-        case invalidCharForNumber:
-            fallthrough
-        case dotInNumber:
+        case invalidCharForNumber, dotInNumber:
             return iter.readFloat64SlowPath()
         }
         decimalPlaces++
@@ -2,12 +2,22 @@

 package jsoniter

-import "fmt"
+import (
+    "fmt"
+    "io"
+)

 func (iter *Iterator) skipNumber() {
     if !iter.trySkipNumber() {
         iter.unreadByte()
-        iter.ReadFloat32()
+        if iter.Error != nil && iter.Error != io.EOF {
+            return
+        }
+        iter.ReadFloat64()
+        if iter.Error != nil && iter.Error != io.EOF {
+            iter.Error = nil
+            iter.ReadBigFloat()
+        }
     }
 }

@@ -338,7 +338,7 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
     for i := 0; i < structType.NumField(); i++ {
         field := structType.Field(i)
         tag, hastag := field.Tag().Lookup(ctx.getTagKey())
-        if ctx.onlyTaggedField && !hastag {
+        if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
             continue
         }
         tagParts := strings.Split(tag, ",")
@@ -64,14 +64,26 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
         return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
     default:
         ptrType := reflect2.PtrTo(typ)
-        if ptrType.Implements(textMarshalerType) {
+        if ptrType.Implements(unmarshalerType) {
+            return &referenceDecoder{
+                &unmarshalerDecoder{
+                    valType: ptrType,
+                },
+            }
+        }
+        if typ.Implements(unmarshalerType) {
+            return &unmarshalerDecoder{
+                valType: typ,
+            }
+        }
+        if ptrType.Implements(textUnmarshalerType) {
             return &referenceDecoder{
                 &textUnmarshalerDecoder{
                     valType: ptrType,
                 },
             }
         }
-        if typ.Implements(textMarshalerType) {
+        if typ.Implements(textUnmarshalerType) {
             return &textUnmarshalerDecoder{
                 valType: typ,
             }
@@ -93,8 +93,7 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
         stream.WriteNil()
         return
     }
-    marshaler := obj.(json.Marshaler)
-    bytes, err := marshaler.MarshalJSON()
+    bytes, err := json.Marshal(obj)
     if err != nil {
         stream.Error = err
     } else {
@@ -25,6 +25,8 @@ import (
     "time"
 )

+type Warnings []string
+
 // DefaultRoundTripper is used if no RoundTripper is set in Config.
 var DefaultRoundTripper http.RoundTripper = &http.Transport{
     Proxy: http.ProxyFromEnvironment,
@@ -55,27 +57,30 @@ func (cfg *Config) roundTripper() http.RoundTripper {
 // Client is the interface for an API client.
 type Client interface {
     URL(ep string, args map[string]string) *url.URL
-    Do(context.Context, *http.Request) (*http.Response, []byte, error)
+    Do(context.Context, *http.Request) (*http.Response, []byte, Warnings, error)
 }

 // DoGetFallback will attempt to do the request as-is, and on a 405 it will fallback to a GET request.
-func DoGetFallback(c Client, ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, error) {
+func DoGetFallback(c Client, ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, Warnings, error) {
     req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(args.Encode()))
     if err != nil {
-        return nil, nil, err
+        return nil, nil, nil, err
     }
     req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

-    resp, body, err := c.Do(ctx, req)
+    resp, body, warnings, err := c.Do(ctx, req)
     if resp != nil && resp.StatusCode == http.StatusMethodNotAllowed {
         u.RawQuery = args.Encode()
         req, err = http.NewRequest(http.MethodGet, u.String(), nil)
         if err != nil {
-            return nil, nil, err
+            return nil, nil, warnings, err
         }

     } else {
-        return resp, body, err
+        if err != nil {
+            return resp, body, warnings, err
+        }
+        return resp, body, warnings, nil
     }
     return c.Do(ctx, req)
 }
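Since `api.Client.Do` now returns `api.Warnings` as a fourth value, any custom wrapper that implements the `Client` interface must be updated as well. A hedged sketch of such a wrapper against client_golang v1.0.0 (the `headerClient` type and its User-Agent use case are illustrative, not taken from this commit):

```go
package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

// headerClient is a hypothetical decorator around api.Client; it only exists to
// show the new four-value Do signature that any wrapper must now implement.
type headerClient struct {
	api.Client // URL is promoted from the wrapped client.
	userAgent  string
}

func (c headerClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, api.Warnings, error) {
	req.Header.Set("User-Agent", c.userAgent)
	return c.Client.Do(ctx, req)
}

func main() {
	inner, err := api.NewClient(api.Config{Address: "http://localhost:9090"}) // placeholder address
	if err != nil {
		panic(err)
	}
	promAPI := v1.NewAPI(headerClient{Client: inner, userAgent: "example/0.1"})
	_ = promAPI
}
```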
@@ -115,7 +120,7 @@ func (c *httpClient) URL(ep string, args map[string]string) *url.URL {
     return &u
 }

-func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, Warnings, error) {
     if ctx != nil {
         req = req.WithContext(ctx)
     }
@@ -127,7 +132,7 @@ func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response,
     }()

     if err != nil {
-        return nil, nil, err
+        return nil, nil, nil, err
     }

     var body []byte
@@ -147,5 +152,5 @@ func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response,
     case <-done:
     }

-    return resp, body, err
+    return resp, body, nil, err
 }
@@ -17,17 +17,104 @@ package v1

 import (
     "context"
-    "encoding/json"
     "errors"
     "fmt"
+    "math"
     "net/http"
     "strconv"
     "time"
+    "unsafe"
+
+    json "github.com/json-iterator/go"
+
+    "github.com/prometheus/common/model"
+
     "github.com/prometheus/client_golang/api"
-    "github.com/prometheus/common/model"
 )

+func init() {
+    json.RegisterTypeEncoderFunc("model.SamplePair", marshalPointJSON, marshalPointJSONIsEmpty)
+    json.RegisterTypeDecoderFunc("model.SamplePair", unMarshalPointJSON)
+}
+
+func unMarshalPointJSON(ptr unsafe.Pointer, iter *json.Iterator) {
+    p := (*model.SamplePair)(ptr)
+    if !iter.ReadArray() {
+        iter.ReportError("unmarshal model.SamplePair", "SamplePair must be [timestamp, value]")
+        return
+    }
+    t := iter.ReadNumber()
+    if err := p.Timestamp.UnmarshalJSON([]byte(t)); err != nil {
+        iter.ReportError("unmarshal model.SamplePair", err.Error())
+        return
+    }
+    if !iter.ReadArray() {
+        iter.ReportError("unmarshal model.SamplePair", "SamplePair missing value")
+        return
+    }
+
+    f, err := strconv.ParseFloat(iter.ReadString(), 64)
+    if err != nil {
+        iter.ReportError("unmarshal model.SamplePair", err.Error())
+        return
+    }
+    p.Value = model.SampleValue(f)
+
+    if iter.ReadArray() {
+        iter.ReportError("unmarshal model.SamplePair", "SamplePair has too many values, must be [timestamp, value]")
+        return
+    }
+}
+
+func marshalPointJSON(ptr unsafe.Pointer, stream *json.Stream) {
+    p := *((*model.SamplePair)(ptr))
+    stream.WriteArrayStart()
+    // Write out the timestamp as a float divided by 1000.
+    // This is ~3x faster than converting to a float.
+    t := int64(p.Timestamp)
+    if t < 0 {
+        stream.WriteRaw(`-`)
+        t = -t
+    }
+    stream.WriteInt64(t / 1000)
+    fraction := t % 1000
+    if fraction != 0 {
+        stream.WriteRaw(`.`)
+        if fraction < 100 {
+            stream.WriteRaw(`0`)
+        }
+        if fraction < 10 {
+            stream.WriteRaw(`0`)
+        }
+        stream.WriteInt64(fraction)
+    }
+    stream.WriteMore()
+    stream.WriteRaw(`"`)
+
+    // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround
+    // to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan)
+    buf := stream.Buffer()
+    abs := math.Abs(float64(p.Value))
+    fmt := byte('f')
+    // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+    if abs != 0 {
+        if abs < 1e-6 || abs >= 1e21 {
+            fmt = 'e'
+        }
+    }
+    buf = strconv.AppendFloat(buf, float64(p.Value), fmt, -1, 64)
+    stream.SetBuffer(buf)
+
+    stream.WriteRaw(`"`)
+    stream.WriteArrayEnd()
+
+}
+
+func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
+    return false
+}
+
 const (
     statusAPIError = 422

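For orientation, the codec registered above targets the usual Prometheus wire format for a sample pair, `[<seconds>.<ms>,"<value>"]`, writing the timestamp via integer arithmetic rather than float formatting. A tiny stand-alone sketch (plain `fmt`, not jsoniter; unlike the codec above, it always prints the millisecond fraction):

```go
package main

import "fmt"

func main() {
	// A timestamp in milliseconds and a value, as carried by model.SamplePair.
	tsMillis := int64(1435781451781)
	value := 0.5

	// Same idea as marshalPointJSON: emit seconds plus the millisecond
	// fraction via integer arithmetic instead of formatting a float.
	fmt.Printf("[%d.%03d,%q]\n", tsMillis/1000, tsMillis%1000, fmt.Sprintf("%g", value))
	// Prints: [1435781451.781,"0.5"]
}
```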
@@ -37,9 +124,11 @@ const (
     epAlertManagers = apiPrefix + "/alertmanagers"
     epQuery = apiPrefix + "/query"
     epQueryRange = apiPrefix + "/query_range"
+    epLabels = apiPrefix + "/labels"
     epLabelValues = apiPrefix + "/label/:name/values"
     epSeries = apiPrefix + "/series"
     epTargets = apiPrefix + "/targets"
+    epTargetsMetadata = apiPrefix + "/targets/metadata"
     epRules = apiPrefix + "/rules"
     epSnapshot = apiPrefix + "/admin/tsdb/snapshot"
     epDeleteSeries = apiPrefix + "/admin/tsdb/delete_series"
@@ -63,6 +152,9 @@ type RuleType string
 // RuleHealth models the health status of a rule.
 type RuleHealth string

+// MetricType models the type of a metric.
+type MetricType string
+
 const (
     // Possible values for AlertState.
     AlertStateFiring AlertState = "firing"
@@ -91,6 +183,16 @@ const (
     RuleHealthGood = "ok"
     RuleHealthUnknown = "unknown"
     RuleHealthBad = "err"
+
+    // Possible values for MetricType
+    MetricTypeCounter MetricType = "counter"
+    MetricTypeGauge MetricType = "gauge"
+    MetricTypeHistogram MetricType = "histogram"
+    MetricTypeGaugeHistogram MetricType = "gaugehistogram"
+    MetricTypeSummary MetricType = "summary"
+    MetricTypeInfo MetricType = "info"
+    MetricTypeStateset MetricType = "stateset"
+    MetricTypeUnknown MetricType = "unknown"
 )

 // Error is an error returned by the API.
@@ -126,14 +228,16 @@ type API interface {
     DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error
     // Flags returns the flag values that Prometheus was launched with.
     Flags(ctx context.Context) (FlagsResult, error)
+    // LabelNames returns all the unique label names present in the block in sorted order.
+    LabelNames(ctx context.Context) ([]string, error)
     // LabelValues performs a query for the values of the given label.
     LabelValues(ctx context.Context, label string) (model.LabelValues, error)
     // Query performs a query for the given time.
-    Query(ctx context.Context, query string, ts time.Time) (model.Value, error)
+    Query(ctx context.Context, query string, ts time.Time) (model.Value, api.Warnings, error)
     // QueryRange performs a query for the given range.
-    QueryRange(ctx context.Context, query string, r Range) (model.Value, error)
+    QueryRange(ctx context.Context, query string, r Range) (model.Value, api.Warnings, error)
     // Series finds series by label matchers.
-    Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error)
+    Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, api.Warnings, error)
     // Snapshot creates a snapshot of all current data into snapshots/<datetime>-<rand>
     // under the TSDB's data directory and returns the directory as response.
     Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error)
@@ -141,6 +245,8 @@ type API interface {
     Rules(ctx context.Context) (RulesResult, error)
     // Targets returns an overview of the current state of the Prometheus target discovery.
     Targets(ctx context.Context) (TargetsResult, error)
+    // TargetsMetadata returns metadata about metrics currently scraped by the target.
+    TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error)
 }

 // AlertsResult contains the result from querying the alerts endpoint.
@@ -226,7 +332,7 @@ type Alert struct {
     Annotations model.LabelSet
     Labels model.LabelSet
     State AlertState
-    Value float64
+    Value string
 }

 // TargetsResult contains the result from querying the targets endpoint.
@@ -250,6 +356,15 @@ type DroppedTarget struct {
     DiscoveredLabels map[string]string `json:"discoveredLabels"`
 }

+// MetricMetadata models the metadata of a metric.
+type MetricMetadata struct {
+    Target map[string]string `json:"target"`
+    Metric string `json:"metric,omitempty"`
+    Type MetricType `json:"type"`
+    Help string `json:"help"`
+    Unit string `json:"unit"`
+}
+
 // queryResult contains result data for a query.
 type queryResult struct {
     Type model.ValueType `json:"resultType"`
@@ -416,14 +531,13 @@ func (h *httpAPI) Alerts(ctx context.Context) (AlertsResult, error) {
         return AlertsResult{}, err
     }

-    _, body, err := h.client.Do(ctx, req)
+    _, body, _, err := h.client.Do(ctx, req)
     if err != nil {
         return AlertsResult{}, err
     }

     var res AlertsResult
-    err = json.Unmarshal(body, &res)
-    return res, err
+    return res, json.Unmarshal(body, &res)
 }

 func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) {
@@ -434,14 +548,13 @@ func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error
         return AlertManagersResult{}, err
     }

-    _, body, err := h.client.Do(ctx, req)
+    _, body, _, err := h.client.Do(ctx, req)
     if err != nil {
         return AlertManagersResult{}, err
     }

     var res AlertManagersResult
-    err = json.Unmarshal(body, &res)
-    return res, err
+    return res, json.Unmarshal(body, &res)
 }

 func (h *httpAPI) CleanTombstones(ctx context.Context) error {
@@ -452,7 +565,7 @@ func (h *httpAPI) CleanTombstones(ctx context.Context) error {
         return err
     }

-    _, _, err = h.client.Do(ctx, req)
+    _, _, _, err = h.client.Do(ctx, req)
     return err
 }

@@ -464,14 +577,13 @@ func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) {
         return ConfigResult{}, err
     }

-    _, body, err := h.client.Do(ctx, req)
+    _, body, _, err := h.client.Do(ctx, req)
     if err != nil {
         return ConfigResult{}, err
     }

     var res ConfigResult
-    err = json.Unmarshal(body, &res)
-    return res, err
+    return res, json.Unmarshal(body, &res)
 }

 func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error {
@@ -492,7 +604,7 @@ func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime
         return err
     }

-    _, _, err = h.client.Do(ctx, req)
+    _, _, _, err = h.client.Do(ctx, req)
     return err
 }

@@ -504,14 +616,27 @@ func (h *httpAPI) Flags(ctx context.Context) (FlagsResult, error) {
         return FlagsResult{}, err
     }

-    _, body, err := h.client.Do(ctx, req)
+    _, body, _, err := h.client.Do(ctx, req)
     if err != nil {
         return FlagsResult{}, err
     }

     var res FlagsResult
-    err = json.Unmarshal(body, &res)
-    return res, err
+    return res, json.Unmarshal(body, &res)
+}
+
+func (h *httpAPI) LabelNames(ctx context.Context) ([]string, error) {
+    u := h.client.URL(epLabels, nil)
+    req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+    if err != nil {
+        return nil, err
+    }
+    _, body, _, err := h.client.Do(ctx, req)
+    if err != nil {
+        return nil, err
+    }
+    var labelNames []string
+    return labelNames, json.Unmarshal(body, &labelNames)
 }

 func (h *httpAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, error) {
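The new `LabelNames` method wraps the `/api/v1/labels` endpoint added earlier in this diff. A hedged usage sketch against client_golang v1.0.0 (the server address is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	c, err := api.NewClient(api.Config{Address: "http://localhost:9090"}) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	promAPI := v1.NewAPI(c)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Returns every label name known to the server, in sorted order.
	names, err := promAPI.LabelNames(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range names {
		fmt.Println(n)
	}
}
```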
@@ -520,16 +645,15 @@ func (h *httpAPI) LabelValues(ctx context.Context, label string) (model.LabelVal
     if err != nil {
         return nil, err
     }
-    _, body, err := h.client.Do(ctx, req)
+    _, body, _, err := h.client.Do(ctx, req)
     if err != nil {
         return nil, err
     }
     var labelValues model.LabelValues
-    err = json.Unmarshal(body, &labelValues)
-    return labelValues, err
+    return labelValues, json.Unmarshal(body, &labelValues)
 }

-func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) {
+func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, api.Warnings, error) {
     u := h.client.URL(epQuery, nil)
     q := u.Query()

@@ -538,18 +662,16 @@ func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.
         q.Set("time", ts.Format(time.RFC3339Nano))
     }

-    _, body, err := api.DoGetFallback(h.client, ctx, u, q)
+    _, body, warnings, err := api.DoGetFallback(h.client, ctx, u, q)
     if err != nil {
-        return nil, err
+        return nil, warnings, err
     }

     var qres queryResult
-    err = json.Unmarshal(body, &qres)
-
-    return model.Value(qres.v), err
+    return model.Value(qres.v), warnings, json.Unmarshal(body, &qres)
 }

-func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) {
+func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, api.Warnings, error) {
     u := h.client.URL(epQueryRange, nil)
     q := u.Query()

@@ -564,18 +686,17 @@ func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.
     q.Set("end", end)
     q.Set("step", step)

-    _, body, err := api.DoGetFallback(h.client, ctx, u, q)
+    _, body, warnings, err := api.DoGetFallback(h.client, ctx, u, q)
     if err != nil {
-        return nil, err
+        return nil, warnings, err
     }

     var qres queryResult
-    err = json.Unmarshal(body, &qres)

-    return model.Value(qres.v), err
+    return model.Value(qres.v), warnings, json.Unmarshal(body, &qres)
 }

-func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error) {
+func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, api.Warnings, error) {
     u := h.client.URL(epSeries, nil)
     q := u.Query()

@@ -590,17 +711,16 @@ func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.T

     req, err := http.NewRequest(http.MethodGet, u.String(), nil)
     if err != nil {
-        return nil, err
+        return nil, nil, err
     }

-    _, body, err := h.client.Do(ctx, req)
+    _, body, warnings, err := h.client.Do(ctx, req)
     if err != nil {
-        return nil, err
+        return nil, warnings, err
     }

     var mset []model.LabelSet
-    err = json.Unmarshal(body, &mset)
-    return mset, err
+    return mset, warnings, json.Unmarshal(body, &mset)
 }

 func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) {
@@ -616,14 +736,13 @@ func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult,
         return SnapshotResult{}, err
     }

-    _, body, err := h.client.Do(ctx, req)
+    _, body, _, err := h.client.Do(ctx, req)
     if err != nil {
         return SnapshotResult{}, err
     }

     var res SnapshotResult
-    err = json.Unmarshal(body, &res)
-    return res, err
+    return res, json.Unmarshal(body, &res)
 }

 func (h *httpAPI) Rules(ctx context.Context) (RulesResult, error) {
@@ -634,14 +753,13 @@ func (h *httpAPI) Rules(ctx context.Context) (RulesResult, error) {
         return RulesResult{}, err
     }

-    _, body, err := h.client.Do(ctx, req)
+    _, body, _, err := h.client.Do(ctx, req)
     if err != nil {
         return RulesResult{}, err
     }

     var res RulesResult
-    err = json.Unmarshal(body, &res)
-    return res, err
+    return res, json.Unmarshal(body, &res)
 }

 func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) {
@@ -652,14 +770,37 @@ func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) {
         return TargetsResult{}, err
     }

-    _, body, err := h.client.Do(ctx, req)
+    _, body, _, err := h.client.Do(ctx, req)
     if err != nil {
         return TargetsResult{}, err
     }

     var res TargetsResult
-    err = json.Unmarshal(body, &res)
-    return res, err
+    return res, json.Unmarshal(body, &res)
+}
+
+func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error) {
+    u := h.client.URL(epTargetsMetadata, nil)
+    q := u.Query()
+
+    q.Set("match_target", matchTarget)
+    q.Set("metric", metric)
+    q.Set("limit", limit)
+
+    u.RawQuery = q.Encode()
+
+    req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+    if err != nil {
+        return nil, err
+    }
+
+    _, body, _, err := h.client.Do(ctx, req)
+    if err != nil {
+        return nil, err
+    }
+
+    var res []MetricMetadata
+    return res, json.Unmarshal(body, &res)
 }

 // apiClient wraps a regular client and processes successful API responses.
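`TargetsMetadata` takes a target selector, a metric name, and a limit, all as strings, and returns the new `MetricMetadata` values. A hedged usage sketch (the selector and metric name are illustrative, not from this commit):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	c, err := api.NewClient(api.Config{Address: "http://localhost:9090"}) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	promAPI := v1.NewAPI(c)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Illustrative selector, metric name, and (empty) limit.
	md, err := promAPI.TargetsMetadata(ctx, `{job="prometheus"}`, "go_goroutines", "")
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range md {
		fmt.Printf("%s (%s): %s\n", m.Metric, m.Type, m.Help)
	}
}
```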
@@ -673,6 +814,7 @@ type apiResponse struct {
     Data json.RawMessage `json:"data"`
     ErrorType ErrorType `json:"errorType"`
     Error string `json:"error"`
+    Warnings []string `json:"warnings,omitempty"`
 }

 func apiError(code int) bool {
@@ -690,17 +832,17 @@ func errorTypeAndMsgFor(resp *http.Response) (ErrorType, string) {
     return ErrBadResponse, fmt.Sprintf("bad response code %d", resp.StatusCode)
 }

-func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
-    resp, body, err := c.Client.Do(ctx, req)
+func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, api.Warnings, error) {
+    resp, body, warnings, err := c.Client.Do(ctx, req)
     if err != nil {
-        return resp, body, err
+        return resp, body, warnings, err
     }

     code := resp.StatusCode

     if code/100 != 2 && !apiError(code) {
         errorType, errorMsg := errorTypeAndMsgFor(resp)
-        return resp, body, &Error{
+        return resp, body, warnings, &Error{
             Type: errorType,
             Msg: errorMsg,
             Detail: string(body),
@@ -710,10 +852,10 @@ func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, [
     var result apiResponse

     if http.StatusNoContent != code {
-        if err = json.Unmarshal(body, &result); err != nil {
-            return resp, body, &Error{
+        if jsonErr := json.Unmarshal(body, &result); jsonErr != nil {
+            return resp, body, warnings, &Error{
                 Type: ErrBadResponse,
-                Msg: err.Error(),
+                Msg: jsonErr.Error(),
             }
         }
     }
@@ -732,5 +874,6 @@ func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, [
         }
     }

-    return resp, []byte(result.Data), err
+    return resp, []byte(result.Data), warnings, err
+
 }
vendor/github.com/prometheus/client_golang/prometheus/build_info.go (new file, generated, vendored, 29 additions)
@@ -0,0 +1,29 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.12
+
+package prometheus
+
+import "runtime/debug"
+
+// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
+func readBuildInfo() (path, version, sum string) {
+    path, version, sum = "unknown", "unknown", "unknown"
+    if bi, ok := debug.ReadBuildInfo(); ok {
+        path = bi.Main.Path
+        version = bi.Main.Version
+        sum = bi.Main.Sum
+    }
+    return
+}
vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go (new file, generated, vendored, 22 additions)
@@ -0,0 +1,22 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.12
+
+package prometheus
+
+// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
+// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
+func readBuildInfo() (path, version, sum string) {
+    return "unknown", "unknown", "unknown"
+}
@@ -183,7 +183,6 @@
 // method can then expose the gathered metrics in some way. Usually, the metrics
 // are served via HTTP on the /metrics endpoint. That's happening in the example
 // above. The tools to expose metrics via HTTP are in the promhttp sub-package.
-// (The top-level functions in the prometheus package are deprecated.)
 //
 // Pushing to the Pushgateway
 //
@@ -36,7 +36,7 @@ type goCollector struct {
     msMaxAge time.Duration // Maximum allowed age of old memstats.
 }

-// NewGoCollector returns a collector which exports metrics about the current Go
+// NewGoCollector returns a collector that exports metrics about the current Go
 // process. This includes memory stats. To collect those, runtime.ReadMemStats
 // is called. This requires to “stop the world”, which usually only happens for
 // garbage collection (GC). Take the following implications into account when
@@ -364,3 +364,33 @@ type memStatsMetrics []struct {
     eval func(*runtime.MemStats) float64
     valType ValueType
 }
+
+// NewBuildInfoCollector returns a collector collecting a single metric
+// "go_build_info" with the constant value 1 and three labels "path", "version",
+// and "checksum". Their label values contain the main module path, version, and
+// checksum, respectively. The labels will only have meaningful values if the
+// binary is built with Go module support and from source code retrieved from
+// the source repository (rather than the local file system). This is usually
+// accomplished by building from outside of GOPATH, specifying the full address
+// of the main package, e.g. "GO111MODULE=on go run
+// github.com/prometheus/client_golang/examples/random". If built without Go
+// module support, all label values will be "unknown". If built with Go module
+// support but using the source code from the local file system, the "path" will
+// be set appropriately, but "checksum" will be empty and "version" will be
+// "(devel)".
+//
+// This collector uses only the build information for the main module. See
+// https://github.com/povilasv/prommod for an example of a collector for the
+// module dependencies.
+func NewBuildInfoCollector() Collector {
+    path, version, sum := readBuildInfo()
+    c := &selfCollector{MustNewConstMetric(
+        NewDesc(
+            "go_build_info",
+            "Build information about the main Go module.",
+            nil, Labels{"path": path, "version": version, "checksum": sum},
+        ),
+        GaugeValue, 1)}
+    c.init(c.self)
+    return c
+}
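The collector documented above is registered like any other; a minimal sketch using a dedicated registry (the registry choice is the caller's, and the metric values depend on how the binary was built):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	// Exposes go_build_info{path="...",version="...",checksum="..."} 1
	reg.MustRegister(prometheus.NewBuildInfoCollector())

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}
```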
@ -1,505 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"bufio"
	"compress/gzip"
	"io"
	"net"
	"net/http"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/common/expfmt"
)

// TODO(beorn7): Remove this whole file. It is a partial mirror of
// promhttp/http.go (to avoid circular import chains) where everything HTTP
// related should live. The functions here are just for avoiding
// breakage. Everything is deprecated.

const (
	contentTypeHeader     = "Content-Type"
	contentEncodingHeader = "Content-Encoding"
	acceptEncodingHeader  = "Accept-Encoding"
)

var gzipPool = sync.Pool{
	New: func() interface{} {
		return gzip.NewWriter(nil)
	},
}

// Handler returns an HTTP handler for the DefaultGatherer. It is
// already instrumented with InstrumentHandler (using "prometheus" as handler
// name).
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead.
func Handler() http.Handler {
	return InstrumentHandler("prometheus", UninstrumentedHandler())
}

// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
//
// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
// instead. See there for further documentation.
func UninstrumentedHandler() http.Handler {
	return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
		mfs, err := DefaultGatherer.Gather()
		if err != nil {
			httpError(rsp, err)
			return
		}

		contentType := expfmt.Negotiate(req.Header)
		header := rsp.Header()
		header.Set(contentTypeHeader, string(contentType))

		w := io.Writer(rsp)
		if gzipAccepted(req.Header) {
			header.Set(contentEncodingHeader, "gzip")
			gz := gzipPool.Get().(*gzip.Writer)
			defer gzipPool.Put(gz)

			gz.Reset(w)
			defer gz.Close()

			w = gz
		}

		enc := expfmt.NewEncoder(w, contentType)

		for _, mf := range mfs {
			if err := enc.Encode(mf); err != nil {
				httpError(rsp, err)
				return
			}
		}
	})
}

var instLabels = []string{"method", "code"}

type nower interface {
	Now() time.Time
}

type nowFunc func() time.Time

func (n nowFunc) Now() time.Time {
	return n()
}

var now nower = nowFunc(func() time.Time {
	return time.Now()
})

// InstrumentHandler wraps the given HTTP handler for instrumentation. It
// registers four metric collectors (if not already done) and reports HTTP
// metrics to the (newly or already) registered collectors: http_requests_total
// (CounterVec), http_request_duration_microseconds (Summary),
// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
// has a constant label named "handler" with the provided handlerName as
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
// package promhttp instead. The issues are the following: (1) It uses Summaries
// rather than Histograms. Summaries are not useful if aggregation across
// multiple instances is required. (2) It uses microseconds as unit, which is
// deprecated and should be replaced by seconds. (3) The size of the request is
// calculated in a separate goroutine. Since this calculator requires access to
// the request header, it creates a race with any writes to the header performed
// during request handling. httputil.ReverseProxy is a prominent example for a
// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
// https://github.com/prometheus/client_golang/issues/272.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
	return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}

// InstrumentHandlerFunc wraps the given function for instrumentation. It
// otherwise works in the same way as InstrumentHandler (and shares the same
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
	return InstrumentHandlerFuncWithOpts(
		SummaryOpts{
			Subsystem:   "http",
			ConstLabels: Labels{"handler": handlerName},
			Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		handlerFunc,
	)
}

// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
// issues) but provides more flexibility (at the cost of a more complex call
// syntax). As InstrumentHandler, this function registers four metric
// collectors, but it uses the provided SummaryOpts to create them. However, the
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
// help string. The names of the variable labels of the http_requests_total
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
//
// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
// behavior of InstrumentHandler:
//
//     prometheus.InstrumentHandlerWithOpts(
//         prometheus.SummaryOpts{
//             Subsystem:   "http",
//             ConstLabels: prometheus.Labels{"handler": handlerName},
//         },
//         handler,
//     )
//
// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
// and all its fields are set to the equally named fields in the provided
// SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
	return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}

// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
// the same issues) but provides more flexibility (at the cost of a more complex
// call syntax). See InstrumentHandlerWithOpts for details how the provided
// SummaryOpts are used.
//
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
	reqCnt := NewCounterVec(
		CounterOpts{
			Namespace:   opts.Namespace,
			Subsystem:   opts.Subsystem,
			Name:        "requests_total",
			Help:        "Total number of HTTP requests made.",
			ConstLabels: opts.ConstLabels,
		},
		instLabels,
	)
	if err := Register(reqCnt); err != nil {
		if are, ok := err.(AlreadyRegisteredError); ok {
			reqCnt = are.ExistingCollector.(*CounterVec)
		} else {
			panic(err)
		}
	}

	opts.Name = "request_duration_microseconds"
	opts.Help = "The HTTP request latencies in microseconds."
	reqDur := NewSummary(opts)
	if err := Register(reqDur); err != nil {
		if are, ok := err.(AlreadyRegisteredError); ok {
			reqDur = are.ExistingCollector.(Summary)
		} else {
			panic(err)
		}
	}

	opts.Name = "request_size_bytes"
	opts.Help = "The HTTP request sizes in bytes."
	reqSz := NewSummary(opts)
	if err := Register(reqSz); err != nil {
		if are, ok := err.(AlreadyRegisteredError); ok {
			reqSz = are.ExistingCollector.(Summary)
		} else {
			panic(err)
		}
	}

	opts.Name = "response_size_bytes"
	opts.Help = "The HTTP response sizes in bytes."
	resSz := NewSummary(opts)
	if err := Register(resSz); err != nil {
		if are, ok := err.(AlreadyRegisteredError); ok {
			resSz = are.ExistingCollector.(Summary)
		} else {
			panic(err)
		}
	}

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		now := time.Now()

		delegate := &responseWriterDelegator{ResponseWriter: w}
		out := computeApproximateRequestSize(r)

		_, cn := w.(http.CloseNotifier)
		_, fl := w.(http.Flusher)
		_, hj := w.(http.Hijacker)
		_, rf := w.(io.ReaderFrom)
		var rw http.ResponseWriter
		if cn && fl && hj && rf {
			rw = &fancyResponseWriterDelegator{delegate}
		} else {
			rw = delegate
		}
		handlerFunc(rw, r)

		elapsed := float64(time.Since(now)) / float64(time.Microsecond)

		method := sanitizeMethod(r.Method)
		code := sanitizeCode(delegate.status)
		reqCnt.WithLabelValues(method, code).Inc()
		reqDur.Observe(elapsed)
		resSz.Observe(float64(delegate.written))
		reqSz.Observe(float64(<-out))
	})
}

func computeApproximateRequestSize(r *http.Request) <-chan int {
	// Get URL length in current goroutine for avoiding a race condition.
	// HandlerFunc that runs in parallel may modify the URL.
	s := 0
	if r.URL != nil {
		s += len(r.URL.String())
	}

	out := make(chan int, 1)

	go func() {
		s += len(r.Method)
		s += len(r.Proto)
		for name, values := range r.Header {
			s += len(name)
			for _, value := range values {
				s += len(value)
			}
		}
		s += len(r.Host)

		// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.

		if r.ContentLength != -1 {
			s += int(r.ContentLength)
		}
		out <- s
		close(out)
	}()

	return out
}

type responseWriterDelegator struct {
	http.ResponseWriter

	status      int
	written     int64
	wroteHeader bool
}

func (r *responseWriterDelegator) WriteHeader(code int) {
	r.status = code
	r.wroteHeader = true
	r.ResponseWriter.WriteHeader(code)
}

func (r *responseWriterDelegator) Write(b []byte) (int, error) {
	if !r.wroteHeader {
		r.WriteHeader(http.StatusOK)
	}
	n, err := r.ResponseWriter.Write(b)
	r.written += int64(n)
	return n, err
}

type fancyResponseWriterDelegator struct {
	*responseWriterDelegator
}

func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
	//remove support from client_golang yet.
	return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

func (f *fancyResponseWriterDelegator) Flush() {
	f.ResponseWriter.(http.Flusher).Flush()
}

func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	return f.ResponseWriter.(http.Hijacker).Hijack()
}

func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
	if !f.wroteHeader {
		f.WriteHeader(http.StatusOK)
	}
	n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
	f.written += n
	return n, err
}

func sanitizeMethod(m string) string {
	switch m {
	case "GET", "get":
		return "get"
	case "PUT", "put":
		return "put"
	case "HEAD", "head":
		return "head"
	case "POST", "post":
		return "post"
	case "DELETE", "delete":
		return "delete"
	case "CONNECT", "connect":
		return "connect"
	case "OPTIONS", "options":
		return "options"
	case "NOTIFY", "notify":
		return "notify"
	default:
		return strings.ToLower(m)
	}
}

func sanitizeCode(s int) string {
	switch s {
	case 100:
		return "100"
	case 101:
		return "101"

	case 200:
		return "200"
	case 201:
		return "201"
	case 202:
		return "202"
	case 203:
		return "203"
	case 204:
		return "204"
	case 205:
		return "205"
	case 206:
		return "206"

	case 300:
		return "300"
	case 301:
		return "301"
	case 302:
		return "302"
	case 304:
		return "304"
	case 305:
		return "305"
	case 307:
		return "307"

	case 400:
		return "400"
	case 401:
		return "401"
	case 402:
		return "402"
	case 403:
		return "403"
	case 404:
		return "404"
	case 405:
		return "405"
	case 406:
		return "406"
	case 407:
		return "407"
	case 408:
		return "408"
	case 409:
		return "409"
	case 410:
		return "410"
	case 411:
		return "411"
	case 412:
		return "412"
	case 413:
		return "413"
	case 414:
		return "414"
	case 415:
		return "415"
	case 416:
		return "416"
	case 417:
		return "417"
	case 418:
		return "418"

	case 500:
		return "500"
	case 501:
		return "501"
	case 502:
		return "502"
	case 503:
		return "503"
	case 504:
		return "504"
	case 505:
		return "505"

	case 428:
		return "428"
	case 429:
		return "429"
	case 431:
		return "431"
	case 511:
		return "511"

	default:
		return strconv.Itoa(s)
	}
}

// gzipAccepted returns whether the client will accept gzip-encoded content.
func gzipAccepted(header http.Header) bool {
	a := header.Get(acceptEncodingHeader)
	parts := strings.Split(a, ",")
	for _, part := range parts {
		part = strings.TrimSpace(part)
		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
			return true
		}
	}
	return false
}

// httpError removes any content-encoding header and then calls http.Error with
// the provided error and http.StatusInternalServerError. Error contents is
// supposed to be uncompressed plain text. However, same as with a plain
// http.Error, any header settings will be void if the header has already been
// sent. The error message will still be written to the writer, but it will
// probably be of limited use.
func httpError(rsp http.ResponseWriter, err error) {
	rsp.Header().Del(contentEncodingHeader)
	http.Error(
		rsp,
		"An error has occurred while serving metrics:\n\n"+err.Error(),
		http.StatusInternalServerError,
	)
}
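The deprecated helpers deleted above have their replacements in package promhttp. A rough migration sketch; the metric name, buckets, and handler label below are illustrative choices, not something the library mandates:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Histogram of request durations in seconds, replacing the old
	// http_request_duration_microseconds Summary.
	dur := promauto.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Duration of HTTP requests.",
		Buckets: prometheus.DefBuckets,
	}, []string{"handler"})

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	http.Handle("/", promhttp.InstrumentHandlerDuration(
		dur.MustCurryWith(prometheus.Labels{"handler": "hello"}), hello))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

The other promhttp middlewares (for example InstrumentHandlerCounter) can be chained in the same way to cover the request count and size metrics the old wrapper produced.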
@ -16,8 +16,6 @@ package prometheus
 import (
 	"errors"
 	"os"
-
-	"github.com/prometheus/procfs"
 )

 type processCollector struct {

@ -59,20 +57,9 @@ type ProcessCollectorOpts struct {
 // collector for the current process with an empty namespace string and no error
 // reporting.
 //
-// Currently, the collector depends on a Linux-style proc filesystem and
-// therefore only exports metrics for Linux.
-//
-// Note: An older version of this function had the following signature:
-//
-//     NewProcessCollector(pid int, namespace string) Collector
-//
-// Most commonly, it was called as
-//
-//     NewProcessCollector(os.Getpid(), "")
-//
-// The following call of the current version is equivalent to the above:
-//
-//     NewProcessCollector(ProcessCollectorOpts{})
+// The collector only works on operating systems with a Linux-style proc
+// filesystem and on Microsoft Windows. On other operating systems, it will not
+// collect any metrics.
 func NewProcessCollector(opts ProcessCollectorOpts) Collector {
 	ns := ""
 	if len(opts.Namespace) > 0 {

@ -126,7 +113,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
 	}

 	// Set up process metric collection if supported by the runtime.
-	if _, err := procfs.NewStat(); err == nil {
+	if canCollectProcess() {
 		c.collectFn = c.processCollect
 	} else {
 		c.collectFn = func(ch chan<- Metric) {

@ -153,46 +140,6 @@ func (c *processCollector) Collect(ch chan<- Metric) {
 	c.collectFn(ch)
 }

-func (c *processCollector) processCollect(ch chan<- Metric) {
-	pid, err := c.pidFn()
-	if err != nil {
-		c.reportError(ch, nil, err)
-		return
-	}
-
-	p, err := procfs.NewProc(pid)
-	if err != nil {
-		c.reportError(ch, nil, err)
-		return
-	}
-
-	if stat, err := p.NewStat(); err == nil {
-		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
-		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
-		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
-		if startTime, err := stat.StartTime(); err == nil {
-			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
-		} else {
-			c.reportError(ch, c.startTime, err)
-		}
-	} else {
-		c.reportError(ch, nil, err)
-	}
-
-	if fds, err := p.FileDescriptorsLen(); err == nil {
-		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
-	} else {
-		c.reportError(ch, c.openFDs, err)
-	}
-
-	if limits, err := p.NewLimits(); err == nil {
-		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
-		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
-	} else {
-		c.reportError(ch, nil, err)
-	}
-}
-
 func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
 	if !c.reportErrors {
 		return
65 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go generated vendored Normal file
@ -0,0 +1,65 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package prometheus

import (
	"github.com/prometheus/procfs"
)

func canCollectProcess() bool {
	_, err := procfs.NewDefaultFS()
	return err == nil
}

func (c *processCollector) processCollect(ch chan<- Metric) {
	pid, err := c.pidFn()
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}

	p, err := procfs.NewProc(pid)
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}

	if stat, err := p.Stat(); err == nil {
		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
		if startTime, err := stat.StartTime(); err == nil {
			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
		} else {
			c.reportError(ch, c.startTime, err)
		}
	} else {
		c.reportError(ch, nil, err)
	}

	if fds, err := p.FileDescriptorsLen(); err == nil {
		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
	} else {
		c.reportError(ch, c.openFDs, err)
	}

	if limits, err := p.Limits(); err == nil {
		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
	} else {
		c.reportError(ch, nil, err)
	}
}
112 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go generated vendored Normal file
@ -0,0 +1,112 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

func canCollectProcess() bool {
	return true
}

var (
	modpsapi    = syscall.NewLazyDLL("psapi.dll")
	modkernel32 = syscall.NewLazyDLL("kernel32.dll")

	procGetProcessMemoryInfo  = modpsapi.NewProc("GetProcessMemoryInfo")
	procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
)

type processMemoryCounters struct {
	// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
	_                          uint32
	PageFaultCount             uint32
	PeakWorkingSetSize         uint64
	WorkingSetSize             uint64
	QuotaPeakPagedPoolUsage    uint64
	QuotaPagedPoolUsage        uint64
	QuotaPeakNonPagedPoolUsage uint64
	QuotaNonPagedPoolUsage     uint64
	PagefileUsage              uint64
	PeakPagefileUsage          uint64
	PrivateUsage               uint64
}

func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
	mem := processMemoryCounters{}
	r1, _, err := procGetProcessMemoryInfo.Call(
		uintptr(handle),
		uintptr(unsafe.Pointer(&mem)),
		uintptr(unsafe.Sizeof(mem)),
	)
	if r1 != 1 {
		return mem, err
	} else {
		return mem, nil
	}
}

func getProcessHandleCount(handle windows.Handle) (uint32, error) {
	var count uint32
	r1, _, err := procGetProcessHandleCount.Call(
		uintptr(handle),
		uintptr(unsafe.Pointer(&count)),
	)
	if r1 != 1 {
		return 0, err
	} else {
		return count, nil
	}
}

func (c *processCollector) processCollect(ch chan<- Metric) {
	h, err := windows.GetCurrentProcess()
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}

	var startTime, exitTime, kernelTime, userTime windows.Filetime
	err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}
	ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
	ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))

	mem, err := getProcessMemoryInfo(h)
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}
	ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
	ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))

	handles, err := getProcessHandleCount(h)
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}
	ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
	ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
}

func fileTimeToSeconds(ft windows.Filetime) float64 {
	return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
}
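The two platform files above supply the processCollect implementation selected at build time. A short sketch of registering the collector on a dedicated registry (registry, namespace, and address are illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// On Linux this reads /proc via procfs; on Windows it uses the psapi and
	// kernel32 calls above. Elsewhere it collects nothing.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```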
@ -84,10 +84,32 @@ func Handler() http.Handler {
 // instrumentation. Use the InstrumentMetricHandler function to apply the same
 // kind of instrumentation as it is used by the Handler function.
 func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
-	var inFlightSem chan struct{}
+	var (
+		inFlightSem chan struct{}
+		errCnt      = prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Name: "promhttp_metric_handler_errors_total",
+				Help: "Total number of internal errors encountered by the promhttp metric handler.",
+			},
+			[]string{"cause"},
+		)
+	)
+
 	if opts.MaxRequestsInFlight > 0 {
 		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
 	}
+	if opts.Registry != nil {
+		// Initialize all possibilities that can occur below.
+		errCnt.WithLabelValues("gathering")
+		errCnt.WithLabelValues("encoding")
+		if err := opts.Registry.Register(errCnt); err != nil {
+			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+				errCnt = are.ExistingCollector.(*prometheus.CounterVec)
+			} else {
+				panic(err)
+			}
+		}
+	}

 	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
 		if inFlightSem != nil {

@ -106,6 +128,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 			if opts.ErrorLog != nil {
 				opts.ErrorLog.Println("error gathering metrics:", err)
 			}
+			errCnt.WithLabelValues("gathering").Inc()
 			switch opts.ErrorHandling {
 			case PanicOnError:
 				panic(err)

@ -146,6 +169,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 				if opts.ErrorLog != nil {
 					opts.ErrorLog.Println("error encoding and sending metric family:", err)
 				}
+				errCnt.WithLabelValues("encoding").Inc()
 				switch opts.ErrorHandling {
 				case PanicOnError:
 					panic(err)

@ -236,9 +260,12 @@ const (
 	// Ignore errors and try to serve as many metrics as possible. However,
 	// if no metrics can be served, serve an HTTP status code 500 and the
 	// last error message in the body. Only use this in deliberate "best
-	// effort" metrics collection scenarios. It is recommended to at least
-	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
-	// errors completely.
+	// effort" metrics collection scenarios. In this case, it is highly
+	// recommended to provide other means of detecting errors: By setting an
+	// ErrorLog in HandlerOpts, the errors are logged. By providing a
+	// Registry in HandlerOpts, the exposed metrics include an error counter
+	// "promhttp_metric_handler_errors_total", which can be used for
+	// alerts.
 	ContinueOnError
 	// Panic upon the first error encountered (useful for "crash only" apps).
 	PanicOnError

@ -261,6 +288,18 @@ type HandlerOpts struct {
 	// logged regardless of the configured ErrorHandling provided ErrorLog
 	// is not nil.
 	ErrorHandling HandlerErrorHandling
+	// If Registry is not nil, it is used to register a metric
+	// "promhttp_metric_handler_errors_total", partitioned by "cause". A
+	// failed registration causes a panic. Note that this error counter is
+	// different from the instrumentation you get from the various
+	// InstrumentHandler... helpers. It counts errors that don't necessarily
+	// result in a non-2xx HTTP status code. There are two typical cases:
+	// (1) Encoding errors that only happen after streaming of the HTTP body
+	// has already started (and the status code 200 has been sent). This
+	// should only happen with custom collectors. (2) Collection errors with
+	// no effect on the HTTP status code because ErrorHandling is set to
+	// ContinueOnError.
+	Registry prometheus.Registerer
 	// If DisableCompression is true, the handler will never compress the
 	// response, even if requested by the client.
 	DisableCompression bool
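A small sketch of opting into the new error counter by passing a registry through HandlerOpts; the variable names and address are illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorHandling: promhttp.ContinueOnError,
		// Reusing the scrape registry here makes gathering and encoding
		// failures visible as promhttp_metric_handler_errors_total{cause=...}.
		Registry: reg,
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```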
@ -325,10 +325,18 @@ func (r *Registry) Register(c Collector) error {
 		return nil
 	}
 	if existing, exists := r.collectorsByID[collectorID]; exists {
-		return AlreadyRegisteredError{
-			ExistingCollector: existing,
-			NewCollector:      c,
+		switch e := existing.(type) {
+		case *wrappingCollector:
+			return AlreadyRegisteredError{
+				ExistingCollector: e.unwrapRecursively(),
+				NewCollector:      c,
+			}
+		default:
+			return AlreadyRegisteredError{
+				ExistingCollector: e,
+				NewCollector:      c,
+			}
 		}
 	}
 	// If the collectorID is new, but at least one of the descs existed
 	// before, we are in trouble.
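The switch above keeps the AlreadyRegisteredError contract intact even when the existing collector was registered through a wrapping Registerer. A sketch of the idiom callers rely on; the metric name is illustrative:

```go
package metrics

import "github.com/prometheus/client_golang/prometheus"

// RequestCounter registers a counter vector, or reuses an identical one that
// was registered earlier.
func RequestCounter() *prometheus.CounterVec {
	c := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "myapp_requests_total",
		Help: "Requests seen, by status code.",
	}, []string{"code"})
	if err := prometheus.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			// An identical collector was registered first; reuse it.
			return are.ExistingCollector.(*prometheus.CounterVec)
		}
		panic(err)
	}
	return c
}
```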
@ -39,7 +39,7 @@ const quantileLabel = "quantile"
 // A typical use-case is the observation of request latencies. By default, a
 // Summary provides the median, the 90th and the 99th percentile of the latency
 // as rank estimations. However, the default behavior will change in the
-// upcoming v0.10 of the library. There will be no rank estimations at all by
+// upcoming v1.0.0 of the library. There will be no rank estimations at all by
 // default. For a sane transition, it is recommended to set the desired rank
 // estimations explicitly.
 //

@ -58,16 +58,8 @@ type Summary interface {
 	Observe(float64)
 }

-// DefObjectives are the default Summary quantile values.
-//
-// Deprecated: DefObjectives will not be used as the default objectives in
-// v0.10 of the library. The default Summary will have no quantiles then.
-var (
-	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
-
-	errQuantileLabelNotAllowed = fmt.Errorf(
-		"%q is not allowed as label name in summaries", quantileLabel,
-	)
-)
+var errQuantileLabelNotAllowed = fmt.Errorf(
+	"%q is not allowed as label name in summaries", quantileLabel,
+)

 // Default values for SummaryOpts.

@ -86,7 +78,7 @@ const (
 // mandatory to set Name to a non-empty string. While all other fields are
 // optional and can safely be left at their zero value, it is recommended to set
 // a help string and to explicitly set the Objectives field to the desired value
-// as the default value will change in the upcoming v0.10 of the library.
+// as the default value will change in the upcoming v1.0.0 of the library.
 type SummaryOpts struct {
 	// Namespace, Subsystem, and Name are components of the fully-qualified
 	// name of the Summary (created by joining these components with

@ -123,14 +115,8 @@ type SummaryOpts struct {
 	// Objectives defines the quantile rank estimates with their respective
 	// absolute error. If Objectives[q] = e, then the value reported for q
 	// will be the φ-quantile value for some φ between q-e and q+e. The
-	// default value is DefObjectives. It is used if Objectives is left at
-	// its zero value (i.e. nil). To create a Summary without Objectives,
-	// set it to an empty map (i.e. map[float64]float64{}).
-	//
-	// Note that the current value of DefObjectives is deprecated. It will
-	// be replaced by an empty map in v0.10 of the library. Please
-	// explicitly set Objectives to the desired value to avoid problems
-	// during the transition.
+	// default value is an empty map, resulting in a summary without
+	// quantiles.
 	Objectives map[float64]float64

 	// MaxAge defines the duration for which an observation stays relevant

@ -199,7 +185,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
 	}

 	if opts.Objectives == nil {
-		opts.Objectives = DefObjectives
+		opts.Objectives = map[float64]float64{}
 	}

 	if opts.MaxAge < 0 {
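With DefObjectives removed, a Summary that should still publish quantiles has to spell them out. A minimal sketch; the metric name and objective values are illustrative:

```go
package metrics

import "github.com/prometheus/client_golang/prometheus"

var requestLatency = prometheus.NewSummary(prometheus.SummaryOpts{
	Name: "myapp_request_duration_seconds",
	Help: "Request latency.",
	// A nil Objectives map now means "no quantiles", so the classic set has
	// to be stated explicitly if it is still wanted.
	Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})

func init() {
	prometheus.MustRegister(requestLatency)
}
```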
@ -32,6 +32,12 @@ import (
 // WrapRegistererWith provides a way to add fixed labels to a subset of
 // Collectors. It should not be used to add fixed labels to all metrics exposed.
 //
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+//
 // The Collector example demonstrates a use of WrapRegistererWith.
 func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
 	return &wrappingRegisterer{

@ -54,6 +60,12 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
 // (see NewGoCollector) and the process collector (see NewProcessCollector). (In
 // fact, those metrics are already prefixed with “go_” or “process_”,
 // respectively.)
+//
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
 func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
 	return &wrappingRegisterer{
 		wrappedRegisterer: reg,

@ -123,6 +135,15 @@ func (c *wrappingCollector) Describe(ch chan<- *Desc) {
 		}
 	}
 }
+
+func (c *wrappingCollector) unwrapRecursively() Collector {
+	switch wc := c.wrappedCollector.(type) {
+	case *wrappingCollector:
+		return wc.unwrapRecursively()
+	default:
+		return wc
+	}
+}

 type wrappingMetric struct {
 	wrappedMetric Metric
 	prefix        string
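A short sketch of WrapRegistererWith, whose conflict behavior the new doc comments describe; the label and metric names are illustrative:

```go
package metrics

import "github.com/prometheus/client_golang/prometheus"

func registerIngesterMetrics(reg *prometheus.Registry) prometheus.Counter {
	// Collectors registered through the wrapped Registerer carry a fixed
	// component label; conflicts with collectors registered directly on reg
	// are still detected, as documented above.
	wrapped := prometheus.WrapRegistererWith(prometheus.Labels{"component": "ingester"}, reg)
	ops := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_ops_total",
		Help: "Operations handled by this component.",
	})
	wrapped.MustRegister(ops)
	return ops
}
```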
@ -150,7 +150,13 @@ func (t *Time) UnmarshalJSON(b []byte) error {
 			return err
 		}

-		*t = Time(v + va)
+		// If the value was something like -0.1 the negative is lost in the
+		// parsing because of the leading zero, this ensures that we capture it.
+		if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
+			*t = Time(v+va) * -1
+		} else {
+			*t = Time(v + va)
+		}

 	default:
 		return fmt.Errorf("invalid time %q", string(b))
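A quick illustration of what the sign fix changes when decoding a sub-second negative timestamp, assuming the common/model package as vendored here:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/prometheus/common/model"
)

func main() {
	var t model.Time
	// Before the fix, the leading "-0" lost its sign and this decoded to
	// +100ms; with the change it decodes to -100ms.
	if err := json.Unmarshal([]byte(`-0.100`), &t); err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.Time().UTC()) // 100ms before the Unix epoch.
}
```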
@ -14,6 +14,7 @@
 include Makefile.common

 %/.unpacked: %.ttar
+	@echo ">> extracting fixtures"
 	./ttar -C $(dir $*) -x -f $*.ttar
 	touch $@
@ -69,7 +69,7 @@ else
 GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
 endif

-PROMU_VERSION ?= 0.3.0
+PROMU_VERSION ?= 0.4.0
 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

 GOLANGCI_LINT :=
@ -1,7 +1,7 @@
 # procfs

 This procfs package provides functions to retrieve system, kernel and process
-metrics from the pseudo-filesystem proc.
+metrics from the pseudo-filesystems /proc and /sys.

 *WARNING*: This package is a work in progress. Its API may still break in
 backwards-incompatible ways without warnings. Use it at your own risk.

@ -9,3 +9,45 @@ backwards-incompatible ways without warnings. Use it at your own risk.
 [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
 [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
 [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
+
+## Usage
+
+The procfs library is organized by packages based on whether the gathered data is coming from
+/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, /sys, or both. For example, current cpu statistics are gathered from
+`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
+point is initialized, and then the stat information is read.
+
+```go
+fs, err := procfs.NewFS("/proc")
+stats, err := fs.Stat()
+```
+
+Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems.
+
+```go
+fs, err := blockdevice.NewFS("/proc", "/sys")
+stats, err := fs.ProcDiskstats()
+```
+
+## Building and Testing
+
+The procfs library is normally built as part of another application. However, when making
+changes to the library, the `make test` command can be used to run the API test suite.
+
+### Updating Test Fixtures
+
+The procfs library includes a set of test fixtures which include many example files from
+the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
+which is extracted automatically during testing. To add/update the test fixtures, first
+ensure the `fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+
+```bash
+rm -rf fixtures
+make test
+```
+
+Next, make the required changes to the extracted files in the `fixtures` directory. When
+the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
+based on the updated `fixtures` directory. And finally, verify the changes using
+`git diff fixtures.ttar`.
@ -31,18 +31,8 @@ type BuddyInfo struct {
 	Sizes  []float64
 }

-// NewBuddyInfo reads the buddyinfo statistics.
-func NewBuddyInfo() ([]BuddyInfo, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return nil, err
-	}
-
-	return fs.NewBuddyInfo()
-}
-
 // NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
-func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
 	file, err := os.Open(fs.proc.Path("buddyinfo"))
 	if err != nil {
 		return nil, err
@ -75,13 +75,13 @@ Max realtime timeout unlimited unlimited us
 Mode: 644
 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 Path: fixtures/proc/26231/mountstats
-Lines: 19
+Lines: 20
 device rootfs mounted on / with fstype rootfs
 device sysfs mounted on /sys with fstype sysfs
 device proc mounted on /proc with fstype proc
 device /dev/sda1 mounted on / with fstype ext4
 device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
-	opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none
+	opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none
 	age: 13968
 	caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
 	nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured

@ -94,6 +94,7 @@ device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=
 	NULL: 0 0 0 0 0 0 0 0
 	READ: 1298 1298 0 207680 1210292152 6 79386 79407
 	WRITE: 0 0 0 0 0 0 0 0
+	ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717

 Mode: 644
 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

@ -125,6 +126,63 @@ Lines: 1
 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
 Mode: 644
 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/26231/status
+Lines: 53
+Name: prometheus
+Umask: 0022
+State: S (sleeping)
+Tgid: 1
+Ngid: 0
+Pid: 1
+PPid: 0
+TracerPid: 0
+Uid: 0 0 0 0
+Gid: 0 0 0 0
+FDSize: 128
+Groups:
+NStgid: 1
+NSpid: 1
+NSpgid: 1
+NSsid: 1
+VmPeak: 58472 kB
+VmSize: 58440 kB
+VmLck: 0 kB
+VmPin: 0 kB
+VmHWM: 8028 kB
+VmRSS: 6716 kB
+RssAnon: 2092 kB
+RssFile: 4624 kB
+RssShmem: 0 kB
+VmData: 2580 kB
+VmStk: 136 kB
+VmExe: 948 kB
+VmLib: 6816 kB
+VmPTE: 128 kB
+VmPMD: 12 kB
+VmSwap: 660 kB
+HugetlbPages: 0 kB
+Threads: 1
+SigQ: 8/63965
+SigPnd: 0000000000000000
+ShdPnd: 0000000000000000
+SigBlk: 7be3c0fe28014a03
+SigIgn: 0000000000001000
+SigCgt: 00000001800004ec
+CapInh: 0000000000000000
+CapPrm: 0000003fffffffff
+CapEff: 0000003fffffffff
+CapBnd: 0000003fffffffff
+CapAmb: 0000000000000000
+Seccomp: 0
+Cpus_allowed: ff
+Cpus_allowed_list: 0-7
+Mems_allowed: 00000000,00000001
+Mems_allowed_list: 0
+voluntary_ctxt_switches: 4742839
+nonvoluntary_ctxt_switches: 1727500
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 Directory: fixtures/proc/26232
 Mode: 755
 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

@ -402,6 +460,26 @@ proc4 2 2 10853
 proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 Mode: 644
 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/unix
+Lines: 6
+Num       RefCount Protocol Flags    Type St Inode Path
+0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432
+0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control
+0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log
+0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432
+0000000000000000: 00000003 00000000 00000000 0001 03 5091797
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/proc/net/unix_without_inode
+Lines: 6
+Num       RefCount Protocol Flags    Type St Path
+0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432
+0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control
+0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log
+0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432
+0000000000000000: 00000003 00000000 00000000 0001 03
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 Path: fixtures/proc/net/xfrm_stat
 Lines: 28
 XfrmInError 1

@ -1107,6 +1185,22 @@ Mode: 644
 Directory: fixtures/sys/devices/system
 Mode: 775
 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/clocksource
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices/system/clocksource/clocksource0
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource
+Lines: 1
+tsc hpet acpi_pm
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource
+Lines: 1
+tsc
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 Directory: fixtures/sys/devices/system/cpu
 Mode: 775
 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -26,8 +26,14 @@ type FS struct {
 // DefaultMountPoint is the common mount point of the proc filesystem.
 const DefaultMountPoint = fs.DefaultProcMountPoint
 
+// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
+// It will error if the mount point directory can't be read or is a file.
+func NewDefaultFS() (FS, error) {
+    return NewFS(DefaultMountPoint)
+}
+
 // NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
-// if the mount point dirctory can't be read or is a file.
+// if the mount point directory can't be read or is a file.
 func NewFS(mountPoint string) (FS, error) {
     fs, err := fs.NewFS(mountPoint)
     if err != nil {
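The hunk above adds a NewDefaultFS constructor next to NewFS. For reference, a minimal usage sketch (not part of this commit) for callers of the vendored procfs package; it assumes nothing beyond the two constructors and the DefaultMountPoint constant shown above.

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    // NewDefaultFS mounts under procfs.DefaultMountPoint ("/proc").
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("procfs handle ready: %T\n", fs)

    // A custom mount point can still be passed explicitly.
    if _, err := procfs.NewFS("/proc"); err != nil {
        log.Fatal(err)
    }
}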
@@ -62,18 +62,8 @@ type IPVSBackendStatus struct {
     Weight uint64
 }
 
-// NewIPVSStats reads the IPVS statistics.
-func NewIPVSStats() (IPVSStats, error) {
-    fs, err := NewFS(DefaultMountPoint)
-    if err != nil {
-        return IPVSStats{}, err
-    }
-
-    return fs.NewIPVSStats()
-}
-
-// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
-func (fs FS) NewIPVSStats() (IPVSStats, error) {
+// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) IPVSStats() (IPVSStats, error) {
     file, err := os.Open(fs.proc.Path("net/ip_vs_stats"))
     if err != nil {
         return IPVSStats{}, err
@@ -131,18 +121,8 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
     return stats, nil
 }
 
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
-func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
-    fs, err := NewFS(DefaultMountPoint)
-    if err != nil {
-        return []IPVSBackendStatus{}, err
-    }
-
-    return fs.NewIPVSBackendStatus()
-}
-
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
-func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
     file, err := os.Open(fs.proc.Path("net/ip_vs"))
     if err != nil {
         return nil, err
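Both package-level IPVS constructors are folded into methods on FS. A small migration sketch (not part of this commit), using only the method names and the Weight field visible in these hunks:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }

    stats, err := fs.IPVSStats() // was: procfs.NewIPVSStats()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("IPVS stats: %+v\n", stats)

    backends, err := fs.IPVSBackendStatus() // was: procfs.NewIPVSBackendStatus()
    if err != nil {
        log.Fatal(err)
    }
    for _, b := range backends {
        fmt.Printf("backend weight: %d\n", b.Weight)
    }
}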
@@ -42,64 +42,64 @@ type MDStat struct {
     BlocksSynced int64
 }
 
-// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
-func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
-    mdStatusFilePath := fs.proc.Path("mdstat")
-    content, err := ioutil.ReadFile(mdStatusFilePath)
+// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info. More information available here:
+// https://raid.wiki.kernel.org/index.php/Mdstat
+func (fs FS) MDStat() ([]MDStat, error) {
+    data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
     if err != nil {
-        return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+        return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
     }
+    mdstat, err := parseMDStat(data)
+    if err != nil {
+        return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
+    }
+    return mdstat, nil
+}
 
-    mdStates := []MDStat{}
-    lines := strings.Split(string(content), "\n")
+// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info.
+func parseMDStat(mdstatData []byte) ([]MDStat, error) {
+    mdStats := []MDStat{}
+    lines := strings.Split(string(mdstatData), "\n")
     for i, l := range lines {
-        if l == "" {
-            continue
-        }
-        if l[0] == ' ' {
-            continue
-        }
-        if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+        if strings.TrimSpace(l) == "" || l[0] == ' ' ||
+            strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
             continue
         }
 
-        mainLine := strings.Split(l, " ")
-        if len(mainLine) < 3 {
-            return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+        deviceFields := strings.Fields(l)
+        if len(deviceFields) < 3 {
+            return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", l)
         }
-        mdName := mainLine[0]
-        activityState := mainLine[2]
+        mdName := deviceFields[0]
+        activityState := deviceFields[2]
 
         if len(lines) <= i+3 {
-            return mdStates, fmt.Errorf(
-                "error parsing %s: too few lines for md device %s",
-                mdStatusFilePath,
-                mdName,
-            )
+            return mdStats, fmt.Errorf("missing lines for md device %s", mdName)
         }
 
-        active, total, size, err := evalStatusline(lines[i+1])
+        active, total, size, err := evalStatusLine(lines[i+1])
         if err != nil {
-            return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+            return nil, err
         }
 
-        // j is the line number of the syncing-line.
-        j := i + 2
+        syncLineIdx := i + 2
         if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
-            j = i + 3
+            syncLineIdx++
         }
 
-        // If device is syncing at the moment, get the number of currently
+        // If device is recovering/syncing at the moment, get the number of currently
         // synced bytes, otherwise that number equals the size of the device.
         syncedBlocks := size
-        if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
-            syncedBlocks, err = evalBuildline(lines[j])
+        if strings.Contains(lines[syncLineIdx], "recovery") || strings.Contains(lines[syncLineIdx], "resync") {
+            syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
             if err != nil {
-                return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+                return nil, err
             }
         }
 
-        mdStates = append(mdStates, MDStat{
+        mdStats = append(mdStats, MDStat{
             Name:          mdName,
             ActivityState: activityState,
             DisksActive:   active,
@@ -109,10 +109,10 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
         })
     }
 
-    return mdStates, nil
+    return mdStats, nil
 }
 
-func evalStatusline(statusline string) (active, total, size int64, err error) {
+func evalStatusLine(statusline string) (active, total, size int64, err error) {
     matches := statuslineRE.FindStringSubmatch(statusline)
     if len(matches) != 4 {
         return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
@@ -136,7 +136,7 @@ func evalStatusline(statusline string) (active, total, size int64, err error) {
     return active, total, size, nil
 }
 
-func evalBuildline(buildline string) (syncedBlocks int64, err error) {
+func evalRecoveryLine(buildline string) (syncedBlocks int64, err error) {
     matches := buildlineRE.FindStringSubmatch(buildline)
     if len(matches) != 2 {
         return 0, fmt.Errorf("unexpected buildline: %s", buildline)
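ParseMDStat is split into an exported fs.MDStat and an internal parseMDStat with tightened error handling. A hedged usage sketch of the new entry point (not part of this commit), relying only on the MDStat fields that appear in this hunk:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    // MDStat replaces the old ParseMDStat and returns one entry per md device.
    devices, err := fs.MDStat()
    if err != nil {
        log.Fatal(err)
    }
    for _, md := range devices {
        fmt.Printf("%s: state=%s active disks=%d synced blocks=%d\n",
            md.Name, md.ActivityState, md.DisksActive, md.BlocksSynced)
    }
}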
@@ -69,8 +69,8 @@ type MountStats interface {
 type MountStatsNFS struct {
     // The version of statistics provided.
     StatVersion string
-    // The optional mountaddr of the NFS mount.
-    MountAddress string
+    // The mount options of the NFS mount.
+    Opts map[string]string
     // The age of the NFS mount.
     Age time.Duration
     // Statistics related to byte counters for various operations.
@@ -181,11 +181,11 @@ type NFSOperationStats struct {
     // Number of bytes received for this operation, including RPC headers and payload.
     BytesReceived uint64
     // Duration all requests spent queued for transmission before they were sent.
-    CumulativeQueueTime time.Duration
+    CumulativeQueueMilliseconds uint64
     // Duration it took to get a reply back after the request was transmitted.
-    CumulativeTotalResponseTime time.Duration
+    CumulativeTotalResponseMilliseconds uint64
     // Duration from when a request was enqueued to when it was completely handled.
-    CumulativeTotalRequestTime time.Duration
+    CumulativeTotalRequestMilliseconds uint64
 }
 
 // A NFSTransportStats contains statistics for the NFS mount RPC requests and
@@ -204,7 +204,7 @@ type NFSTransportStats struct {
     // spent waiting for connections to the server to be established.
     ConnectIdleTime uint64
     // Duration since the NFS mount last saw any RPC traffic.
-    IdleTime time.Duration
+    IdleTimeSeconds uint64
     // Number of RPC requests for this mount sent to the NFS server.
     Sends uint64
     // Number of RPC responses for this mount received from the NFS server.
@@ -342,10 +342,15 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
 
         switch ss[0] {
         case fieldOpts:
+            if stats.Opts == nil {
+                stats.Opts = map[string]string{}
+            }
             for _, opt := range strings.Split(ss[1], ",") {
                 split := strings.Split(opt, "=")
-                if len(split) == 2 && split[0] == "mountaddr" {
-                    stats.MountAddress = split[1]
+                if len(split) == 2 {
+                    stats.Opts[split[0]] = split[1]
+                } else {
+                    stats.Opts[opt] = ""
                 }
             }
         case fieldAge:
@@ -525,9 +530,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
             MajorTimeouts: ns[2],
             BytesSent:     ns[3],
             BytesReceived: ns[4],
-            CumulativeQueueTime:         time.Duration(ns[5]) * time.Millisecond,
-            CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
-            CumulativeTotalRequestTime:  time.Duration(ns[7]) * time.Millisecond,
+            CumulativeQueueMilliseconds:         ns[5],
+            CumulativeTotalResponseMilliseconds: ns[6],
+            CumulativeTotalRequestMilliseconds:  ns[7],
         })
     }
 
@@ -603,7 +608,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
             Bind:            ns[1],
             Connect:         ns[2],
             ConnectIdleTime: ns[3],
-            IdleTime: time.Duration(ns[4]) * time.Second,
+            IdleTimeSeconds: ns[4],
             Sends:             ns[5],
             Receives:          ns[6],
             BadTransactionIDs: ns[7],
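The NFS structs drop time.Duration in favour of raw millisecond/second counters, and the single MountAddress field becomes a generic Opts map. The sketch below (not part of this commit) shows how a caller might adapt; it assumes only the renamed fields shown above and converts them back to durations explicitly.

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/procfs"
)

// queueTime converts the new raw millisecond counter back into the
// time.Duration that users of the old CumulativeQueueTime field expected.
func queueTime(op procfs.NFSOperationStats) time.Duration {
    return time.Duration(op.CumulativeQueueMilliseconds) * time.Millisecond
}

// idleTime does the same for the transport-level IdleTimeSeconds counter.
func idleTime(ts procfs.NFSTransportStats) time.Duration {
    return time.Duration(ts.IdleTimeSeconds) * time.Second
}

func main() {
    var stats procfs.MountStatsNFS
    // The mount address is no longer a dedicated field; it is one entry in
    // the generic Opts map (zero values used here purely for illustration).
    fmt.Println("mountaddr:", stats.Opts["mountaddr"])
    fmt.Println(queueTime(procfs.NFSOperationStats{}), idleTime(procfs.NFSTransportStats{}))
}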
@@ -47,23 +47,13 @@ type NetDevLine struct {
 // are interface names.
 type NetDev map[string]NetDevLine
 
-// NewNetDev returns kernel/system statistics read from /proc/net/dev.
-func NewNetDev() (NetDev, error) {
-    fs, err := NewFS(DefaultMountPoint)
-    if err != nil {
-        return nil, err
-    }
-
-    return fs.NewNetDev()
-}
-
-// NewNetDev returns kernel/system statistics read from /proc/net/dev.
-func (fs FS) NewNetDev() (NetDev, error) {
+// NetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NetDev() (NetDev, error) {
     return newNetDev(fs.proc.Path("net/dev"))
 }
 
-// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
-func (p Proc) NewNetDev() (NetDev, error) {
+// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NetDev() (NetDev, error) {
     return newNetDev(p.path("net/dev"))
 }
 
@@ -75,7 +65,7 @@ func newNetDev(file string) (NetDev, error) {
     }
     defer f.Close()
 
-    nd := NetDev{}
+    netDev := NetDev{}
     s := bufio.NewScanner(f)
     for n := 0; s.Scan(); n++ {
         // Skip the 2 header lines.
@@ -83,20 +73,20 @@ func newNetDev(file string) (NetDev, error) {
             continue
         }
 
-        line, err := nd.parseLine(s.Text())
+        line, err := netDev.parseLine(s.Text())
         if err != nil {
-            return nd, err
+            return netDev, err
         }
 
-        nd[line.Name] = *line
+        netDev[line.Name] = *line
     }
 
-    return nd, s.Err()
+    return netDev, s.Err()
 }
 
 // parseLine parses a single line from the /proc/net/dev file. Header lines
 // must be filtered prior to calling this method.
-func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
     parts := strings.SplitN(rawLine, ":", 2)
     if len(parts) != 2 {
         return nil, errors.New("invalid net/dev line, missing colon")
@@ -185,11 +175,11 @@ func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
 
 // Total aggregates the values across interfaces and returns a new NetDevLine.
 // The Name field will be a sorted comma separated list of interface names.
-func (nd NetDev) Total() NetDevLine {
+func (netDev NetDev) Total() NetDevLine {
     total := NetDevLine{}
 
-    names := make([]string, 0, len(nd))
-    for _, ifc := range nd {
+    names := make([]string, 0, len(netDev))
+    for _, ifc := range netDev {
         names = append(names, ifc.Name)
         total.RxBytes += ifc.RxBytes
         total.RxPackets += ifc.RxPackets
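The package-level NewNetDev and the FS/Proc NewNetDev methods collapse into NetDev methods. A short usage sketch of the FS variant (not part of this commit), using only NetDevLine fields that appear in this diff:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    // fs.NetDev() replaces both procfs.NewNetDev() and fs.NewNetDev().
    netDev, err := fs.NetDev()
    if err != nil {
        log.Fatal(err)
    }
    for name, line := range netDev {
        fmt.Printf("%s: rx_bytes=%d rx_packets=%d\n", name, line.RxBytes, line.RxPackets)
    }
    total := netDev.Total()
    fmt.Printf("total (%s): rx_bytes=%d\n", total.Name, total.RxBytes)
}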
@@ -0,0 +1,275 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+    "bufio"
+    "errors"
+    "fmt"
+    "io"
+    "os"
+    "strconv"
+    "strings"
+)
+
+// For the proc file format details,
+// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
+// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
+
+const (
+    netUnixKernelPtrIdx = iota
+    netUnixRefCountIdx
+    _
+    netUnixFlagsIdx
+    netUnixTypeIdx
+    netUnixStateIdx
+    netUnixInodeIdx
+
+    // Inode and Path are optional.
+    netUnixStaticFieldsCnt = 6
+)
+
+const (
+    netUnixTypeStream    = 1
+    netUnixTypeDgram     = 2
+    netUnixTypeSeqpacket = 5
+
+    netUnixFlagListen = 1 << 16
+
+    netUnixStateUnconnected  = 1
+    netUnixStateConnecting   = 2
+    netUnixStateConnected    = 3
+    netUnixStateDisconnected = 4
+)
+
+var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
+
+// NetUnixType is the type of the type field.
+type NetUnixType uint64
+
+// NetUnixFlags is the type of the flags field.
+type NetUnixFlags uint64
+
+// NetUnixState is the type of the state field.
+type NetUnixState uint64
+
+// NetUnixLine represents a line of /proc/net/unix.
+type NetUnixLine struct {
+    KernelPtr string
+    RefCount  uint64
+    Protocol  uint64
+    Flags     NetUnixFlags
+    Type      NetUnixType
+    State     NetUnixState
+    Inode     uint64
+    Path      string
+}
+
+// NetUnix holds the data read from /proc/net/unix.
+type NetUnix struct {
+    Rows []*NetUnixLine
+}
+
+// NewNetUnix returns data read from /proc/net/unix.
+func NewNetUnix() (*NetUnix, error) {
+    fs, err := NewFS(DefaultMountPoint)
+    if err != nil {
+        return nil, err
+    }
+
+    return fs.NewNetUnix()
+}
+
+// NewNetUnix returns data read from /proc/net/unix.
+func (fs FS) NewNetUnix() (*NetUnix, error) {
+    return NewNetUnixByPath(fs.proc.Path("net/unix"))
+}
+
+// NewNetUnixByPath returns data read from /proc/net/unix by file path.
+// It might returns an error with partial parsed data, if an error occur after some data parsed.
+func NewNetUnixByPath(path string) (*NetUnix, error) {
+    f, err := os.Open(path)
+    if err != nil {
+        return nil, err
+    }
+    defer f.Close()
+    return NewNetUnixByReader(f)
+}
+
+// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
+// It might returns an error with partial parsed data, if an error occur after some data parsed.
+func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
+    nu := &NetUnix{
+        Rows: make([]*NetUnixLine, 0, 32),
+    }
+    scanner := bufio.NewScanner(reader)
+    // Omit the header line.
+    scanner.Scan()
+    header := scanner.Text()
+    // From the man page of proc(5), it does not contain an Inode field,
+    // but in actually it exists.
+    // This code works for both cases.
+    hasInode := strings.Contains(header, "Inode")
+
+    minFieldsCnt := netUnixStaticFieldsCnt
+    if hasInode {
+        minFieldsCnt++
+    }
+    for scanner.Scan() {
+        line := scanner.Text()
+        item, err := nu.parseLine(line, hasInode, minFieldsCnt)
+        if err != nil {
+            return nu, err
+        }
+        nu.Rows = append(nu.Rows, item)
+    }
+
+    return nu, scanner.Err()
+}
+
+func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
+    fields := strings.Fields(line)
+    fieldsLen := len(fields)
+    if fieldsLen < minFieldsCnt {
+        return nil, fmt.Errorf(
+            "Parse Unix domain failed: expect at least %d fields but got %d",
+            minFieldsCnt, fieldsLen)
+    }
+    kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
+    if err != nil {
+        return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
+    }
+    users, err := u.parseUsers(fields[netUnixRefCountIdx])
+    if err != nil {
+        return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
+    }
+    flags, err := u.parseFlags(fields[netUnixFlagsIdx])
+    if err != nil {
+        return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
+    }
+    typ, err := u.parseType(fields[netUnixTypeIdx])
+    if err != nil {
+        return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
+    }
+    state, err := u.parseState(fields[netUnixStateIdx])
+    if err != nil {
+        return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
+    }
+    var inode uint64
+    if hasInode {
+        inodeStr := fields[netUnixInodeIdx]
+        inode, err = u.parseInode(inodeStr)
+        if err != nil {
+            return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
+        }
+    }
+
+    nuLine := &NetUnixLine{
+        KernelPtr: kernelPtr,
+        RefCount:  users,
+        Type:      typ,
+        Flags:     flags,
+        State:     state,
+        Inode:     inode,
+    }
+
+    // Path field is optional.
+    if fieldsLen > minFieldsCnt {
+        pathIdx := netUnixInodeIdx + 1
+        if !hasInode {
+            pathIdx--
+        }
+        nuLine.Path = fields[pathIdx]
+    }
+
+    return nuLine, nil
+}
+
+func (u NetUnix) parseKernelPtr(str string) (string, error) {
+    if !strings.HasSuffix(str, ":") {
+        return "", errInvalidKernelPtrFmt
+    }
+    return str[:len(str)-1], nil
+}
+
+func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
+    return strconv.ParseUint(hexStr, 16, 32)
+}
+
+func (u NetUnix) parseProtocol(hexStr string) (uint64, error) {
+    return strconv.ParseUint(hexStr, 16, 32)
+}
+
+func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
+    typ, err := strconv.ParseUint(hexStr, 16, 16)
+    if err != nil {
+        return 0, err
+    }
+    return NetUnixType(typ), nil
+}
+
+func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
+    flags, err := strconv.ParseUint(hexStr, 16, 32)
+    if err != nil {
+        return 0, err
+    }
+    return NetUnixFlags(flags), nil
+}
+
+func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
+    st, err := strconv.ParseInt(hexStr, 16, 8)
+    if err != nil {
+        return 0, err
+    }
+    return NetUnixState(st), nil
+}
+
+func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
+    return strconv.ParseUint(inodeStr, 10, 64)
+}
+
+func (t NetUnixType) String() string {
+    switch t {
+    case netUnixTypeStream:
+        return "stream"
+    case netUnixTypeDgram:
+        return "dgram"
+    case netUnixTypeSeqpacket:
+        return "seqpacket"
+    }
+    return "unknown"
+}
+
+func (f NetUnixFlags) String() string {
+    switch f {
+    case netUnixFlagListen:
+        return "listen"
+    default:
+        return "default"
+    }
+}
+
+func (s NetUnixState) String() string {
+    switch s {
+    case netUnixStateUnconnected:
+        return "unconnected"
+    case netUnixStateConnecting:
+        return "connecting"
+    case netUnixStateConnected:
+        return "connected"
+    case netUnixStateDisconnected:
+        return "disconnected"
+    }
+    return "unknown"
+}
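The new file above adds a parser for /proc/net/unix to the vendored procfs package. A brief usage sketch (not part of this commit) of the constructors and Stringer methods it defines:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    // NewNetUnix reads /proc/net/unix under the default mount point; the
    // per-FS variant fs.NewNetUnix() is also available.
    nu, err := procfs.NewNetUnix()
    if err != nil {
        log.Fatal(err)
    }
    for _, row := range nu.Rows {
        // Type, State and Flags implement fmt.Stringer.
        fmt.Printf("%s type=%s state=%s inode=%d path=%q\n",
            row.KernelPtr, row.Type, row.State, row.Inode, row.Path)
    }
}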
@@ -54,7 +54,7 @@ func NewProc(pid int) (Proc, error) {
     if err != nil {
         return Proc{}, err
     }
-    return fs.NewProc(pid)
+    return fs.Proc(pid)
 }
 
 // AllProcs returns a list of all currently available processes under /proc.
@@ -76,11 +76,18 @@ func (fs FS) Self() (Proc, error) {
     if err != nil {
         return Proc{}, err
     }
-    return fs.NewProc(pid)
+    return fs.Proc(pid)
 }
 
 // NewProc returns a process for the given pid.
+//
+// Deprecated: use fs.Proc() instead
 func (fs FS) NewProc(pid int) (Proc, error) {
+    return fs.Proc(pid)
+}
+
+// Proc returns a process for the given pid.
+func (fs FS) Proc(pid int) (Proc, error) {
     if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
         return Proc{}, err
     }
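fs.NewProc becomes a deprecated wrapper around the new fs.Proc. A migration sketch (not part of this commit):

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    // fs.Proc is the new name; fs.NewProc still compiles but is deprecated.
    p, err := fs.Proc(os.Getpid())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("got handle for pid %d\n", p.PID)
}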
@@ -39,8 +39,8 @@ type ProcIO struct {
     CancelledWriteBytes int64
 }
 
-// NewIO creates a new ProcIO instance from a given Proc instance.
-func (p Proc) NewIO() (ProcIO, error) {
+// IO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) IO() (ProcIO, error) {
     pio := ProcIO{}
 
     f, err := os.Open(p.path("io"))
@@ -78,7 +78,14 @@ var (
 )
 
 // NewLimits returns the current soft limits of the process.
+//
+// Deprecated: use p.Limits() instead
 func (p Proc) NewLimits() (ProcLimits, error) {
+    return p.Limits()
+}
+
+// Limits returns the current soft limits of the process.
+func (p Proc) Limits() (ProcLimits, error) {
     f, err := os.Open(p.path("limits"))
     if err != nil {
         return ProcLimits{}, err
@@ -29,9 +29,9 @@ type Namespace struct {
 // Namespaces contains all of the namespaces that the process is contained in.
 type Namespaces map[string]Namespace
 
-// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the
+// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
 // process is a member.
-func (p Proc) NewNamespaces() (Namespaces, error) {
+func (p Proc) Namespaces() (Namespaces, error) {
     d, err := os.Open(p.path("ns"))
     if err != nil {
         return nil, err
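The three hunks above rename the per-process accessors NewIO, NewLimits and NewNamespaces to IO, Limits and Namespaces (NewLimits stays behind as a deprecated wrapper). One combined usage sketch (not part of this commit) covering all three:

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    p, err := fs.Proc(os.Getpid())
    if err != nil {
        log.Fatal(err)
    }

    procIO, err := p.IO() // was: p.NewIO()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("io: %+v\n", procIO)

    limits, err := p.Limits() // NewLimits() remains as a deprecated wrapper
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("limits: %+v\n", limits)

    ns, err := p.Namespaces() // was: p.NewNamespaces()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d namespaces\n", len(ns))
}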
@@ -51,19 +51,10 @@ type PSIStats struct {
     Full *PSILine
 }
 
-// NewPSIStatsForResource reads pressure stall information for the specified
-// resource. At time of writing this can be either "cpu", "memory" or "io".
-func NewPSIStatsForResource(resource string) (PSIStats, error) {
-    fs, err := NewFS(DefaultMountPoint)
-    if err != nil {
-        return PSIStats{}, err
-    }
-
-    return fs.NewPSIStatsForResource(resource)
-}
-
-// NewPSIStatsForResource reads pressure stall information from /proc/pressure/<resource>
-func (fs FS) NewPSIStatsForResource(resource string) (PSIStats, error) {
+// PSIStatsForResource reads pressure stall information for the specified
+// resource from /proc/pressure/<resource>. At time of writing this can be
+// either "cpu", "memory" or "io".
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
     file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
     if err != nil {
         return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
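PSI stats likewise move from a package-level constructor to a method on FS. A hedged sketch (not part of this commit); the PSIStats contents are printed generically since only the Full field is visible in this hunk:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    for _, resource := range []string{"cpu", "memory", "io"} {
        stats, err := fs.PSIStatsForResource(resource) // was: procfs.NewPSIStatsForResource(resource)
        if err != nil {
            log.Printf("psi for %s unavailable: %v", resource, err)
            continue
        }
        fmt.Printf("%s: %+v\n", resource, stats)
    }
}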
@@ -105,7 +105,14 @@ type ProcStat struct {
 }
 
 // NewStat returns the current status information of the process.
+//
+// Deprecated: use NewStat() instead
 func (p Proc) NewStat() (ProcStat, error) {
+    return p.Stat()
+}
+
+// Stat returns the current status information of the process.
+func (p Proc) Stat() (ProcStat, error) {
     f, err := os.Open(p.path("stat"))
     if err != nil {
         return ProcStat{}, err
@@ -178,7 +185,7 @@ func (s ProcStat) ResidentMemory() int {
 // StartTime returns the unix timestamp of the process in seconds.
 func (s ProcStat) StartTime() (float64, error) {
     fs := FS{proc: s.proc}
-    stat, err := fs.NewStat()
+    stat, err := fs.Stat()
     if err != nil {
         return 0, err
     }
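p.NewStat becomes a deprecated wrapper around the new p.Stat, and StartTime now goes through fs.Stat. A usage sketch (not part of this commit), using only the ProcStat methods visible in these hunks:

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    p, err := fs.Proc(os.Getpid())
    if err != nil {
        log.Fatal(err)
    }
    stat, err := p.Stat() // was: p.NewStat()
    if err != nil {
        log.Fatal(err)
    }
    start, err := stat.StartTime()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("resident memory: %d, started at unix time %.0f\n", stat.ResidentMemory(), start)
}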
@@ -0,0 +1,162 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+    "bytes"
+    "io/ioutil"
+    "os"
+    "strconv"
+    "strings"
+)
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStatus struct {
+    // The process ID.
+    PID int
+    // The process name.
+    Name string
+
+    // Peak virtual memory size.
+    VmPeak uint64
+    // Virtual memory size.
+    VmSize uint64
+    // Locked memory size.
+    VmLck uint64
+    // Pinned memory size.
+    VmPin uint64
+    // Peak resident set size.
+    VmHWM uint64
+    // Resident set size (sum of RssAnnon RssFile and RssShmem).
+    VmRSS uint64
+    // Size of resident anonymous memory.
+    RssAnon uint64
+    // Size of resident file mappings.
+    RssFile uint64
+    // Size of resident shared memory.
+    RssShmem uint64
+    // Size of data segments.
+    VmData uint64
+    // Size of stack segments.
+    VmStk uint64
+    // Size of text segments.
+    VmExe uint64
+    // Shared library code size.
+    VmLib uint64
+    // Page table entries size.
+    VmPTE uint64
+    // Size of second-level page tables.
+    VmPMD uint64
+    // Swapped-out virtual memory size by anonymous private.
+    VmSwap uint64
+    // Size of hugetlb memory portions
+    HugetlbPages uint64
+
+    // Number of voluntary context switches.
+    VoluntaryCtxtSwitches uint64
+    // Number of involuntary context switches.
+    NonVoluntaryCtxtSwitches uint64
+}
+
+// NewStatus returns the current status information of the process.
+func (p Proc) NewStatus() (ProcStatus, error) {
+    f, err := os.Open(p.path("status"))
+    if err != nil {
+        return ProcStatus{}, err
+    }
+    defer f.Close()
+
+    data, err := ioutil.ReadAll(f)
+    if err != nil {
+        return ProcStatus{}, err
+    }
+
+    s := ProcStatus{PID: p.PID}
+
+    lines := strings.Split(string(data), "\n")
+    for _, line := range lines {
+        if !bytes.Contains([]byte(line), []byte(":")) {
+            continue
+        }
+
+        kv := strings.SplitN(line, ":", 2)
+
+        // removes spaces
+        k := string(strings.TrimSpace(kv[0]))
+        v := string(strings.TrimSpace(kv[1]))
+        // removes "kB"
+        v = string(bytes.Trim([]byte(v), " kB"))
+
+        // value to int when possible
+        // we can skip error check here, 'cause vKBytes is not used when value is a string
+        vKBytes, _ := strconv.ParseUint(v, 10, 64)
+        // convert kB to B
+        vBytes := vKBytes * 1024
+
+        s.fillStatus(k, v, vKBytes, vBytes)
+    }
+
+    return s, nil
+}
+
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
+    switch k {
+    case "Name":
+        s.Name = vString
+    case "VmPeak":
+        s.VmPeak = vUintBytes
+    case "VmSize":
+        s.VmSize = vUintBytes
+    case "VmLck":
+        s.VmLck = vUintBytes
+    case "VmPin":
+        s.VmPin = vUintBytes
+    case "VmHWM":
+        s.VmHWM = vUintBytes
+    case "VmRSS":
+        s.VmRSS = vUintBytes
+    case "RssAnon":
+        s.RssAnon = vUintBytes
+    case "RssFile":
+        s.RssFile = vUintBytes
+    case "RssShmem":
+        s.RssShmem = vUintBytes
+    case "VmData":
+        s.VmData = vUintBytes
+    case "VmStk":
+        s.VmStk = vUintBytes
+    case "VmExe":
+        s.VmExe = vUintBytes
+    case "VmLib":
+        s.VmLib = vUintBytes
+    case "VmPTE":
+        s.VmPTE = vUintBytes
+    case "VmPMD":
+        s.VmPMD = vUintBytes
+    case "VmSwap":
+        s.VmSwap = vUintBytes
+    case "HugetlbPages":
+        s.HugetlbPages = vUintBytes
+    case "voluntary_ctxt_switches":
+        s.VoluntaryCtxtSwitches = vUint
+    case "nonvoluntary_ctxt_switches":
+        s.NonVoluntaryCtxtSwitches = vUint
+    }
+}
+
+// TotalCtxtSwitches returns the total context switch.
+func (s ProcStatus) TotalCtxtSwitches() uint64 {
+    return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
+}
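The new file above exposes /proc/[pid]/status via p.NewStatus, with memory values converted from kB to bytes. A short usage sketch (not part of this commit):

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    p, err := fs.Proc(os.Getpid())
    if err != nil {
        log.Fatal(err)
    }
    status, err := p.NewStatus()
    if err != nil {
        log.Fatal(err)
    }
    // VmRSS is already converted from kB to bytes by fillStatus.
    fmt.Printf("%s: VmRSS=%d bytes, context switches=%d\n",
        status.Name, status.VmRSS, status.TotalCtxtSwitches())
}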
@@ -20,6 +20,8 @@ import (
     "os"
     "strconv"
     "strings"
+
+    "github.com/prometheus/procfs/internal/fs"
 )
 
 // CPUStat shows how much time the cpu spend in various stages.
@@ -78,16 +80,6 @@ type Stat struct {
     SoftIRQ SoftIRQStat
 }
 
-// NewStat returns kernel/system statistics read from /proc/stat.
-func NewStat() (Stat, error) {
-    fs, err := NewFS(DefaultMountPoint)
-    if err != nil {
-        return Stat{}, err
-    }
-
-    return fs.NewStat()
-}
-
 // Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
 func parseCPUStat(line string) (CPUStat, int64, error) {
     cpuStat := CPUStat{}
@@ -149,9 +141,29 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
     return softIRQStat, total, nil
 }
 
-// NewStat returns an information about current kernel/system statistics.
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: use fs.Stat() instead
+func NewStat() (Stat, error) {
+    fs, err := NewFS(fs.DefaultProcMountPoint)
+    if err != nil {
+        return Stat{}, err
+    }
+    return fs.Stat()
+}
+
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: use fs.Stat() instead
 func (fs FS) NewStat() (Stat, error) {
-    // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+    return fs.Stat()
+}
+
+// Stat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Stat() (Stat, error) {
+
     f, err := os.Open(fs.proc.Path("stat"))
     if err != nil {
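The package-level NewStat and fs.NewStat are kept only as deprecated wrappers around the new fs.Stat. A sketch of a migrated caller (not part of this commit); the Stat struct is shown only partially in this diff, so the example sticks to the SoftIRQ field:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    // fs.Stat() is the new entry point; NewStat() and fs.NewStat() are deprecated wrappers.
    stat, err := fs.Stat()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("softirq counters: %+v\n", stat.SoftIRQ)
}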
@@ -88,6 +88,8 @@ Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
 Options:
   -C <DIR>           (change directory)
   -v                 (verbose)
+  --recursive-unlink (recursively delete existing directory if path
+                      collides with file or directory to extract)
 
 Example: Change to sysfs directory, create ttar file from fixtures directory
   $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
@@ -111,8 +113,9 @@ function set_cmd {
 }
 
 unset VERBOSE
+unset RECURSIVE_UNLINK
 
-while getopts :cf:htxvC: opt; do
+while getopts :cf:-:htxvC: opt; do
   case $opt in
     c)
       set_cmd "create"
@@ -136,6 +139,18 @@ while getopts :cf:htxvC: opt; do
     C)
       CDIR=$OPTARG
       ;;
+    -)
+      case $OPTARG in
+        recursive-unlink)
+          RECURSIVE_UNLINK="yes"
+          ;;
+        *)
+          echo -e "Error: invalid option -$OPTARG"
+          echo
+          usage 1
+          ;;
+      esac
+      ;;
     *)
       echo >&2 "ERROR: invalid option -$OPTARG"
       echo
@@ -212,16 +227,16 @@ function extract {
   local eof_without_newline
   if [ "$size" -gt 0 ]; then
     if [[ "$line" =~ [^\\]EOF ]]; then
-      # An EOF not preceeded by a backslash indicates that the line
+      # An EOF not preceded by a backslash indicates that the line
       # does not end with a newline
       eof_without_newline=1
     else
      eof_without_newline=0
    fi
    # Replace NULLBYTE with null byte if at beginning of line
-   # Replace NULLBYTE with null byte unless preceeded by backslash
+   # Replace NULLBYTE with null byte unless preceded by backslash
    # Remove one backslash in front of NULLBYTE (if any)
-   # Remove EOF unless preceeded by backslash
+   # Remove EOF unless preceded by backslash
    # Remove one backslash in front of EOF
    if [ $USE_PYTHON -eq 1 ]; then
      echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
@@ -245,7 +260,16 @@ function extract {
     fi
     if [[ $line =~ ^Path:\ (.*)$ ]]; then
       path=${BASH_REMATCH[1]}
-      if [ -e "$path" ] || [ -L "$path" ]; then
+      if [ -L "$path" ]; then
+        rm "$path"
+      elif [ -d "$path" ]; then
+        if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then
+          rm -r "$path"
+        else
+          # Safe because symlinks to directories are dealt with above
+          rmdir "$path"
+        fi
+      elif [ -e "$path" ]; then
         rm "$path"
       fi
     elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
@@ -183,7 +183,7 @@ github.com/influxdata/influxdb/models
 github.com/influxdata/influxdb/pkg/escape
 # github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7
 github.com/jmespath/go-jmespath
-# github.com/json-iterator/go v1.1.5
+# github.com/json-iterator/go v1.1.6
 github.com/json-iterator/go
 # github.com/julienschmidt/httprouter v1.2.0
 github.com/julienschmidt/httprouter
@@ -219,7 +219,7 @@ github.com/petermattis/goid
 github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
 github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v0.9.3
+# github.com/prometheus/client_golang v1.0.0
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/api
 github.com/prometheus/client_golang/api/prometheus/v1
@@ -229,7 +229,7 @@ github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/testutil
 # github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.4.0
+# github.com/prometheus/common v0.4.1
 github.com/prometheus/common/model
 github.com/prometheus/common/promlog
 github.com/prometheus/common/promlog/flag
@@ -239,7 +239,7 @@ github.com/prometheus/common/expfmt
 github.com/prometheus/common/route
 github.com/prometheus/common/server
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
-# github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084
+# github.com/prometheus/procfs v0.0.2
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 # github.com/prometheus/tsdb v0.8.0
@@ -316,8 +316,8 @@ golang.org/x/oauth2/jwt
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
 # golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e
-golang.org/x/sys/unix
 golang.org/x/sys/windows
+golang.org/x/sys/unix
 # golang.org/x/text v0.3.0
 golang.org/x/text/secure/bidirule
 golang.org/x/text/unicode/bidi