Fixes: https://github.com/prometheus/node_exporter/issues/1721
Signed-off-by: Ben Kochie <superq@gmail.com>
Ben Kochie
5 years ago
51 changed files with 2699 additions and 292 deletions
@@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package procfs

var parseCPUInfo = parseCPUInfoARM
@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux
// +build arm64

package procfs

var parseCPUInfo = parseCPUInfoARM
@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux
// +build 386 amd64

package procfs

var parseCPUInfo = parseCPUInfoX86
@@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package procfs

var parseCPUInfo = parseCPUInfoMips
@@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package procfs

var parseCPUInfo = parseCPUInfoMips
@@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package procfs

var parseCPUInfo = parseCPUInfoMips
@@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package procfs

var parseCPUInfo = parseCPUInfoMips
@@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package procfs

var parseCPUInfo = parseCPUInfoPPC
@@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package procfs

var parseCPUInfo = parseCPUInfoPPC
@@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package procfs

var parseCPUInfo = parseCPUInfoS390X
@@ -0,0 +1,422 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
    "bufio"
    "bytes"
    "fmt"
    "io"
    "strconv"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// Fscacheinfo represents fscache statistics.
type Fscacheinfo struct {
    // Number of index cookies allocated
    IndexCookiesAllocated uint64
    // Number of data storage cookies allocated
    DataStorageCookiesAllocated uint64
    // Number of special cookies allocated
    SpecialCookiesAllocated uint64
    // Number of objects allocated
    ObjectsAllocated uint64
    // Number of object allocation failures
    ObjectAllocationsFailure uint64
    // Number of objects that reached the available state
    ObjectsAvailable uint64
    // Number of objects that reached the dead state
    ObjectsDead uint64
    // Number of objects that didn't have a coherency check
    ObjectsWithoutCoherencyCheck uint64
    // Number of objects that passed a coherency check
    ObjectsWithCoherencyCheck uint64
    // Number of objects that needed a coherency data update
    ObjectsNeedCoherencyCheckUpdate uint64
    // Number of objects that were declared obsolete
    ObjectsDeclaredObsolete uint64
    // Number of pages marked as being cached
    PagesMarkedAsBeingCached uint64
    // Number of uncache page requests seen
    UncachePagesRequestSeen uint64
    // Number of acquire cookie requests seen
    AcquireCookiesRequestSeen uint64
    // Number of acq reqs given a NULL parent
    AcquireRequestsWithNullParent uint64
    // Number of acq reqs rejected due to no cache available
    AcquireRequestsRejectedNoCacheAvailable uint64
    // Number of acq reqs succeeded
    AcquireRequestsSucceeded uint64
    // Number of acq reqs rejected due to error
    AcquireRequestsRejectedDueToError uint64
    // Number of acq reqs failed on ENOMEM
    AcquireRequestsFailedDueToEnomem uint64
    // Number of lookup calls made on cache backends
    LookupsNumber uint64
    // Number of negative lookups made
    LookupsNegative uint64
    // Number of positive lookups made
    LookupsPositive uint64
    // Number of objects created by lookup
    ObjectsCreatedByLookup uint64
    // Number of lookups timed out and requeued
    LookupsTimedOutAndRequed uint64
    InvalidationsNumber      uint64
    InvalidationsRunning     uint64
    // Number of update cookie requests seen
    UpdateCookieRequestSeen uint64
    // Number of upd reqs given a NULL parent
    UpdateRequestsWithNullParent uint64
    // Number of upd reqs granted CPU time
    UpdateRequestsRunning uint64
    // Number of relinquish cookie requests seen
    RelinquishCookiesRequestSeen uint64
    // Number of rlq reqs given a NULL parent
    RelinquishCookiesWithNullParent uint64
    // Number of rlq reqs waited on completion of creation
    RelinquishRequestsWaitingCompleteCreation uint64
    // Number of rlq reqs retried
    RelinquishRetries uint64
    // Number of attribute changed requests seen
    AttributeChangedRequestsSeen uint64
    // Number of attr changed requests queued
    AttributeChangedRequestsQueued uint64
    // Number of attr changed rejected -ENOBUFS
    AttributeChangedRejectDueToEnobufs uint64
    // Number of attr changed failed -ENOMEM
    AttributeChangedFailedDueToEnomem uint64
    // Number of attr changed ops given CPU time
    AttributeChangedOps uint64
    // Number of allocation requests seen
    AllocationRequestsSeen uint64
    // Number of successful alloc reqs
    AllocationOkRequests uint64
    // Number of alloc reqs that waited on lookup completion
    AllocationWaitingOnLookup uint64
    // Number of alloc reqs rejected -ENOBUFS
    AllocationsRejectedDueToEnobufs uint64
    // Number of alloc reqs aborted -ERESTARTSYS
    AllocationsAbortedDueToErestartsys uint64
    // Number of alloc reqs submitted
    AllocationOperationsSubmitted uint64
    // Number of alloc reqs waited for CPU time
    AllocationsWaitedForCPU uint64
    // Number of alloc reqs aborted due to object death
    AllocationsAbortedDueToObjectDeath uint64
    // Number of retrieval (read) requests seen
    RetrievalsReadRequests uint64
    // Number of successful retr reqs
    RetrievalsOk uint64
    // Number of retr reqs that waited on lookup completion
    RetrievalsWaitingLookupCompletion uint64
    // Number of retr reqs returned -ENODATA
    RetrievalsReturnedEnodata uint64
    // Number of retr reqs rejected -ENOBUFS
    RetrievalsRejectedDueToEnobufs uint64
    // Number of retr reqs aborted -ERESTARTSYS
    RetrievalsAbortedDueToErestartsys uint64
    // Number of retr reqs failed -ENOMEM
    RetrievalsFailedDueToEnomem uint64
    // Number of retr reqs submitted
    RetrievalsRequests uint64
    // Number of retr reqs waited for CPU time
    RetrievalsWaitingCPU uint64
    // Number of retr reqs aborted due to object death
    RetrievalsAbortedDueToObjectDeath uint64
    // Number of storage (write) requests seen
    StoreWriteRequests uint64
    // Number of successful store reqs
    StoreSuccessfulRequests uint64
    // Number of store reqs on a page already pending storage
    StoreRequestsOnPendingStorage uint64
    // Number of store reqs rejected -ENOBUFS
    StoreRequestsRejectedDueToEnobufs uint64
    // Number of store reqs failed -ENOMEM
    StoreRequestsFailedDueToEnomem uint64
    // Number of store reqs submitted
    StoreRequestsSubmitted uint64
    // Number of store reqs granted CPU time
    StoreRequestsRunning uint64
    // Number of pages given store req processing time
    StorePagesWithRequestsProcessing uint64
    // Number of store reqs deleted from tracking tree
    StoreRequestsDeleted uint64
    // Number of store reqs over store limit
    StoreRequestsOverStoreLimit uint64
    // Number of release reqs against pages with no pending store
    ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
    // Number of release reqs against pages stored by time lock granted
    ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
    // Number of release reqs ignored due to in-progress store
    ReleaseRequestsIgnoredDueToInProgressStore uint64
    // Number of page stores cancelled due to release req
    PageStoresCancelledByReleaseRequests uint64
    VmscanWaiting                        uint64
    // Number of times async ops added to pending queues
    OpsPending uint64
    // Number of times async ops given CPU time
    OpsRunning uint64
    // Number of times async ops queued for processing
    OpsEnqueued uint64
    // Number of async ops cancelled
    OpsCancelled uint64
    // Number of async ops rejected due to object lookup/create failure
    OpsRejected uint64
    // Number of async ops initialised
    OpsInitialised uint64
    // Number of async ops queued for deferred release
    OpsDeferred uint64
    // Number of async ops released (should equal ini=N when idle)
    OpsReleased uint64
    // Number of deferred-release async ops garbage collected
    OpsGarbageCollected uint64
    // Number of in-progress alloc_object() cache ops
    CacheopAllocationsinProgress uint64
    // Number of in-progress lookup_object() cache ops
    CacheopLookupObjectInProgress uint64
    // Number of in-progress lookup_complete() cache ops
    CacheopLookupCompleteInPorgress uint64
    // Number of in-progress grab_object() cache ops
    CacheopGrabObjectInProgress uint64
    CacheopInvalidations        uint64
    // Number of in-progress update_object() cache ops
    CacheopUpdateObjectInProgress uint64
    // Number of in-progress drop_object() cache ops
    CacheopDropObjectInProgress uint64
    // Number of in-progress put_object() cache ops
    CacheopPutObjectInProgress uint64
    // Number of in-progress attr_changed() cache ops
    CacheopAttributeChangeInProgress uint64
    // Number of in-progress sync_cache() cache ops
    CacheopSyncCacheInProgress uint64
    // Number of in-progress read_or_alloc_page() cache ops
    CacheopReadOrAllocPageInProgress uint64
    // Number of in-progress read_or_alloc_pages() cache ops
    CacheopReadOrAllocPagesInProgress uint64
    // Number of in-progress allocate_page() cache ops
    CacheopAllocatePageInProgress uint64
    // Number of in-progress allocate_pages() cache ops
    CacheopAllocatePagesInProgress uint64
    // Number of in-progress write_page() cache ops
    CacheopWritePagesInProgress uint64
    // Number of in-progress uncache_page() cache ops
    CacheopUncachePagesInProgress uint64
    // Number of in-progress dissociate_pages() cache ops
    CacheopDissociatePagesInProgress uint64
    // Number of object lookups/creations rejected due to lack of space
    CacheevLookupsAndCreationsRejectedLackSpace uint64
    // Number of stale objects deleted
    CacheevStaleObjectsDeleted uint64
    // Number of objects retired when relinquished
    CacheevRetiredWhenReliquished uint64
    // Number of objects culled
    CacheevObjectsCulled uint64
}

// Fscacheinfo returns information about current fscache statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
    b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
    if err != nil {
        return Fscacheinfo{}, err
    }

    m, err := parseFscacheinfo(bytes.NewReader(b))
    if err != nil {
        return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err)
    }

    return *m, nil
}

func setFSCacheFields(fields []string, setFields ...*uint64) error {
    var err error
    if len(fields) < len(setFields) {
        return fmt.Errorf("insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
    }

    for i := range setFields {
        *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
        if err != nil {
            return err
        }
    }
    return nil
}

func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
    var m Fscacheinfo
    s := bufio.NewScanner(r)
    for s.Scan() {
        fields := strings.Fields(s.Text())
        if len(fields) < 2 {
            return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
        }

        switch fields[0] {
        case "Cookies:":
            err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
                &m.SpecialCookiesAllocated)
            if err != nil {
                return &m, err
            }
        case "Objects:":
            err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
                &m.ObjectsAvailable, &m.ObjectsDead)
            if err != nil {
                return &m, err
            }
        case "ChkAux":
            err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
                &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
            if err != nil {
                return &m, err
            }
        case "Pages":
            err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
            if err != nil {
                return &m, err
            }
        case "Acquire:":
            err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
                &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
                &m.AcquireRequestsFailedDueToEnomem)
            if err != nil {
                return &m, err
            }
        case "Lookups:":
            err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
                &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
            if err != nil {
                return &m, err
            }
        case "Invals":
            err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
            if err != nil {
                return &m, err
            }
        case "Updates:":
            err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
                &m.UpdateRequestsRunning)
            if err != nil {
                return &m, err
            }
        case "Relinqs:":
            err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
                &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
            if err != nil {
                return &m, err
            }
        case "AttrChg:":
            err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
                &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
            if err != nil {
                return &m, err
            }
        case "Allocs":
            if strings.Split(fields[2], "=")[0] == "n" {
                err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
                    &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
                if err != nil {
                    return &m, err
                }
            } else {
                err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
                    &m.AllocationsAbortedDueToObjectDeath)
                if err != nil {
                    return &m, err
                }
            }
        case "Retrvls:":
            if strings.Split(fields[1], "=")[0] == "n" {
                err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
                    &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
                    &m.RetrievalsFailedDueToEnomem)
                if err != nil {
                    return &m, err
                }
            } else {
                err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
                if err != nil {
                    return &m, err
                }
            }
        case "Stores":
            if strings.Split(fields[2], "=")[0] == "n" {
                err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
                    &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
                if err != nil {
                    return &m, err
                }
            } else {
                err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
                    &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
                if err != nil {
                    return &m, err
                }
            }
        case "VmScan":
            err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
                &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
                &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
            if err != nil {
                return &m, err
            }
        case "Ops":
            if strings.Split(fields[2], "=")[0] == "pend" {
                err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
                if err != nil {
                    return &m, err
                }
            } else {
                err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
                if err != nil {
                    return &m, err
                }
            }
        case "CacheOp:":
            if strings.Split(fields[1], "=")[0] == "alo" {
                err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
                    &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
                if err != nil {
                    return &m, err
                }
            } else if strings.Split(fields[1], "=")[0] == "inv" {
                err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
                    &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
                    &m.CacheopSyncCacheInProgress)
                if err != nil {
                    return &m, err
                }
            } else {
                err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
                    &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
                    &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
                if err != nil {
                    return &m, err
                }
            }
        case "CacheEv:":
            err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
                &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
            if err != nil {
                return &m, err
            }
        }
    }

    return &m, nil
}
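A minimal usage sketch of the new Fscacheinfo accessor (illustrative only, not part of this change set; it assumes the standard github.com/prometheus/procfs import path, the existing procfs.NewFS constructor, and a host where /proc/fs/fscache/stats is present):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// The mount point is an assumption; NewFS accepts any proc mount path.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	// Fscacheinfo returns an error when /proc/fs/fscache/stats is missing,
	// e.g. if the fscache module is not loaded on the host.
	info, err := fs.Fscacheinfo()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("index cookies allocated:", info.IndexCookiesAllocated)
	fmt.Println("read requests seen:", info.RetrievalsReadRequests)
}
```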
@@ -0,0 +1,62 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package procfs

import (
    "os"

    "github.com/prometheus/procfs/internal/util"
)

// KernelRandom contains information about the kernel's random number generator.
type KernelRandom struct {
    // EntropyAvaliable gives the available entropy, in bits.
    EntropyAvaliable *uint64
    // PoolSize gives the size of the entropy pool, in bytes.
    PoolSize *uint64
    // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
    URandomMinReseedSeconds *uint64
    // WriteWakeupThreshold is the number of bits of entropy below which we wake up processes
    // that do a select(2) or poll(2) for write access to /dev/random.
    WriteWakeupThreshold *uint64
    // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
    // waiting for entropy from /dev/random.
    ReadWakeupThreshold *uint64
}

// KernelRandom returns values from /proc/sys/kernel/random.
func (fs FS) KernelRandom() (KernelRandom, error) {
    random := KernelRandom{}

    for file, p := range map[string]**uint64{
        "entropy_avail":           &random.EntropyAvaliable,
        "poolsize":                &random.PoolSize,
        "urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
        "write_wakeup_threshold":  &random.WriteWakeupThreshold,
        "read_wakeup_threshold":   &random.ReadWakeupThreshold,
    } {
        val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
        if os.IsNotExist(err) {
            continue
        }
        if err != nil {
            return random, err
        }
        *p = &val
    }

    return random, nil
}
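A similar illustrative sketch for the new KernelRandom accessor; the pointer fields stay nil when the corresponding file under /proc/sys/kernel/random does not exist on the running kernel (again an example, not part of the diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	random, err := fs.KernelRandom()
	if err != nil {
		log.Fatal(err)
	}

	// Missing files are skipped rather than treated as errors, so check
	// each pointer before dereferencing it.
	if random.EntropyAvaliable != nil {
		fmt.Println("entropy available:", *random.EntropyAvaliable)
	}
	if random.PoolSize != nil {
		fmt.Println("pool size:", *random.PoolSize)
	}
}
```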
@@ -0,0 +1,98 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
    "bufio"
    "bytes"
    "fmt"
    "strconv"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
// this hierarchy' (where == what path on the specific cgroupfs). By prefixing this path with the mount point of
// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
// in this hierarchy.
//
// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
type Cgroup struct {
    // HierarchyID can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
    // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number.
    HierarchyID int
    // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
    // Cgroups V2 this may be empty, as all active controllers use the same hierarchy.
    Controllers []string
    // Path of this control group, relative to the mount point of the cgroupfs representing this specific
    // hierarchy.
    Path string
}

// parseCgroupString parses each line of the /proc/[pid]/cgroup file.
// The line format is hierarchyID:[controller1,controller2]:path.
func parseCgroupString(cgroupStr string) (*Cgroup, error) {
    var err error

    fields := strings.Split(cgroupStr, ":")
    if len(fields) < 3 {
        return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
    }

    cgroup := &Cgroup{
        Path:        fields[2],
        Controllers: nil,
    }
    cgroup.HierarchyID, err = strconv.Atoi(fields[0])
    if err != nil {
        return nil, fmt.Errorf("failed to parse hierarchy ID")
    }
    if fields[1] != "" {
        ssNames := strings.Split(fields[1], ",")
        cgroup.Controllers = append(cgroup.Controllers, ssNames...)
    }
    return cgroup, nil
}

// parseCgroups reads each line of the /proc/[pid]/cgroup file.
func parseCgroups(data []byte) ([]Cgroup, error) {
    var cgroups []Cgroup
    scanner := bufio.NewScanner(bytes.NewReader(data))
    for scanner.Scan() {
        mountString := scanner.Text()
        parsedMounts, err := parseCgroupString(mountString)
        if err != nil {
            return nil, err
        }
        cgroups = append(cgroups, *parsedMounts)
    }

    err := scanner.Err()
    return cgroups, err
}

// Cgroups reads from /proc/<pid>/cgroup and returns a []Cgroup locating this PID in each process
// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
// so the length of the returned slice is equal to the number of active hierarchies on this system.
func (p Proc) Cgroups() ([]Cgroup, error) {
    data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID))
    if err != nil {
        return nil, err
    }
    return parseCgroups(data)
}
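A hypothetical caller of the new Cgroups method, using the library's existing procfs.Self() helper; the cgroup lines quoted in the comment are illustrative examples of the format parseCgroupString expects:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// Each entry corresponds to one line of /proc/self/cgroup, e.g.
	// "7:cpu,cpuacct:/user.slice" (v1) or "0::/user.slice/session-1.scope" (v2).
	cgroups, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}

	for _, cg := range cgroups {
		fmt.Printf("hierarchy=%d controllers=%v path=%s\n", cg.HierarchyID, cg.Controllers, cg.Path)
	}
}
```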
@@ -0,0 +1,165 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package procfs

import (
    "bufio"
    "fmt"
    "os"
    "regexp"
    "strconv"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

var (
    // procSMapsHeaderLine matches the header line before each mapped zone in /proc/pid/smaps.
    procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)

// ProcSMapsRollup models memory statistics summed over all mappings of a process, as reported by
// /proc/[pid]/smaps_rollup.
type ProcSMapsRollup struct {
    // Amount of the mapping that is currently resident in RAM
    Rss uint64
    // Process's proportional share of this mapping
    Pss uint64
    // Size in bytes of clean shared pages
    SharedClean uint64
    // Size in bytes of dirty shared pages
    SharedDirty uint64
    // Size in bytes of clean private pages
    PrivateClean uint64
    // Size in bytes of dirty private pages
    PrivateDirty uint64
    // Amount of memory currently marked as referenced or accessed
    Referenced uint64
    // Amount of memory that does not belong to any file
    Anonymous uint64
    // Amount of would-be-anonymous memory currently on swap
    Swap uint64
    // Process's proportional memory on swap
    SwapPss uint64
}

// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
// process.
//
// If smaps_rollup does not exist (it requires kernel >= 4.15), the content of /proc/pid/smaps
// is read and summed instead.
func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
    data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
    if err != nil && os.IsNotExist(err) {
        return p.procSMapsRollupManual()
    }
    if err != nil {
        return ProcSMapsRollup{}, err
    }

    lines := strings.Split(string(data), "\n")
    smaps := ProcSMapsRollup{}

    // Skip the first line, which doesn't contain information we need.
    lines = lines[1:]
    for _, line := range lines {
        if line == "" {
            continue
        }

        if err := smaps.parseLine(line); err != nil {
            return ProcSMapsRollup{}, err
        }
    }

    return smaps, nil
}

// procSMapsRollupManual reads /proc/pid/smaps and does the roll-up in Go code.
func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
    file, err := os.Open(p.path("smaps"))
    if err != nil {
        return ProcSMapsRollup{}, err
    }
    defer file.Close()

    smaps := ProcSMapsRollup{}
    scan := bufio.NewScanner(file)

    for scan.Scan() {
        line := scan.Text()

        if procSMapsHeaderLine.MatchString(line) {
            continue
        }

        if err := smaps.parseLine(line); err != nil {
            return ProcSMapsRollup{}, err
        }
    }

    return smaps, nil
}

func (s *ProcSMapsRollup) parseLine(line string) error {
    kv := strings.SplitN(line, ":", 2)
    if len(kv) != 2 {
        return fmt.Errorf("invalid smaps line %q, missing colon", line)
    }

    k := kv[0]
    if k == "VmFlags" {
        return nil
    }

    v := strings.TrimSpace(kv[1])
    v = strings.TrimRight(v, " kB")

    vKBytes, err := strconv.ParseUint(v, 10, 64)
    if err != nil {
        return err
    }
    vBytes := vKBytes * 1024

    s.addValue(k, v, vKBytes, vBytes)

    return nil
}

func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
    switch k {
    case "Rss":
        s.Rss += vUintBytes
    case "Pss":
        s.Pss += vUintBytes
    case "Shared_Clean":
        s.SharedClean += vUintBytes
    case "Shared_Dirty":
        s.SharedDirty += vUintBytes
    case "Private_Clean":
        s.PrivateClean += vUintBytes
    case "Private_Dirty":
        s.PrivateDirty += vUintBytes
    case "Referenced":
        s.Referenced += vUintBytes
    case "Anonymous":
        s.Anonymous += vUintBytes
    case "Swap":
        s.Swap += vUintBytes
    case "SwapPss":
        s.SwapPss += vUintBytes
    }
}
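An illustrative caller of the new ProcSMapsRollup method (it assumes procfs.Self() as in the previous sketch):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// Falls back to summing /proc/self/smaps when smaps_rollup is not
	// available (kernels older than 4.15).
	rollup, err := p.ProcSMapsRollup()
	if err != nil {
		log.Fatal(err)
	}

	// Values are converted from the kernel's "kB" fields to bytes.
	fmt.Println("RSS bytes:", rollup.Rss)
	fmt.Println("PSS bytes:", rollup.Pss)
}
```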
@@ -0,0 +1,249 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package sysfs

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/prometheus/procfs/internal/util"
)

const fibrechannelClassPath = "class/fc_host"

type FibreChannelCounters struct {
    DumpedFrames          uint64 // /sys/class/fc_host/<Name>/statistics/dumped_frames
    ErrorFrames           uint64 // /sys/class/fc_host/<Name>/statistics/error_frames
    InvalidCRCCount       uint64 // /sys/class/fc_host/<Name>/statistics/invalid_crc_count
    RXFrames              uint64 // /sys/class/fc_host/<Name>/statistics/rx_frames
    RXWords               uint64 // /sys/class/fc_host/<Name>/statistics/rx_words
    TXFrames              uint64 // /sys/class/fc_host/<Name>/statistics/tx_frames
    TXWords               uint64 // /sys/class/fc_host/<Name>/statistics/tx_words
    SecondsSinceLastReset uint64 // /sys/class/fc_host/<Name>/statistics/seconds_since_last_reset
    InvalidTXWordCount    uint64 // /sys/class/fc_host/<Name>/statistics/invalid_tx_word_count
    LinkFailureCount      uint64 // /sys/class/fc_host/<Name>/statistics/link_failure_count
    LossOfSyncCount       uint64 // /sys/class/fc_host/<Name>/statistics/loss_of_sync_count
    LossOfSignalCount     uint64 // /sys/class/fc_host/<Name>/statistics/loss_of_signal_count
    NosCount              uint64 // /sys/class/fc_host/<Name>/statistics/nos_count
    FCPPacketAborts       uint64 // /sys/class/fc_host/<Name>/statistics/fcp_packet_aborts
}

type FibreChannelHost struct {
    Name             string // /sys/class/fc_host/<Name>
    Speed            string // /sys/class/fc_host/<Name>/speed
    PortState        string // /sys/class/fc_host/<Name>/port_state
    PortType         string // /sys/class/fc_host/<Name>/port_type
    SymbolicName     string // /sys/class/fc_host/<Name>/symbolic_name
    NodeName         string // /sys/class/fc_host/<Name>/node_name
    PortID           string // /sys/class/fc_host/<Name>/port_id
    PortName         string // /sys/class/fc_host/<Name>/port_name
    FabricName       string // /sys/class/fc_host/<Name>/fabric_name
    DevLossTMO       string // /sys/class/fc_host/<Name>/dev_loss_tmo
    SupportedClasses string // /sys/class/fc_host/<Name>/supported_classes
    SupportedSpeeds  string // /sys/class/fc_host/<Name>/supported_speeds
    Counters         FibreChannelCounters // /sys/class/fc_host/<Name>/statistics/*
}

type FibreChannelClass map[string]FibreChannelHost

// FibreChannelClass parses everything in /sys/class/fc_host.
func (fs FS) FibreChannelClass() (FibreChannelClass, error) {
    path := fs.sys.Path(fibrechannelClassPath)

    dirs, err := ioutil.ReadDir(path)
    if err != nil {
        return nil, err
    }

    fcc := make(FibreChannelClass, len(dirs))
    for _, d := range dirs {
        host, err := fs.parseFibreChannelHost(d.Name())
        if err != nil {
            return nil, err
        }

        fcc[host.Name] = *host
    }

    return fcc, nil
}

// parseFibreChannelHost parses a single FC host.
func (fs FS) parseFibreChannelHost(name string) (*FibreChannelHost, error) {
    path := fs.sys.Path(fibrechannelClassPath, name)
    host := FibreChannelHost{Name: name}

    for _, f := range [...]string{"speed", "port_state", "port_type", "node_name", "port_id", "port_name", "fabric_name", "dev_loss_tmo", "symbolic_name", "supported_classes", "supported_speeds"} {
        name := filepath.Join(path, f)
        value, err := util.SysReadFile(name)
        if err != nil {
            return nil, fmt.Errorf("failed to read file %q: %v", name, err)
        }

        switch f {
        case "speed":
            host.Speed = value
        case "port_state":
            host.PortState = value
        case "port_type":
            host.PortType = value
        case "node_name":
            // Strip the leading "0x" from hex-encoded WWN-style values.
            if len(value) > 2 {
                value = value[2:]
            }
            host.NodeName = value
        case "port_id":
            if len(value) > 2 {
                value = value[2:]
            }
            host.PortID = value
        case "port_name":
            if len(value) > 2 {
                value = value[2:]
            }
            host.PortName = value
        case "fabric_name":
            if len(value) > 2 {
                value = value[2:]
            }
            host.FabricName = value
        case "dev_loss_tmo":
            host.DevLossTMO = value
        case "supported_classes":
            host.SupportedClasses = value
        case "supported_speeds":
            host.SupportedSpeeds = value
        case "symbolic_name":
            host.SymbolicName = value
        }
    }

    counters, err := parseFibreChannelStatistics(path)
    if err != nil {
        return nil, err
    }
    host.Counters = *counters

    return &host, nil
}

// parseFibreChannelStatistics parses metrics from a single FC host.
func parseFibreChannelStatistics(hostPath string) (*FibreChannelCounters, error) {
    var counters FibreChannelCounters

    path := filepath.Join(hostPath, "statistics")
    files, err := ioutil.ReadDir(path)
    if err != nil {
        return nil, err
    }

    for _, f := range files {
        if !f.Mode().IsRegular() || f.Name() == "reset_statistics" {
            continue
        }

        name := filepath.Join(path, f.Name())
        value, err := util.SysReadFile(name)
        if err != nil {
            // There are some write-only files in this directory; we can safely skip over them.
            if os.IsNotExist(err) || err.Error() == "operation not supported" || err.Error() == "invalid argument" {
                continue
            }
            return nil, fmt.Errorf("failed to read file %q: %v", name, err)
        }

        vp := util.NewValueParser(value)

        // The switch below was generated automatically. Not everything in it is needed yet,
        // so the unwanted cases are commented out.
        switch f.Name() {
        case "dumped_frames":
            counters.DumpedFrames = *vp.PUInt64()
        case "error_frames":
            counters.ErrorFrames = *vp.PUInt64()
        /*
            case "fc_no_free_exch":
                counters.FcNoFreeExch = *vp.PUInt64()
            case "fc_no_free_exch_xid":
                counters.FcNoFreeExchXid = *vp.PUInt64()
            case "fc_non_bls_resp":
                counters.FcNonBlsResp = *vp.PUInt64()
            case "fc_seq_not_found":
                counters.FcSeqNotFound = *vp.PUInt64()
            case "fc_xid_busy":
                counters.FcXidBusy = *vp.PUInt64()
            case "fc_xid_not_found":
                counters.FcXidNotFound = *vp.PUInt64()
            case "fcp_control_requests":
                counters.FcpControlRequests = *vp.PUInt64()
            case "fcp_frame_alloc_failures":
                counters.FcpFrameAllocFailures = *vp.PUInt64()
            case "fcp_input_megabytes":
                counters.FcpInputMegabytes = *vp.PUInt64()
            case "fcp_input_requests":
                counters.FcpInputRequests = *vp.PUInt64()
            case "fcp_output_megabytes":
                counters.FcpOutputMegabytes = *vp.PUInt64()
            case "fcp_output_requests":
                counters.FcpOutputRequests = *vp.PUInt64()
        */
        case "fcp_packet_aborts":
            counters.FCPPacketAborts = *vp.PUInt64()
        /*
            case "fcp_packet_alloc_failures":
                counters.FcpPacketAllocFailures = *vp.PUInt64()
        */
        case "invalid_tx_word_count":
            counters.InvalidTXWordCount = *vp.PUInt64()
        case "invalid_crc_count":
            counters.InvalidCRCCount = *vp.PUInt64()
        case "link_failure_count":
            counters.LinkFailureCount = *vp.PUInt64()
        /*
            case "lip_count":
                counters.LipCount = *vp.PUInt64()
        */
        case "loss_of_signal_count":
            counters.LossOfSignalCount = *vp.PUInt64()
        case "loss_of_sync_count":
            counters.LossOfSyncCount = *vp.PUInt64()
        case "nos_count":
            counters.NosCount = *vp.PUInt64()
        /*
            case "prim_seq_protocol_err_count":
                counters.PrimSeqProtocolErrCount = *vp.PUInt64()
        */
        case "rx_frames":
            counters.RXFrames = *vp.PUInt64()
        case "rx_words":
            counters.RXWords = *vp.PUInt64()
        case "seconds_since_last_reset":
            counters.SecondsSinceLastReset = *vp.PUInt64()
        case "tx_frames":
            counters.TXFrames = *vp.PUInt64()
        case "tx_words":
            counters.TXWords = *vp.PUInt64()
        }

        if err := vp.Err(); err != nil {
            return nil, err
        }

    }

    return &counters, nil
}
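And a sketch for the sysfs side, assuming the package is imported as github.com/prometheus/procfs/sysfs and that sysfs.NewFS behaves as elsewhere in this library (illustrative only):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs/sysfs"
)

func main() {
	fs, err := sysfs.NewFS("/sys")
	if err != nil {
		log.Fatal(err)
	}

	// Returns an error if /sys/class/fc_host does not exist, i.e. on hosts
	// without Fibre Channel HBAs.
	hosts, err := fs.FibreChannelClass()
	if err != nil {
		log.Fatal(err)
	}

	for name, host := range hosts {
		fmt.Printf("%s: speed=%s state=%s rx_frames=%d tx_frames=%d\n",
			name, host.Speed, host.PortState, host.Counters.RXFrames, host.Counters.TXFrames)
	}
}
```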