Skip to content

Commit

Permalink
Update mmdf collector to collect pool data (#52)
Browse files Browse the repository at this point in the history
* Update mmdf collector to collect pool data
Refactor how mmdf parsing is handled
  • Loading branch information
treydock committed Sep 19, 2022
1 parent 813273f commit 26ff27e
Show file tree
Hide file tree
Showing 4 changed files with 249 additions and 113 deletions.
19 changes: 18 additions & 1 deletion cmd/gpfs_mmdf_exporter/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,10 @@ mmdf:fsTotal:HEADER:version:reserved:reserved:fsSize:freeBlocks:freeBlocksPct:fr
mmdf:inode:HEADER:version:reserved:reserved:usedInodes:freeInodes:allocatedInodes:maxInodes:
mmdf:nsd:0:1:::P_META_VD102:system:771751936:300:Yes:No:320274944:41:5005384:1::
mmdf:nsd:0:1:::P_DATA_VD02:data:46766489600:200:No:Yes:6092915712:13:154966272:0::
mmdf:poolTotal:0:1:::data:3647786188800:475190722560:13:12059515296:0:3860104580096:
mmdf:poolTotal:0:1:::system:783308292096:380564840448:49:10024464464:1:1153081262080:
mmdf:data:0:1:::3647786188800:475190722560:13:12059515296:0:
mmdf:metadata:0:1:::13891534848:6011299328:43:58139768:0:
mmdf:poolTotal:0:1:::data:3064453922816:1342362296320:44:1999215152:0:10143773212672:
mmdf:fsTotal:0:1:::3661677723648:481202021888:14:12117655064:0:
mmdf:inode:0:1:::430741822:484301506:915043328:1332164000:
`
Expand All @@ -61,6 +62,22 @@ gpfs_fs_metadata_free_bytes{fs="project"} 6.155570511872e+12
# HELP gpfs_fs_metadata_size_bytes GPFS total metadata size in bytes
# TYPE gpfs_fs_metadata_size_bytes gauge
gpfs_fs_metadata_size_bytes{fs="project"} 1.4224931684352e+13
# HELP gpfs_fs_pool_free_bytes GPFS pool free size in bytes
# TYPE gpfs_fs_pool_free_bytes gauge
gpfs_fs_pool_free_bytes{fs="project",pool="data"} 1.37457899143168e+15
gpfs_fs_pool_free_bytes{fs="project",pool="system"} 3.89698396618752e+14
# HELP gpfs_fs_pool_free_fragments_bytes GPFS pool free fragments in bytes
# TYPE gpfs_fs_pool_free_fragments_bytes gauge
gpfs_fs_pool_free_fragments_bytes{fs="project",pool="data"} 2.047196315648e+12
gpfs_fs_pool_free_fragments_bytes{fs="project",pool="system"} 1.0265051611136e+13
# HELP gpfs_fs_pool_max_disk_size_bytes GPFS pool max disk size in bytes
# TYPE gpfs_fs_pool_max_disk_size_bytes gauge
gpfs_fs_pool_max_disk_size_bytes{fs="project",pool="data"} 1.0387223769776128e+16
gpfs_fs_pool_max_disk_size_bytes{fs="project",pool="system"} 1.18075521236992e+15
# HELP gpfs_fs_pool_total_bytes GPFS pool total size in bytes
# TYPE gpfs_fs_pool_total_bytes gauge
gpfs_fs_pool_total_bytes{fs="project",pool="data"} 3.138000816963584e+15
gpfs_fs_pool_total_bytes{fs="project",pool="system"} 8.02107691106304e+14
# HELP gpfs_fs_size_bytes GPFS filesystem total size in bytes
# TYPE gpfs_fs_size_bytes gauge
gpfs_fs_size_bytes{fs="project"} 3.749557989015552e+15
Expand Down
23 changes: 23 additions & 0 deletions collectors/collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,13 @@ import (
"os"
"os/exec"
"sort"
"strconv"
"strings"
"sync"
"time"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/alecthomas/kingpin.v2"
)
Expand Down Expand Up @@ -140,6 +142,27 @@ func SliceContains(slice []string, str string) bool {
return false
}

// SliceIndex returns the position of str within slice, or -1 when str
// is not present.
func SliceIndex(slice []string, str string) int {
	for idx := range slice {
		if slice[idx] == str {
			return idx
		}
	}
	return -1
}

// ParseFloat converts str to a float64, optionally converting the value
// from kilobytes to bytes when toBytes is true. On parse failure the
// error is logged via logger and returned along with 0.
func ParseFloat(str string, toBytes bool, logger log.Logger) (float64, error) {
	val, err := strconv.ParseFloat(str, 64)
	if err != nil {
		level.Error(logger).Log("msg", fmt.Sprintf("Error parsing %s: %s", str, err.Error()))
		return 0, err
	}
	if toBytes {
		val *= 1024
	}
	return val, nil
}

func FileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
Expand Down
181 changes: 118 additions & 63 deletions collectors/mmdf.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,6 @@ import (
"bytes"
"context"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"time"
Expand All @@ -32,19 +30,8 @@ import (
var (
configFilesystems = kingpin.Flag("collector.mmdf.filesystems", "Filesystems to query with mmdf, comma separated. Defaults to all filesystems.").Default("").String()
mmdfTimeout = kingpin.Flag("collector.mmdf.timeout", "Timeout for mmdf execution").Default("60").Int()
mappedSections = []string{"inode", "fsTotal", "metadata"}
KbToBytes = []string{"fsSize", "freeBlocks", "totalMetadata"}
dfMap = map[string]string{
"inode:usedInodes": "InodesUsed",
"inode:freeInodes": "InodesFree",
"inode:allocatedInodes": "InodesAllocated",
"inode:maxInodes": "InodesTotal",
"fsTotal:fsSize": "FSTotal",
"fsTotal:freeBlocks": "FSFree",
"metadata:totalMetadata": "MetadataTotal",
"metadata:freeBlocks": "MetadataFree",
}
MmdfExec = mmdf
mappedSections = []string{"inode", "fsTotal", "metadata", "poolTotal"}
MmdfExec = mmdf
)

type DFMetric struct {
Expand All @@ -58,18 +45,31 @@ type DFMetric struct {
Metadata bool
MetadataTotal float64
MetadataFree float64
Pools []PoolMetric
}

// PoolMetric holds per-pool capacity figures parsed from an mmdf
// "poolTotal" section. The size fields are converted from kilobytes to
// bytes during parsing (ParseFloat is called with toBytes=true).
type PoolMetric struct {
	PoolName          string  // GPFS storage pool name (e.g. "system", "data")
	PoolTotal         float64 // total pool size in bytes
	PoolFree          float64 // free space in bytes
	PoolFreeFragments float64 // free fragment space in bytes
	PoolMaxDiskSize   float64 // maximum disk size in bytes
}

type MmdfCollector struct {
InodesUsed *prometheus.Desc
InodesFree *prometheus.Desc
InodesAllocated *prometheus.Desc
InodesTotal *prometheus.Desc
FSTotal *prometheus.Desc
FSFree *prometheus.Desc
MetadataTotal *prometheus.Desc
MetadataFree *prometheus.Desc
logger log.Logger
InodesUsed *prometheus.Desc
InodesFree *prometheus.Desc
InodesAllocated *prometheus.Desc
InodesTotal *prometheus.Desc
FSTotal *prometheus.Desc
FSFree *prometheus.Desc
MetadataTotal *prometheus.Desc
MetadataFree *prometheus.Desc
PoolTotal *prometheus.Desc
PoolFree *prometheus.Desc
PoolFreeFragments *prometheus.Desc
PoolMaxDiskSize *prometheus.Desc
logger log.Logger
}

func init() {
Expand All @@ -94,6 +94,14 @@ func NewMmdfCollector(logger log.Logger) Collector {
"GPFS total metadata size in bytes", []string{"fs"}, nil),
MetadataFree: prometheus.NewDesc(prometheus.BuildFQName(namespace, "fs", "metadata_free_bytes"),
"GPFS metadata free size in bytes", []string{"fs"}, nil),
PoolTotal: prometheus.NewDesc(prometheus.BuildFQName(namespace, "fs", "pool_total_bytes"),
"GPFS pool total size in bytes", []string{"fs", "pool"}, nil),
PoolFree: prometheus.NewDesc(prometheus.BuildFQName(namespace, "fs", "pool_free_bytes"),
"GPFS pool free size in bytes", []string{"fs", "pool"}, nil),
PoolFreeFragments: prometheus.NewDesc(prometheus.BuildFQName(namespace, "fs", "pool_free_fragments_bytes"),
"GPFS pool free fragments in bytes", []string{"fs", "pool"}, nil),
PoolMaxDiskSize: prometheus.NewDesc(prometheus.BuildFQName(namespace, "fs", "pool_max_disk_size_bytes"),
"GPFS pool max disk size in bytes", []string{"fs", "pool"}, nil),
logger: logger,
}
}
Expand All @@ -107,6 +115,8 @@ func (c *MmdfCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- c.FSFree
ch <- c.MetadataTotal
ch <- c.MetadataFree
ch <- c.PoolTotal
ch <- c.PoolFree
}

func (c *MmdfCollector) Collect(ch chan<- prometheus.Metric) {
Expand Down Expand Up @@ -162,6 +172,12 @@ func (c *MmdfCollector) Collect(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric(c.MetadataTotal, prometheus.GaugeValue, metric.MetadataTotal, fs)
ch <- prometheus.MustNewConstMetric(c.MetadataFree, prometheus.GaugeValue, metric.MetadataFree, fs)
}
for _, pool := range metric.Pools {
ch <- prometheus.MustNewConstMetric(c.PoolTotal, prometheus.GaugeValue, pool.PoolTotal, fs, pool.PoolName)
ch <- prometheus.MustNewConstMetric(c.PoolFree, prometheus.GaugeValue, pool.PoolFree, fs, pool.PoolName)
ch <- prometheus.MustNewConstMetric(c.PoolFreeFragments, prometheus.GaugeValue, pool.PoolFreeFragments, fs, pool.PoolName)
ch <- prometheus.MustNewConstMetric(c.PoolMaxDiskSize, prometheus.GaugeValue, pool.PoolMaxDiskSize, fs, pool.PoolName)
}
}
ch <- prometheus.MustNewConstMetric(lastExecution, prometheus.GaugeValue, float64(time.Now().Unix()), label)
}(fs)
Expand All @@ -176,8 +192,8 @@ func (c *MmdfCollector) mmdfCollect(fs string) (DFMetric, error) {
if err != nil {
return DFMetric{}, err
}
dfMetric, err := parse_mmdf(out, c.logger)
return dfMetric, err
dfMetric := parse_mmdf(out, c.logger)
return dfMetric, nil
}

func mmdf(fs string, ctx context.Context) (string, error) {
Expand All @@ -193,10 +209,10 @@ func mmdf(fs string, ctx context.Context) (string, error) {
return out.String(), nil
}

func parse_mmdf(out string, logger log.Logger) (DFMetric, error) {
dfMetrics := DFMetric{Metadata: true}
func parse_mmdf(out string, logger log.Logger) DFMetric {
dfMetrics := DFMetric{Metadata: false}
pools := []PoolMetric{}
headers := make(map[string][]string)
values := make(map[string][]string)
lines := strings.Split(out, "\n")
for _, l := range lines {
if !strings.HasPrefix(l, "mmdf") {
Expand All @@ -206,50 +222,89 @@ func parse_mmdf(out string, logger log.Logger) (DFMetric, error) {
if len(items) < 3 {
continue
}
if !SliceContains(mappedSections, items[1]) {
section := items[1]
if !SliceContains(mappedSections, section) {
continue
}
if items[2] == "HEADER" {
headers[items[1]] = append(headers[items[1]], items...)
} else {
values[items[1]] = append(values[items[1]], items...)
continue
}
}
ps := reflect.ValueOf(&dfMetrics) // pointer to struct - addressable
s := ps.Elem() // struct
for k, vals := range headers {
if _, ok := values[k]; !ok {
if k == "metadata" {
dfMetrics.Metadata = false
continue
} else {
level.Error(logger).Log("msg", "Header section missing from values", "header", k)
return dfMetrics, fmt.Errorf("Header section missing from values: %s", k)
if section == "inode" {
if inodesUsedIndex := SliceIndex(headers["inode"], "usedInodes"); inodesUsedIndex != -1 {
if inodesUsed, err := ParseFloat(items[inodesUsedIndex], false, logger); err == nil {
dfMetrics.InodesUsed = inodesUsed
}
}
if inodesFreeIndex := SliceIndex(headers["inode"], "freeInodes"); inodesFreeIndex != -1 {
if inodesFree, err := ParseFloat(items[inodesFreeIndex], false, logger); err == nil {
dfMetrics.InodesFree = inodesFree
}
}
if inodesAllocatedIndex := SliceIndex(headers["inode"], "allocatedInodes"); inodesAllocatedIndex != -1 {
if inodesAllocated, err := ParseFloat(items[inodesAllocatedIndex], false, logger); err == nil {
dfMetrics.InodesAllocated = inodesAllocated
}
}
if inodesTotalIndex := SliceIndex(headers["inode"], "maxInodes"); inodesTotalIndex != -1 {
if inodesTotal, err := ParseFloat(items[inodesTotalIndex], false, logger); err == nil {
dfMetrics.InodesTotal = inodesTotal
}
}
}
if len(vals) != len(values[k]) {
level.Error(logger).Log("msg", "Length of headers does not equal length of values", "header", k, "values", len(values[k]), "headers", len(vals))
return dfMetrics, fmt.Errorf("Length of headers does not equal length of values: %s", k)
if section == "fsTotal" {
if fsTotalIndex := SliceIndex(headers["fsTotal"], "fsSize"); fsTotalIndex != -1 {
if fsTotal, err := ParseFloat(items[fsTotalIndex], true, logger); err == nil {
dfMetrics.FSTotal = fsTotal
}
}
if fsFreeIndex := SliceIndex(headers["fsTotal"], "freeBlocks"); fsFreeIndex != -1 {
if fsFree, err := ParseFloat(items[fsFreeIndex], true, logger); err == nil {
dfMetrics.FSFree = fsFree
}
}
}
if section == "metadata" {
dfMetrics.Metadata = true
if metadataTotalIndex := SliceIndex(headers["metadata"], "totalMetadata"); metadataTotalIndex != -1 {
if metadataTotal, err := ParseFloat(items[metadataTotalIndex], true, logger); err == nil {
dfMetrics.MetadataTotal = metadataTotal
}
}
if metadataFreeIndex := SliceIndex(headers["metadata"], "freeBlocks"); metadataFreeIndex != -1 {
if metadataFree, err := ParseFloat(items[metadataFreeIndex], true, logger); err == nil {
dfMetrics.MetadataFree = metadataFree
}
}
}
for i, v := range vals {
mapKey := fmt.Sprintf("%s:%s", k, v)
value := values[k][i]
if field, ok := dfMap[mapKey]; ok {
f := s.FieldByName(field)
if f.Kind() == reflect.String {
f.SetString(value)
} else if f.Kind() == reflect.Float64 {
if val, err := strconv.ParseFloat(value, 64); err == nil {
if SliceContains(KbToBytes, v) {
val = val * 1024
}
f.SetFloat(val)
} else {
level.Error(logger).Log("msg", fmt.Sprintf("Error parsing %s value %s: %s", mapKey, value, err.Error()))
}
if section == "poolTotal" {
poolMetric := PoolMetric{}
if poolNameIndex := SliceIndex(headers["poolTotal"], "poolName"); poolNameIndex != -1 {
poolMetric.PoolName = items[poolNameIndex]
}
if poolTotalIndex := SliceIndex(headers["poolTotal"], "poolSize"); poolTotalIndex != -1 {
if poolTotal, err := ParseFloat(items[poolTotalIndex], true, logger); err == nil {
poolMetric.PoolTotal = poolTotal
}
}
if poolFreeIndex := SliceIndex(headers["poolTotal"], "freeBlocks"); poolFreeIndex != -1 {
if poolFree, err := ParseFloat(items[poolFreeIndex], true, logger); err == nil {
poolMetric.PoolFree = poolFree
}
}
if poolFreeFragmentsIndex := SliceIndex(headers["poolTotal"], "freeFragments"); poolFreeFragmentsIndex != -1 {
if poolFreeFragments, err := ParseFloat(items[poolFreeFragmentsIndex], true, logger); err == nil {
poolMetric.PoolFreeFragments = poolFreeFragments
}
}
if poolMaxDiskSizeIndex := SliceIndex(headers["poolTotal"], "maxDiskSize"); poolMaxDiskSizeIndex != -1 {
if poolMaxDiskSize, err := ParseFloat(items[poolMaxDiskSizeIndex], true, logger); err == nil {
poolMetric.PoolMaxDiskSize = poolMaxDiskSize
}
}
pools = append(pools, poolMetric)
}
}
return dfMetrics, nil
dfMetrics.Pools = pools
return dfMetrics
}
Loading

0 comments on commit 26ff27e

Please sign in to comment.