From 26ff27e8bdf76ea02549080851a22be84a440702 Mon Sep 17 00:00:00 2001
From: treydock
Date: Mon, 19 Sep 2022 09:41:30 -0400
Subject: [PATCH] Update mmdf collector to collect pool data (#52)

* Update mmdf collector to collect pool data

Refactor how mmdf parsing is handled: look up each field's position in
the section's HEADER line instead of mapping values onto struct fields
via reflection, and log and skip values that fail to parse so
parse_mmdf no longer needs to return an error.
---
 cmd/gpfs_mmdf_exporter/main_test.go |  19 ++-
 collectors/collector.go             |  23 ++++
 collectors/mmdf.go                  | 183 ++++++++++++++++++----------
 collectors/mmdf_test.go             | 139 +++++++++++++--------
 4 files changed, 251 insertions(+), 113 deletions(-)

diff --git a/cmd/gpfs_mmdf_exporter/main_test.go b/cmd/gpfs_mmdf_exporter/main_test.go
index 6191324..aa19f47 100644
--- a/cmd/gpfs_mmdf_exporter/main_test.go
+++ b/cmd/gpfs_mmdf_exporter/main_test.go
@@ -36,9 +36,10 @@ mmdf:fsTotal:HEADER:version:reserved:reserved:fsSize:freeBlocks:freeBlocksPct:fr
 mmdf:inode:HEADER:version:reserved:reserved:usedInodes:freeInodes:allocatedInodes:maxInodes:
 mmdf:nsd:0:1:::P_META_VD102:system:771751936:300:Yes:No:320274944:41:5005384:1::
 mmdf:nsd:0:1:::P_DATA_VD02:data:46766489600:200:No:Yes:6092915712:13:154966272:0::
-mmdf:poolTotal:0:1:::data:3647786188800:475190722560:13:12059515296:0:3860104580096:
+mmdf:poolTotal:0:1:::system:783308292096:380564840448:49:10024464464:1:1153081262080:
 mmdf:data:0:1:::3647786188800:475190722560:13:12059515296:0:
 mmdf:metadata:0:1:::13891534848:6011299328:43:58139768:0:
+mmdf:poolTotal:0:1:::data:3064453922816:1342362296320:44:1999215152:0:10143773212672:
 mmdf:fsTotal:0:1:::3661677723648:481202021888:14:12117655064:0:
 mmdf:inode:0:1:::430741822:484301506:915043328:1332164000:
 `
@@ -61,6 +62,22 @@ gpfs_fs_metadata_free_bytes{fs="project"} 6.155570511872e+12
 # HELP gpfs_fs_metadata_size_bytes GPFS total metadata size in bytes
 # TYPE gpfs_fs_metadata_size_bytes gauge
 gpfs_fs_metadata_size_bytes{fs="project"} 1.4224931684352e+13
+# HELP gpfs_fs_pool_free_bytes GPFS pool free size in bytes
+# TYPE gpfs_fs_pool_free_bytes gauge
+gpfs_fs_pool_free_bytes{fs="project",pool="data"} 1.37457899143168e+15
+gpfs_fs_pool_free_bytes{fs="project",pool="system"} 3.89698396618752e+14
+# HELP gpfs_fs_pool_free_fragments_bytes GPFS pool free fragments in bytes
+# TYPE gpfs_fs_pool_free_fragments_bytes gauge
+gpfs_fs_pool_free_fragments_bytes{fs="project",pool="data"} 2.047196315648e+12
+gpfs_fs_pool_free_fragments_bytes{fs="project",pool="system"} 1.0265051611136e+13
+# HELP gpfs_fs_pool_max_disk_size_bytes GPFS pool max disk size in bytes
+# TYPE gpfs_fs_pool_max_disk_size_bytes gauge
+gpfs_fs_pool_max_disk_size_bytes{fs="project",pool="data"} 1.0387223769776128e+16
+gpfs_fs_pool_max_disk_size_bytes{fs="project",pool="system"} 1.18075521236992e+15
+# HELP gpfs_fs_pool_total_bytes GPFS pool total size in bytes
+# TYPE gpfs_fs_pool_total_bytes gauge
+gpfs_fs_pool_total_bytes{fs="project",pool="data"} 3.138000816963584e+15
+gpfs_fs_pool_total_bytes{fs="project",pool="system"} 8.02107691106304e+14
 # HELP gpfs_fs_size_bytes GPFS filesystem total size in bytes
 # TYPE gpfs_fs_size_bytes gauge
 gpfs_fs_size_bytes{fs="project"} 3.749557989015552e+15
diff --git a/collectors/collector.go b/collectors/collector.go
index a57abdf..0aa639a 100644
--- a/collectors/collector.go
+++ b/collectors/collector.go
@@ -21,11 +21,13 @@ import (
 	"os"
 	"os/exec"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"gopkg.in/alecthomas/kingpin.v2"
 )
@@ -140,6 +142,27 @@ func SliceContains(slice []string, str string) bool {
 	return false
 }
 
+func SliceIndex(slice []string, str string) int {
+	for i, v := range slice {
+		if v == str {
+			return i
+		}
+	}
+	return -1
+}
+
+func ParseFloat(str string, toBytes bool, logger log.Logger) (float64, error) {
+	if val, err := strconv.ParseFloat(str, 64); err == nil {
+		if toBytes {
+			val = val * 1024
+		}
+		return val, nil
+	} else {
+		level.Error(logger).Log("msg", fmt.Sprintf("Error parsing %s: %s", str, err.Error()))
+		return 0, err
+	}
+}
+
 func FileExists(filename string) bool {
 	info, err := os.Stat(filename)
 	if os.IsNotExist(err) {
diff --git a/collectors/mmdf.go b/collectors/mmdf.go
index 38fb5cf..48f043c 100644
--- a/collectors/mmdf.go
+++ b/collectors/mmdf.go
@@ -17,8 +17,6 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"reflect"
-	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -32,19 +30,8 @@ import (
 var (
 	configFilesystems = kingpin.Flag("collector.mmdf.filesystems", "Filesystems to query with mmdf, comma separated. Defaults to all filesystems.").Default("").String()
 	mmdfTimeout       = kingpin.Flag("collector.mmdf.timeout", "Timeout for mmdf execution").Default("60").Int()
-	mappedSections    = []string{"inode", "fsTotal", "metadata"}
-	KbToBytes         = []string{"fsSize", "freeBlocks", "totalMetadata"}
-	dfMap             = map[string]string{
-		"inode:usedInodes":       "InodesUsed",
-		"inode:freeInodes":       "InodesFree",
-		"inode:allocatedInodes":  "InodesAllocated",
-		"inode:maxInodes":        "InodesTotal",
-		"fsTotal:fsSize":         "FSTotal",
-		"fsTotal:freeBlocks":     "FSFree",
-		"metadata:totalMetadata": "MetadataTotal",
-		"metadata:freeBlocks":    "MetadataFree",
-	}
-	MmdfExec = mmdf
+	mappedSections = []string{"inode", "fsTotal", "metadata", "poolTotal"}
+	MmdfExec       = mmdf
 )
 
 type DFMetric struct {
@@ -58,18 +45,31 @@ type DFMetric struct {
 	Metadata      bool
 	MetadataTotal float64
 	MetadataFree  float64
+	Pools         []PoolMetric
+}
+
+type PoolMetric struct {
+	PoolName          string
+	PoolTotal         float64
+	PoolFree          float64
+	PoolFreeFragments float64
+	PoolMaxDiskSize   float64
 }
 
 type MmdfCollector struct {
-	InodesUsed      *prometheus.Desc
-	InodesFree      *prometheus.Desc
-	InodesAllocated *prometheus.Desc
-	InodesTotal     *prometheus.Desc
-	FSTotal         *prometheus.Desc
-	FSFree          *prometheus.Desc
-	MetadataTotal   *prometheus.Desc
-	MetadataFree    *prometheus.Desc
-	logger          log.Logger
+	InodesUsed        *prometheus.Desc
+	InodesFree        *prometheus.Desc
+	InodesAllocated   *prometheus.Desc
+	InodesTotal       *prometheus.Desc
+	FSTotal           *prometheus.Desc
+	FSFree            *prometheus.Desc
+	MetadataTotal     *prometheus.Desc
+	MetadataFree      *prometheus.Desc
+	PoolTotal         *prometheus.Desc
+	PoolFree          *prometheus.Desc
+	PoolFreeFragments *prometheus.Desc
+	PoolMaxDiskSize   *prometheus.Desc
+	logger            log.Logger
 }
 
 func init() {
@@ -94,6 +94,14 @@ func NewMmdfCollector(logger log.Logger) Collector {
 			"GPFS total metadata size in bytes", []string{"fs"}, nil),
 		MetadataFree: prometheus.NewDesc(prometheus.BuildFQName(namespace, "fs", "metadata_free_bytes"),
 			"GPFS metadata free size in bytes", []string{"fs"}, nil),
+		PoolTotal: prometheus.NewDesc(prometheus.BuildFQName(namespace, "fs", "pool_total_bytes"),
+			"GPFS pool total size in bytes", []string{"fs", "pool"}, nil),
+		PoolFree: prometheus.NewDesc(prometheus.BuildFQName(namespace, "fs", "pool_free_bytes"),
+			"GPFS pool free size in bytes", []string{"fs", "pool"}, nil),
+		PoolFreeFragments: prometheus.NewDesc(prometheus.BuildFQName(namespace, "fs", "pool_free_fragments_bytes"),
+			"GPFS pool free fragments in bytes", []string{"fs", "pool"}, nil),
+		PoolMaxDiskSize: prometheus.NewDesc(prometheus.BuildFQName(namespace, "fs", "pool_max_disk_size_bytes"),
+			"GPFS pool max disk size in bytes", []string{"fs", "pool"}, nil),
 		logger: logger,
 	}
 }
@@ -107,6 +115,10 @@ func (c *MmdfCollector) Describe(ch chan<- *prometheus.Desc) {
 	ch <- c.FSFree
 	ch <- c.MetadataTotal
 	ch <- c.MetadataFree
+	ch <- c.PoolTotal
+	ch <- c.PoolFree
+	ch <- c.PoolFreeFragments
+	ch <- c.PoolMaxDiskSize
 }
 
 func (c *MmdfCollector) Collect(ch chan<- prometheus.Metric) {
@@ -162,6 +174,12 @@ func (c *MmdfCollector) Collect(ch chan<- prometheus.Metric) {
 				ch <- prometheus.MustNewConstMetric(c.MetadataTotal, prometheus.GaugeValue, metric.MetadataTotal, fs)
 				ch <- prometheus.MustNewConstMetric(c.MetadataFree, prometheus.GaugeValue, metric.MetadataFree, fs)
 			}
+			for _, pool := range metric.Pools {
+				ch <- prometheus.MustNewConstMetric(c.PoolTotal, prometheus.GaugeValue, pool.PoolTotal, fs, pool.PoolName)
+				ch <- prometheus.MustNewConstMetric(c.PoolFree, prometheus.GaugeValue, pool.PoolFree, fs, pool.PoolName)
+				ch <- prometheus.MustNewConstMetric(c.PoolFreeFragments, prometheus.GaugeValue, pool.PoolFreeFragments, fs, pool.PoolName)
+				ch <- prometheus.MustNewConstMetric(c.PoolMaxDiskSize, prometheus.GaugeValue, pool.PoolMaxDiskSize, fs, pool.PoolName)
+			}
 		}
 		ch <- prometheus.MustNewConstMetric(lastExecution, prometheus.GaugeValue, float64(time.Now().Unix()), label)
 	}(fs)
@@ -176,8 +194,8 @@ func (c *MmdfCollector) mmdfCollect(fs string) (DFMetric, error) {
 	if err != nil {
 		return DFMetric{}, err
 	}
-	dfMetric, err := parse_mmdf(out, c.logger)
-	return dfMetric, err
+	dfMetric := parse_mmdf(out, c.logger)
+	return dfMetric, nil
 }
 
 func mmdf(fs string, ctx context.Context) (string, error) {
@@ -193,10 +211,10 @@ func mmdf(fs string, ctx context.Context) (string, error) {
 	return out.String(), nil
 }
 
-func parse_mmdf(out string, logger log.Logger) (DFMetric, error) {
-	dfMetrics := DFMetric{Metadata: true}
+func parse_mmdf(out string, logger log.Logger) DFMetric {
+	dfMetrics := DFMetric{Metadata: false}
+	pools := []PoolMetric{}
 	headers := make(map[string][]string)
-	values := make(map[string][]string)
 	lines := strings.Split(out, "\n")
 	for _, l := range lines {
 		if !strings.HasPrefix(l, "mmdf") {
@@ -206,50 +224,89 @@
 		if len(items) < 3 {
 			continue
 		}
-		if !SliceContains(mappedSections, items[1]) {
+		section := items[1]
+		if !SliceContains(mappedSections, section) {
 			continue
 		}
 		if items[2] == "HEADER" {
 			headers[items[1]] = append(headers[items[1]], items...)
-		} else {
-			values[items[1]] = append(values[items[1]], items...)
+			continue
 		}
-	}
-	ps := reflect.ValueOf(&dfMetrics) // pointer to struct - addressable
-	s := ps.Elem()                    // struct
-	for k, vals := range headers {
-		if _, ok := values[k]; !ok {
-			if k == "metadata" {
-				dfMetrics.Metadata = false
-				continue
-			} else {
-				level.Error(logger).Log("msg", "Header section missing from values", "header", k)
-				return dfMetrics, fmt.Errorf("Header section missing from values: %s", k)
+		if section == "inode" {
+			if inodesUsedIndex := SliceIndex(headers["inode"], "usedInodes"); inodesUsedIndex != -1 {
+				if inodesUsed, err := ParseFloat(items[inodesUsedIndex], false, logger); err == nil {
+					dfMetrics.InodesUsed = inodesUsed
+				}
+			}
+			if inodesFreeIndex := SliceIndex(headers["inode"], "freeInodes"); inodesFreeIndex != -1 {
+				if inodesFree, err := ParseFloat(items[inodesFreeIndex], false, logger); err == nil {
+					dfMetrics.InodesFree = inodesFree
+				}
+			}
+			if inodesAllocatedIndex := SliceIndex(headers["inode"], "allocatedInodes"); inodesAllocatedIndex != -1 {
+				if inodesAllocated, err := ParseFloat(items[inodesAllocatedIndex], false, logger); err == nil {
+					dfMetrics.InodesAllocated = inodesAllocated
+				}
+			}
+			if inodesTotalIndex := SliceIndex(headers["inode"], "maxInodes"); inodesTotalIndex != -1 {
+				if inodesTotal, err := ParseFloat(items[inodesTotalIndex], false, logger); err == nil {
+					dfMetrics.InodesTotal = inodesTotal
+				}
 			}
 		}
-		if len(vals) != len(values[k]) {
-			level.Error(logger).Log("msg", "Length of headers does not equal length of values", "header", k, "values", len(values[k]), "headers", len(vals))
-			return dfMetrics, fmt.Errorf("Length of headers does not equal length of values: %s", k)
+		if section == "fsTotal" {
+			if fsTotalIndex := SliceIndex(headers["fsTotal"], "fsSize"); fsTotalIndex != -1 {
+				if fsTotal, err := ParseFloat(items[fsTotalIndex], true, logger); err == nil {
+					dfMetrics.FSTotal = fsTotal
+				}
+			}
+			if fsFreeIndex := SliceIndex(headers["fsTotal"], "freeBlocks"); fsFreeIndex != -1 {
+				if fsFree, err := ParseFloat(items[fsFreeIndex], true, logger); err == nil {
+					dfMetrics.FSFree = fsFree
+				}
+			}
+		}
+		if section == "metadata" {
+			dfMetrics.Metadata = true
+			if metadataTotalIndex := SliceIndex(headers["metadata"], "totalMetadata"); metadataTotalIndex != -1 {
+				if metadataTotal, err := ParseFloat(items[metadataTotalIndex], true, logger); err == nil {
+					dfMetrics.MetadataTotal = metadataTotal
+				}
+			}
+			if metadataFreeIndex := SliceIndex(headers["metadata"], "freeBlocks"); metadataFreeIndex != -1 {
+				if metadataFree, err := ParseFloat(items[metadataFreeIndex], true, logger); err == nil {
+					dfMetrics.MetadataFree = metadataFree
+				}
+			}
 		}
-		for i, v := range vals {
-			mapKey := fmt.Sprintf("%s:%s", k, v)
-			value := values[k][i]
-			if field, ok := dfMap[mapKey]; ok {
-				f := s.FieldByName(field)
-				if f.Kind() == reflect.String {
-					f.SetString(value)
-				} else if f.Kind() == reflect.Float64 {
-					if val, err := strconv.ParseFloat(value, 64); err == nil {
-						if SliceContains(KbToBytes, v) {
-							val = val * 1024
-						}
-						f.SetFloat(val)
-					} else {
-						level.Error(logger).Log("msg", fmt.Sprintf("Error parsing %s value %s: %s", mapKey, value, err.Error()))
-					}
+		if section == "poolTotal" {
+			poolMetric := PoolMetric{}
+			if poolNameIndex := SliceIndex(headers["poolTotal"], "poolName"); poolNameIndex != -1 {
+				poolMetric.PoolName = items[poolNameIndex]
+			}
+			if poolTotalIndex := SliceIndex(headers["poolTotal"], "poolSize"); poolTotalIndex != -1 {
+				if poolTotal, err := ParseFloat(items[poolTotalIndex], true, logger); err == nil {
+					poolMetric.PoolTotal = poolTotal
+				}
+			}
+			if poolFreeIndex := SliceIndex(headers["poolTotal"], "freeBlocks"); poolFreeIndex != -1 {
+				if poolFree, err := ParseFloat(items[poolFreeIndex], true, logger); err == nil {
+					poolMetric.PoolFree = poolFree
+				}
+			}
+			if poolFreeFragmentsIndex := SliceIndex(headers["poolTotal"], "freeFragments"); poolFreeFragmentsIndex != -1 {
+				if poolFreeFragments, err := ParseFloat(items[poolFreeFragmentsIndex], true, logger); err == nil {
+					poolMetric.PoolFreeFragments = poolFreeFragments
+				}
+			}
+			if poolMaxDiskSizeIndex := SliceIndex(headers["poolTotal"], "maxDiskSize"); poolMaxDiskSizeIndex != -1 {
+				if poolMaxDiskSize, err := ParseFloat(items[poolMaxDiskSizeIndex], true, logger); err == nil {
+					poolMetric.PoolMaxDiskSize = poolMaxDiskSize
 				}
 			}
+			pools = append(pools, poolMetric)
 		}
 	}
-	return dfMetrics, nil
+	dfMetrics.Pools = pools
+	return dfMetrics
 }
diff --git a/collectors/mmdf_test.go b/collectors/mmdf_test.go
index b8f5511..25af26d 100644
--- a/collectors/mmdf_test.go
+++ b/collectors/mmdf_test.go
@@ -36,9 +36,10 @@ mmdf:fsTotal:HEADER:version:reserved:reserved:fsSize:freeBlocks:freeBlocksPct:fr
 mmdf:inode:HEADER:version:reserved:reserved:usedInodes:freeInodes:allocatedInodes:maxInodes:
 mmdf:nsd:0:1:::P_META_VD102:system:771751936:300:Yes:No:320274944:41:5005384:1::
 mmdf:nsd:0:1:::P_DATA_VD02:data:46766489600:200:No:Yes:6092915712:13:154966272:0::
-mmdf:poolTotal:0:1:::data:3647786188800:475190722560:13:12059515296:0:3860104580096:
+mmdf:poolTotal:0:1:::system:783308292096:380564840448:49:10024464464:1:1153081262080:
 mmdf:data:0:1:::3647786188800:475190722560:13:12059515296:0:
 mmdf:metadata:0:1:::13891534848:6011299328:43:58139768:0:
+mmdf:poolTotal:0:1:::data:3064453922816:1342362296320:44:1999215152:0:10143773212672:
 mmdf:fsTotal:0:1:::3661677723648:481202021888:14:12117655064:0:
 mmdf:inode:0:1:::430741822:484301506:915043328:1332164000:
 `
@@ -51,39 +52,27 @@ mmdf:fsTotal:HEADER:version:reserved:reserved:fsSize:freeBlocks:freeBlocksPct:fr
 mmdf:inode:HEADER:version:reserved:reserved:usedInodes:freeInodes:allocatedInodes:maxInodes:
 mmdf:nsd:0:1:::P_META_VD102:system:771751936:300:Yes:No:320274944:41:5005384:1::
 mmdf:nsd:0:1:::P_DATA_VD02:data:46766489600:200:No:Yes:6092915712:13:154966272:0::
-mmdf:poolTotal:0:1:::data:3647786188800:475190722560:13:12059515296:0:3860104580096:
+mmdf:poolTotal:0:1:::system:783308292096:380564840448:49:10024464464:1:1153081262080:
 mmdf:data:0:1:::3647786188800:475190722560:13:12059515296:0:
+mmdf:poolTotal:0:1:::data:3064453922816:1342362296320:44:1999215152:0:10143773212672:
 mmdf:fsTotal:0:1:::3661677723648:481202021888:14:12117655064:0:
 mmdf:inode:0:1:::430741822:484301506:915043328:1332164000:
 `
-	mmdfStdoutMissingValues = `
+	mmdfStdoutErrors = `
 mmdf:nsd:HEADER:version:reserved:reserved:nsdName:storagePool:diskSize:failureGroup:metadata:data:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:diskAvailableForAlloc:
 mmdf:poolTotal:HEADER:version:reserved:reserved:poolName:poolSize:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:maxDiskSize:
 mmdf:data:HEADER:version:reserved:reserved:totalData:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:
 mmdf:metadata:HEADER:version:reserved:reserved:totalMetadata:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:
-mmdf:fsTotal:HEADER:version:reserved:reserved:fsSize:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:
-mmdf:inode:HEADER:version:reserved:reserved:usedInodes:freeInodes:allocatedInodes:maxInodes:
-mmdf:nsd:0:1:::P_META_VD102:system:771751936:300:Yes:No:320274944:41:5005384:1::
-mmdf:nsd:0:1:::P_DATA_VD02:data:46766489600:200:No:Yes:6092915712:13:154966272:0::
-mmdf:poolTotal:0:1:::data:3647786188800:475190722560:13:12059515296:0:3860104580096:
-mmdf:data:0:1:::3647786188800:475190722560:13:12059515296:0:
-mmdf:metadata:0:1:::13891534848:6011299328:43:58139768:0:
-mmdf:inode:0:1:::430741822:484301506:915043328:1332164000:
-`
-	mmdfStdoutErrLen = `
-mmdf:nsd:HEADER:version:reserved:reserved:nsdName:storagePool:diskSize:failureGroup:metadata:data:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:diskAvailableForAlloc:
-mmdf:poolTotal:HEADER:version:reserved:reserved:poolName:poolSize:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:maxDiskSize:
-mmdf:data:HEADER:version:reserved:reserved:totalData:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:
-mmdf:metadata:HEADER:version:reserved:reserved:totalMetadata:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:
-mmdf:fsTotal:HEADER:version:reserved:reserved:fsSize:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:
+mmdf:fsTotal:HEADER:version:reserved:reserved:foo:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:
 mmdf:inode:HEADER:version:reserved:reserved:usedInodes:freeInodes:allocatedInodes:maxInodes:
 mmdf:nsd:0:1:::P_META_VD102:system:771751936:300:Yes:No:320274944:41:5005384:1::
 mmdf:nsd:0:1:::P_DATA_VD02:data:46766489600:200:No:Yes:6092915712:13:154966272:0::
-mmdf:poolTotal:0:1:::data:3647786188800:475190722560:13:12059515296:0:3860104580096:
+mmdf:poolTotal:0:1:::system:foo:380564840448:49:10024464464:1:1153081262080:
 mmdf:data:0:1:::3647786188800:475190722560:13:12059515296:0:
 mmdf:metadata:0:1:::13891534848:6011299328:43:58139768:0:
-mmdf:fsTotal:0:1:::3661677723648:481202021888:14:12117655064:
-mmdf:inode:0:1:::430741822:484301506:915043328:1332164000:
+mmdf:poolTotal:0:1:::data:3064453922816:1342362296320:44:1999215152:0:10143773212672:
+mmdf:fsTotal:0:1:::foo:481202021888:14:12117655064:0:
+mmdf:inode:0:1:::foo:484301506:915043328:1332164000:
 `
 )
@@ -136,11 +125,7 @@ func TestMmdfTimeout(t *testing.T) {
 }
 
 func TestParseMmdf(t *testing.T) {
-	dfmetrics, err := parse_mmdf(mmdfStdout, log.NewNopLogger())
-	if err != nil {
-		t.Errorf("Unexpected error: %s", err.Error())
-		return
-	}
+	dfmetrics := parse_mmdf(mmdfStdout, log.NewNopLogger())
 	if dfmetrics.InodesFree != 484301506 {
 		t.Errorf("Unexpected value for InodesFree, got %v", dfmetrics.InodesFree)
 	}
@@ -153,32 +138,34 @@ func TestParseMmdf(t *testing.T) {
 	if dfmetrics.MetadataTotal != 14224931684352 {
 		t.Errorf("Unexpected value for MetadataTotal, got %v", dfmetrics.MetadataTotal)
 	}
-	dfmetrics, err = parse_mmdf(mmdfStdoutMissingMetadata, log.NewNopLogger())
-	if err != nil {
-		t.Errorf("Unexpected error: %s", err.Error())
-		return
+	if len(dfmetrics.Pools) != 2 {
+		t.Errorf("Unexpected number of pools, got %v", len(dfmetrics.Pools))
 	}
+	dfmetrics = parse_mmdf(mmdfStdoutErrors, log.NewNopLogger())
 	if dfmetrics.InodesFree != 484301506 {
 		t.Errorf("Unexpected value for InodesFree, got %v", dfmetrics.InodesFree)
 	}
-	if dfmetrics.FSTotal != 3749557989015552 {
+	if dfmetrics.FSTotal != 0 {
 		t.Errorf("Unexpected value for FSTotal, got %v", dfmetrics.FSTotal)
 	}
-	if dfmetrics.Metadata != false {
+	if dfmetrics.Metadata != true {
 		t.Errorf("Unexpected value for Metadata, got %v", dfmetrics.Metadata)
 	}
-}
-
-func TestParseMmdfErrors(t *testing.T) {
-	_, err := parse_mmdf(mmdfStdoutMissingValues, log.NewNopLogger())
-	if err == nil {
-		t.Errorf("Expected error")
-		return
+	if dfmetrics.MetadataTotal != 14224931684352 {
+		t.Errorf("Unexpected value for MetadataTotal, got %v", dfmetrics.MetadataTotal)
 	}
-	_, err = parse_mmdf(mmdfStdoutErrLen, log.NewNopLogger())
-	if err == nil {
-		t.Errorf("Expected error")
-		return
+	if len(dfmetrics.Pools) != 2 {
+		t.Errorf("Unexpected number of pools, got %v", len(dfmetrics.Pools))
+	}
+	dfmetrics = parse_mmdf(mmdfStdoutMissingMetadata, log.NewNopLogger())
+	if dfmetrics.InodesFree != 484301506 {
+		t.Errorf("Unexpected value for InodesFree, got %v", dfmetrics.InodesFree)
+	}
+	if dfmetrics.FSTotal != 3749557989015552 {
+		t.Errorf("Unexpected value for FSTotal, got %v", dfmetrics.FSTotal)
+	}
+	if dfmetrics.Metadata != false {
+		t.Errorf("Unexpected value for Metadata, got %v", dfmetrics.Metadata)
 	}
 }
@@ -210,6 +197,22 @@ func TestMmdfCollector(t *testing.T) {
 	# HELP gpfs_fs_metadata_size_bytes GPFS total metadata size in bytes
 	# TYPE gpfs_fs_metadata_size_bytes gauge
 	gpfs_fs_metadata_size_bytes{fs="project"} 14224931684352
+	# HELP gpfs_fs_pool_free_bytes GPFS pool free size in bytes
+	# TYPE gpfs_fs_pool_free_bytes gauge
+	gpfs_fs_pool_free_bytes{fs="project",pool="data"} 1374578991431680
+	gpfs_fs_pool_free_bytes{fs="project",pool="system"} 389698396618752
+	# HELP gpfs_fs_pool_free_fragments_bytes GPFS pool free fragments in bytes
+	# TYPE gpfs_fs_pool_free_fragments_bytes gauge
+	gpfs_fs_pool_free_fragments_bytes{fs="project",pool="data"} 2047196315648
+	gpfs_fs_pool_free_fragments_bytes{fs="project",pool="system"} 10265051611136
+	# HELP gpfs_fs_pool_max_disk_size_bytes GPFS pool max disk size in bytes
+	# TYPE gpfs_fs_pool_max_disk_size_bytes gauge
+	gpfs_fs_pool_max_disk_size_bytes{fs="project",pool="data"} 10387223769776128
+	gpfs_fs_pool_max_disk_size_bytes{fs="project",pool="system"} 1180755212369920
+	# HELP gpfs_fs_pool_total_bytes GPFS pool total size in bytes
+	# TYPE gpfs_fs_pool_total_bytes gauge
+	gpfs_fs_pool_total_bytes{fs="project",pool="data"} 3138000816963584
+	gpfs_fs_pool_total_bytes{fs="project",pool="system"} 802107691106304
 	# HELP gpfs_fs_size_bytes GPFS filesystem total size in bytes
 	# TYPE gpfs_fs_size_bytes gauge
 	gpfs_fs_size_bytes{fs="project"} 3749557989015552
@@ -221,12 +224,14 @@ func TestMmdfCollector(t *testing.T) {
 	gatherers := setupGatherer(collector)
 	if val, err := testutil.GatherAndCount(gatherers); err != nil {
 		t.Errorf("Unexpected error: %v", err)
-	} else if val != 12 {
-		t.Errorf("Unexpected collection count %d, expected 12", val)
+	} else if val != 20 {
+		t.Errorf("Unexpected collection count %d, expected 20", val)
 	}
 	if err := testutil.GatherAndCompare(gatherers, strings.NewReader(expected),
 		"gpfs_fs_used_inodes", "gpfs_fs_free_inodes", "gpfs_fs_allocated_inodes", "gpfs_fs_inodes",
 		"gpfs_fs_free_bytes", "gpfs_fs_free_percent", "gpfs_fs_size_bytes",
+		"gpfs_fs_pool_free_bytes", "gpfs_fs_pool_free_fragments_bytes",
+		"gpfs_fs_pool_max_disk_size_bytes", "gpfs_fs_pool_total_bytes",
 		"gpfs_fs_metadata_size_bytes", "gpfs_fs_metadata_free_bytes", "gpfs_fs_metadata_free_percent"); err != nil {
 		t.Errorf("unexpected collecting result:\n%s", err)
 	}
@@ -254,6 +259,22 @@ func TestMmdfCollectorNoMetadata(t *testing.T) {
 	# HELP gpfs_fs_inodes GPFS filesystem inodes total
 	# TYPE gpfs_fs_inodes gauge
 	gpfs_fs_inodes{fs="project"} 1332164000
+	# HELP gpfs_fs_pool_free_bytes GPFS pool free size in bytes
+	# TYPE gpfs_fs_pool_free_bytes gauge
+	gpfs_fs_pool_free_bytes{fs="project",pool="data"} 1374578991431680
+	gpfs_fs_pool_free_bytes{fs="project",pool="system"} 389698396618752
+	# HELP gpfs_fs_pool_free_fragments_bytes GPFS pool free fragments in bytes
+	# TYPE gpfs_fs_pool_free_fragments_bytes gauge
+	gpfs_fs_pool_free_fragments_bytes{fs="project",pool="data"} 2047196315648
+	gpfs_fs_pool_free_fragments_bytes{fs="project",pool="system"} 10265051611136
+	# HELP gpfs_fs_pool_max_disk_size_bytes GPFS pool max disk size in bytes
+	# TYPE gpfs_fs_pool_max_disk_size_bytes gauge
+	gpfs_fs_pool_max_disk_size_bytes{fs="project",pool="data"} 10387223769776128
+	gpfs_fs_pool_max_disk_size_bytes{fs="project",pool="system"} 1180755212369920
+	# HELP gpfs_fs_pool_total_bytes GPFS pool total size in bytes
+	# TYPE gpfs_fs_pool_total_bytes gauge
+	gpfs_fs_pool_total_bytes{fs="project",pool="data"} 3138000816963584
+	gpfs_fs_pool_total_bytes{fs="project",pool="system"} 802107691106304
 	# HELP gpfs_fs_size_bytes GPFS filesystem total size in bytes
 	# TYPE gpfs_fs_size_bytes gauge
 	gpfs_fs_size_bytes{fs="project"} 3749557989015552
@@ -265,12 +286,14 @@ func TestMmdfCollectorNoMetadata(t *testing.T) {
 	gatherers := setupGatherer(collector)
 	if val, err := testutil.GatherAndCount(gatherers); err != nil {
 		t.Errorf("Unexpected error: %v", err)
-	} else if val != 10 {
-		t.Errorf("Unexpected collection count %d, expected 10", val)
+	} else if val != 18 {
+		t.Errorf("Unexpected collection count %d, expected 18", val)
 	}
 	if err := testutil.GatherAndCompare(gatherers, strings.NewReader(expected),
 		"gpfs_fs_used_inodes", "gpfs_fs_free_inodes", "gpfs_fs_allocated_inodes", "gpfs_fs_inodes",
 		"gpfs_fs_free_bytes", "gpfs_fs_free_percent", "gpfs_fs_size_bytes",
+		"gpfs_fs_pool_free_bytes", "gpfs_fs_pool_free_fragments_bytes",
+		"gpfs_fs_pool_max_disk_size_bytes", "gpfs_fs_pool_total_bytes",
 		"gpfs_fs_metadata_size_bytes", "gpfs_fs_metadata_free_bytes", "gpfs_fs_metadata_free_percent"); err != nil {
 		t.Errorf("unexpected collecting result:\n%s", err)
 	}
@@ -314,6 +337,22 @@ mmlsfs::0:1:::project:defaultMountPoint:%2Ffs%2Fproject::
 	# HELP gpfs_fs_metadata_size_bytes GPFS total metadata size in bytes
 	# TYPE gpfs_fs_metadata_size_bytes gauge
 	gpfs_fs_metadata_size_bytes{fs="project"} 14224931684352
+	# HELP gpfs_fs_pool_free_bytes GPFS pool free size in bytes
+	# TYPE gpfs_fs_pool_free_bytes gauge
+	gpfs_fs_pool_free_bytes{fs="project",pool="data"} 1374578991431680
+	gpfs_fs_pool_free_bytes{fs="project",pool="system"} 389698396618752
+	# HELP gpfs_fs_pool_free_fragments_bytes GPFS pool free fragments in bytes
+	# TYPE gpfs_fs_pool_free_fragments_bytes gauge
+	gpfs_fs_pool_free_fragments_bytes{fs="project",pool="data"} 2047196315648
+	gpfs_fs_pool_free_fragments_bytes{fs="project",pool="system"} 10265051611136
+	# HELP gpfs_fs_pool_max_disk_size_bytes GPFS pool max disk size in bytes
+	# TYPE gpfs_fs_pool_max_disk_size_bytes gauge
+	gpfs_fs_pool_max_disk_size_bytes{fs="project",pool="data"} 10387223769776128
+	gpfs_fs_pool_max_disk_size_bytes{fs="project",pool="system"} 1180755212369920
+	# HELP gpfs_fs_pool_total_bytes GPFS pool total size in bytes
+	# TYPE gpfs_fs_pool_total_bytes gauge
+	gpfs_fs_pool_total_bytes{fs="project",pool="data"} 3138000816963584
+	gpfs_fs_pool_total_bytes{fs="project",pool="system"} 802107691106304
 	# HELP gpfs_fs_size_bytes GPFS filesystem total size in bytes
 	# TYPE gpfs_fs_size_bytes gauge
 	gpfs_fs_size_bytes{fs="project"} 3749557989015552
@@ -322,12 +361,14 @@
 	gatherers := setupGatherer(collector)
 	if val, err := testutil.GatherAndCount(gatherers); err != nil {
 		t.Errorf("Unexpected error: %v", err)
-	} else if val != 14 {
-		t.Errorf("Unexpected collection count %d, expected 14", val)
+	} else if val != 22 {
+		t.Errorf("Unexpected collection count %d, expected 22", val)
 	}
 	if err := testutil.GatherAndCompare(gatherers, strings.NewReader(expected),
 		"gpfs_fs_used_inodes", "gpfs_fs_free_inodes", "gpfs_fs_allocated_inodes", "gpfs_fs_inodes",
 		"gpfs_fs_free_bytes", "gpfs_fs_free_percent", "gpfs_fs_size_bytes",
+		"gpfs_fs_pool_free_bytes", "gpfs_fs_pool_free_fragments_bytes",
+		"gpfs_fs_pool_max_disk_size_bytes", "gpfs_fs_pool_total_bytes",
 		"gpfs_fs_metadata_size_bytes", "gpfs_fs_metadata_free_bytes", "gpfs_fs_metadata_free_percent"); err != nil {
 		t.Errorf("unexpected collecting result:\n%s", err)
 	}
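
A note on the parsing approach this patch introduces: parse_mmdf now locates
each value by looking up the field's name in the matching HEADER row, rather
than mapping fixed columns onto struct fields through reflection. Below is a
minimal, self-contained sketch of that lookup technique. The package layout,
main function, and printed output are illustrative assumptions, not part of
the exporter; SliceIndex mirrors the helper added in collectors/collector.go,
and the sample rows come from the test fixtures above.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// SliceIndex mirrors the helper added in collectors/collector.go:
// it returns the position of str in slice, or -1 when absent.
func SliceIndex(slice []string, str string) int {
	for i, v := range slice {
		if v == str {
			return i
		}
	}
	return -1
}

func main() {
	// HEADER row and one matching data row for the poolTotal section,
	// taken from the mmdf test fixtures. Because HEADER rows are stored
	// whole (including the leading "mmdf:poolTotal:HEADER" fields),
	// header indexes line up with data-row indexes.
	header := strings.Split("mmdf:poolTotal:HEADER:version:reserved:reserved:poolName:poolSize:freeBlocks:freeBlocksPct:freeFragments:freeFragmentsPct:maxDiskSize:", ":")
	value := strings.Split("mmdf:poolTotal:0:1:::data:3064453922816:1342362296320:44:1999215152:0:10143773212672:", ":")

	if i := SliceIndex(header, "poolSize"); i != -1 && i < len(value) {
		// mmdf reports sizes in kilobytes; like ParseFloat(..., true, ...)
		// in the patch, multiply by 1024 to get bytes.
		if kb, err := strconv.ParseFloat(value[i], 64); err == nil {
			fmt.Printf("pool %s total: %.0f bytes\n", value[SliceIndex(header, "poolName")], kb*1024)
		}
	}
	// Prints: pool data total: 3138000816963584 bytes, the value the updated
	// tests expect for gpfs_fs_pool_total_bytes{pool="data"}.
}

A malformed field, such as the "foo" values in the mmdfStdoutErrors fixture,
simply fails strconv.ParseFloat, so the corresponding metric keeps its zero
value instead of aborting the whole parse, which is what TestParseMmdf now
asserts.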