From ee399026bc2e6c09f247253514384e1094e2d0b0 Mon Sep 17 00:00:00 2001
From: ibrahimkk-moideen
Date: Thu, 14 Mar 2024 12:38:47 -0400
Subject: [PATCH 1/2] update fetching response on power and raid metrics

---
 cisco/s3260m4/exporter.go | 57 ++++++++++++++++++++++++++++++++++++--
 cisco/s3260m5/exporter.go | 58 +++++++++++++++++++++++++++++++++++++--
 hpe/dl360/exporter.go     |  4 +++
 3 files changed, 113 insertions(+), 6 deletions(-)

diff --git a/cisco/s3260m4/exporter.go b/cisco/s3260m4/exporter.go
index 298858a..fe04b46 100644
--- a/cisco/s3260m4/exporter.go
+++ b/cisco/s3260m4/exporter.go
@@ -207,11 +207,20 @@ func NewExporter(ctx context.Context, target, uri, profile string) (*Exporter, e
 		)
 	}

+	rcontrollers, err := getRaidEndpoint(fqdn.String()+uri+"/Systems/"+serial+"/Storage", target, retryClient)
+	if err != nil {
+		log.Error("error when getting storage controller endpoints from "+S3260M4, zap.Error(err), zap.Any("trace_id", ctx.Value("traceID")))
+		return nil, err
+	}
+
+	for _, rcontroller := range rcontrollers.Members {
+		tasks = append(tasks,
+			pool.NewTask(common.Fetch(fqdn.String()+rcontroller.URL, DRIVE, target, profile, retryClient)))
+	}
+
 	tasks = append(tasks,
 		pool.NewTask(common.Fetch(fqdn.String()+mgr+"/Processors/CPU1", PROCESSOR, target, profile, retryClient)),
-		pool.NewTask(common.Fetch(fqdn.String()+mgr+"/Processors/CPU2", PROCESSOR, target, profile, retryClient)),
-		pool.NewTask(common.Fetch(fqdn.String()+uri+"/Systems/"+serial+"/Storage/SBMezz1", DRIVE, target, profile, retryClient)),
-		pool.NewTask(common.Fetch(fqdn.String()+uri+"/Systems/"+serial+"/Storage/IOEMezz1", DRIVE, target, profile, retryClient)))
+		pool.NewTask(common.Fetch(fqdn.String()+mgr+"/Processors/CPU2", PROCESSOR, target, profile, retryClient)))

 	for _, dimm := range dimms.Members {
 		tasks = append(tasks,
@@ -743,3 +752,45 @@ func getDIMMEndpoints(url, host string, client *retryablehttp.Client) (Collectio

 	return dimms, nil
 }
+
+func getRaidEndpoint(url, host string, client *retryablehttp.Client) (Collection, error) {
+	var rcontrollers Collection
+	var resp *http.Response
+	var err error
+	retryCount := 0
+	req := common.BuildRequest(url, host)
+
+	resp, err = common.DoRequest(client, req)
+	if err != nil {
+		return rcontrollers, err
+	}
+	defer resp.Body.Close()
+	if !(resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices) {
+		if resp.StatusCode == http.StatusNotFound {
+			for retryCount < 3 && resp.StatusCode == http.StatusNotFound {
+				time.Sleep(client.RetryWaitMin)
+				resp, err = common.DoRequest(client, req)
+				retryCount = retryCount + 1
+			}
+			if err != nil {
+				return rcontrollers, err
+			} else if !(resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices) {
+				return rcontrollers, fmt.Errorf("HTTP status %d", resp.StatusCode)
+			}
+		} else {
+			return rcontrollers, fmt.Errorf("HTTP status %d", resp.StatusCode)
+		}
+	}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return rcontrollers, fmt.Errorf("Error reading Response Body - " + err.Error())
+	}
+
+	err = json.Unmarshal(body, &rcontrollers)
+	if err != nil {
+		return rcontrollers, fmt.Errorf("Error Unmarshalling S3260M4 Chassis struct - " + err.Error())
+	}
+
+	return rcontrollers, nil
+}
diff --git a/cisco/s3260m5/exporter.go b/cisco/s3260m5/exporter.go
index 0b875d3..7b75912 100644
--- a/cisco/s3260m5/exporter.go
+++ b/cisco/s3260m5/exporter.go
@@ -224,11 +224,21 @@ func NewExporter(ctx context.Context, target, uri, profile string) (*Exporter, e
 		}
 	}

+	// Storage controller endpoints array
+	rcontrollers, err := getRaidEndpoint(fqdn.String()+uri+"/Systems/"+serial+"/Storage", target, retryClient)
+	if err != nil {
+		log.Error("error when getting storage controller endpoints from "+S3260M5, zap.Error(err), zap.Any("trace_id", ctx.Value("traceID")))
+		return nil, err
+	}
+
+	for _, rcontroller := range rcontrollers.Members {
+		tasks = append(tasks,
+			pool.NewTask(common.Fetch(fqdn.String()+rcontroller.URL, DRIVE, target, profile, retryClient)))
+	}
+
 	tasks = append(tasks,
 		pool.NewTask(common.Fetch(fqdn.String()+mgr+"/Processors/CPU1", PROCESSOR, target, profile, retryClient)),
-		pool.NewTask(common.Fetch(fqdn.String()+mgr+"/Processors/CPU2", PROCESSOR, target, profile, retryClient)),
-		pool.NewTask(common.Fetch(fqdn.String()+uri+"/Systems/"+serial+"/Storage/SBMezz1", DRIVE, target, profile, retryClient)),
-		pool.NewTask(common.Fetch(fqdn.String()+uri+"/Systems/"+serial+"/Storage/SBMezz2", DRIVE, target, profile, retryClient)))
+		pool.NewTask(common.Fetch(fqdn.String()+mgr+"/Processors/CPU2", PROCESSOR, target, profile, retryClient)))

 	for _, dimm := range dimms.Members {
 		tasks = append(tasks,
@@ -740,3 +750,45 @@ func getDIMMEndpoints(url, host string, client *retryablehttp.Client) (Collectio

 	return dimms, nil
 }
+
+func getRaidEndpoint(url, host string, client *retryablehttp.Client) (Collection, error) {
+	var rcontrollers Collection
+	var resp *http.Response
+	var err error
+	retryCount := 0
+	req := common.BuildRequest(url, host)
+
+	resp, err = common.DoRequest(client, req)
+	if err != nil {
+		return rcontrollers, err
+	}
+	defer resp.Body.Close()
+	if !(resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices) {
+		if resp.StatusCode == http.StatusNotFound {
+			for retryCount < 3 && resp.StatusCode == http.StatusNotFound {
+				time.Sleep(client.RetryWaitMin)
+				resp, err = common.DoRequest(client, req)
+				retryCount = retryCount + 1
+			}
+			if err != nil {
+				return rcontrollers, err
+			} else if !(resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices) {
+				return rcontrollers, fmt.Errorf("HTTP status %d", resp.StatusCode)
+			}
+		} else {
+			return rcontrollers, fmt.Errorf("HTTP status %d", resp.StatusCode)
+		}
+	}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return rcontrollers, fmt.Errorf("Error reading Response Body - " + err.Error())
+	}
+
+	err = json.Unmarshal(body, &rcontrollers)
+	if err != nil {
+		return rcontrollers, fmt.Errorf("Error Unmarshalling S3260M5 Chassis struct - " + err.Error())
+	}
+
+	return rcontrollers, nil
+}
diff --git a/hpe/dl360/exporter.go b/hpe/dl360/exporter.go
index a9e983f..bda842b 100644
--- a/hpe/dl360/exporter.go
+++ b/hpe/dl360/exporter.go
@@ -484,6 +484,8 @@ func (e *Exporter) exportPowerMetrics(body []byte) error {
 		if ps.Status.State == "Enabled" {
 			if ps.MemberID != "" {
 				(*dlPower)["supplyOutput"].WithLabelValues(ps.MemberID, ps.SparePartNumber).Set(float64(ps.LastPowerOutputWatts))
+			} else if strconv.Itoa(ps.Oem.Hpe.BayNumber) != "" {
+				(*dlPower)["supplyOutput"].WithLabelValues(strconv.Itoa(ps.Oem.Hpe.BayNumber), ps.SparePartNumber).Set(float64(ps.LastPowerOutputWatts))
 			} else {
 				(*dlPower)["supplyOutput"].WithLabelValues(strconv.Itoa(ps.Oem.Hp.BayNumber), ps.SparePartNumber).Set(float64(ps.LastPowerOutputWatts))
 			}
@@ -494,6 +496,8 @@ func (e *Exporter) exportPowerMetrics(body []byte) error {
 		}
 		if ps.MemberID != "" {
 			(*dlPower)["supplyStatus"].WithLabelValues(ps.MemberID, ps.SparePartNumber).Set(state)
+		} else if strconv.Itoa(ps.Oem.Hpe.BayNumber) != "" {
+			(*dlPower)["supplyStatus"].WithLabelValues(strconv.Itoa(ps.Oem.Hpe.BayNumber), ps.SparePartNumber).Set(state)
(*dlPower)["supplyStatus"].WithLabelValues(strconv.Itoa(ps.Oem.Hpe.BayNumber), ps.SparePartNumber).Set(state) } else { (*dlPower)["supplyStatus"].WithLabelValues(strconv.Itoa(ps.Oem.Hp.BayNumber), ps.SparePartNumber).Set(state) } From af09bededf93ebdaadd3a658303c65cb9ebcde3e Mon Sep 17 00:00:00 2001 From: ibrahimkk-moideen Date: Fri, 15 Mar 2024 10:33:10 -0400 Subject: [PATCH 2/2] read null in psu checks and include secretname for kv2 --- hpe/dl360/exporter.go | 4 ++-- vault/vault.go | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/hpe/dl360/exporter.go b/hpe/dl360/exporter.go index bda842b..ebdd48e 100644 --- a/hpe/dl360/exporter.go +++ b/hpe/dl360/exporter.go @@ -484,7 +484,7 @@ func (e *Exporter) exportPowerMetrics(body []byte) error { if ps.Status.State == "Enabled" { if ps.MemberID != "" { (*dlPower)["supplyOutput"].WithLabelValues(ps.MemberID, ps.SparePartNumber).Set(float64(ps.LastPowerOutputWatts)) - } else if strconv.Itoa(ps.Oem.Hpe.BayNumber) != "" { + } else if string(ps.Oem.Hp.BayNumber) == "null" { (*dlPower)["supplyOutput"].WithLabelValues(strconv.Itoa(ps.Oem.Hpe.BayNumber), ps.SparePartNumber).Set(float64(ps.LastPowerOutputWatts)) } else { (*dlPower)["supplyOutput"].WithLabelValues(strconv.Itoa(ps.Oem.Hp.BayNumber), ps.SparePartNumber).Set(float64(ps.LastPowerOutputWatts)) @@ -496,7 +496,7 @@ func (e *Exporter) exportPowerMetrics(body []byte) error { } if ps.MemberID != "" { (*dlPower)["supplyStatus"].WithLabelValues(ps.MemberID, ps.SparePartNumber).Set(state) - } else if strconv.Itoa(ps.Oem.Hpe.BayNumber) != "" { + } else if string(ps.Oem.Hp.BayNumber) == "null" { (*dlPower)["supplyStatus"].WithLabelValues(strconv.Itoa(ps.Oem.Hpe.BayNumber), ps.SparePartNumber).Set(state) } else { (*dlPower)["supplyStatus"].WithLabelValues(strconv.Itoa(ps.Oem.Hp.BayNumber), ps.SparePartNumber).Set(state) diff --git a/vault/vault.go b/vault/vault.go index 593ed50..d4afbaf 100644 --- a/vault/vault.go +++ b/vault/vault.go @@ -115,7 +115,11 @@ func (v *Vault) GetKVSecret(ctx context.Context, props *SecretProperties, secret kvSecret, err = v.client.KVv1(props.MountPath).Get(ctx, fmt.Sprintf("%s/%s", props.Path, secret)) } } else { - kvSecret, err = v.client.KVv2(props.MountPath).Get(ctx, fmt.Sprintf("%s/%s", props.Path, secret)) + if props.SecretName != "" { + kvSecret, err = v.client.KVv2(props.MountPath).Get(ctx, fmt.Sprintf("%s/%s", props.Path, props.SecretName)) + } else { + kvSecret, err = v.client.KVv2(props.MountPath).Get(ctx, fmt.Sprintf("%s/%s", props.Path, secret)) + } } if err != nil {