9
9
import json
10
10
import logging as _logging
11
11
import os
12
+ import requests
12
13
import sys
13
14
import time
14
15
32
33
from benchmark_logger import logging
33
34
34
35
from benchmark_utils import DataAggregator
36
+ from benchmark_utils import generate_json_metrics
35
37
from benchmark_utils import print_dict
36
38
from benchmark_utils import timed_section
37
39
@@ -140,19 +142,12 @@ def _export_runtime_metrics_to_json(self, metric_dict):
140
142
if file_path is None :
141
143
return
142
144
143
- metric_dict = {
144
- # Creating a copy to avoid modifying the original
145
- "results" : copy .deepcopy (metric_dict ),
146
- "runtime_arguments" : vars (self ._args )
147
- }
145
+ json_string = generate_json_metrics (
146
+ metrics = metric_dict ,
147
+ args = vars (self ._args ),
148
+ )
148
149
149
150
with open (file_path , 'w' ) as json_f :
150
- json_string = json .dumps (
151
- metric_dict ,
152
- default = lambda o : o .__dict__ ,
153
- sort_keys = True ,
154
- indent = 4
155
- )
156
151
print (json_string , file = json_f )
157
152
158
153
except Exception as e :
@@ -205,6 +200,36 @@ def _export_runtime_metrics_to_csv(self, metric_dict):
205
200
except Exception as e :
206
201
logging .error (f"An exception occured during export to CSV: { e } " )
207
202
203
def _upload_metrics_to_endpoint(self, metric_dict):
    """Upload runtime metrics as a JSON payload to a user-supplied endpoint.

    No-op when ``--upload_metrics_endpoint`` is not set. The payload is
    built by ``generate_json_metrics`` (metrics plus the runtime arguments)
    and sent with an HTTP PUT. Any failure is logged and swallowed so that
    a benchmark run never aborts because of an upload problem.

    Args:
        metric_dict: dict of collected runtime metrics to serialize.
    """
    try:
        if self._args.upload_metrics_endpoint is None:
            return

        json_string = generate_json_metrics(
            metrics=metric_dict,
            args=vars(self._args),
        )

        headers = {"Content-Type": "application/json"}

        response = requests.put(
            self._args.upload_metrics_endpoint,
            # BUG FIX: original passed `data=json.dumps(data)` where `data`
            # was never defined (NameError) and would have re-serialized an
            # already-serialized payload. Send the prepared JSON string.
            data=json_string,
            headers=headers
        )
        # Surface HTTP-level failures (4xx/5xx) as exceptions so they are
        # caught and logged below instead of passing silently.
        response.raise_for_status()

        logging.info(
            "Metrics Uploaded to endpoint: "
            f"`{self._args.upload_metrics_endpoint}` with experiment name: "
            f"`{self._args.experiment_name}`."
        )

    except Exception as e:
        # BUG FIX: message previously said "export to JSON" — copy-pasted
        # from the JSON-export handler and misleading in logs.
        logging.error(f"An exception occured during metrics upload: {e}")
232
+
208
233
def _get_graph_func (self ):
209
234
"""Retreives a frozen SavedModel and applies TF-TRT
210
235
use_tftrt: bool, if true use TensorRT
@@ -587,9 +612,12 @@ def start_profiling():
587
612
if not self ._args .use_synthetic_data :
588
613
data_aggregator .aggregate_data (y_pred , y )
589
614
590
- if (not self ._args .debug_performance and
591
- step_idx % self ._args .display_every !=
592
- 0 ): # avoids double printing
615
+ # yapf: disable
616
+ if (
617
+ not self ._args .debug_performance and
618
+ # avoids double printing
619
+ step_idx % self ._args .display_every != 0
620
+ ):
593
621
log_step (
594
622
step_idx ,
595
623
display_every = 1 , # force print
@@ -602,6 +630,7 @@ def start_profiling():
602
630
dequeue_times [- self ._args .display_every :]
603
631
) * 1000
604
632
)
633
+ # yapf: enable
605
634
606
635
if step_idx >= 100 :
607
636
stop_profiling ()
@@ -668,6 +697,7 @@ def timing_metrics(time_arr, log_prefix):
668
697
669
698
self ._export_runtime_metrics_to_json (metrics )
670
699
self ._export_runtime_metrics_to_csv (metrics )
700
+ self ._upload_metrics_to_endpoint (metrics )
671
701
672
702
def log_value (key , val ):
673
703
if isinstance (val , (int , str )):
0 commit comments