Skip to content
This repository was archived by the owner on Feb 3, 2025. It is now read-only.

Commit 02448bf

Browse files
author
DEKHTIARJonathan
committed
[TF-TRT] Remote Upload Implemented
1 parent 0de3370 commit 02448bf

File tree

3 files changed

+50
-13
lines changed

3 files changed

+50
-13
lines changed

tftrt/benchmarking-python/benchmark_args.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -424,8 +424,11 @@ def _validate_args(self, args):
424424
"doesn't exist or is not a directory"
425425
)
426426

427-
if args.upload_metrics_endpoint is not None:
428-
raise NotImplementedError("This feature is not yet implemented.")
427+
if (
428+
args.upload_metrics_endpoint is not None and
429+
args.experiment_name is None):
430+
raise NotImplementedError("--experiment_name must be specified if "
431+
"--upload_metrics_endpoint is set.")
429432

430433
def _post_process_args(self, args):
431434
if args.use_synthetic_data:

tftrt/benchmarking-python/benchmark_runner.py

Lines changed: 29 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@
3232
from benchmark_logger import logging
3333

3434
from benchmark_utils import DataAggregator
35+
from benchmark_utils import generate_json_metrics
3536
from benchmark_utils import print_dict
3637
from benchmark_utils import timed_section
3738

@@ -140,19 +141,12 @@ def _export_runtime_metrics_to_json(self, metric_dict):
140141
if file_path is None:
141142
return
142143

143-
metric_dict = {
144-
# Creating a copy to avoid modifying the original
145-
"results": copy.deepcopy(metric_dict),
146-
"runtime_arguments": vars(self._args)
147-
}
144+
json_string = generate_json_metrics(
145+
metrics = metric_dict,
146+
args = vars(self._args),
147+
)
148148

149149
with open(file_path, 'w') as json_f:
150-
json_string = json.dumps(
151-
metric_dict,
152-
default=lambda o: o.__dict__,
153-
sort_keys=True,
154-
indent=4
155-
)
156150
print(json_string, file=json_f)
157151

158152
except Exception as e:
@@ -205,6 +199,30 @@ def _export_runtime_metrics_to_csv(self, metric_dict):
205199
except Exception as e:
206200
logging.error(f"An exception occured during export to CSV: {e}")
207201

202+
def _upload_metrics_to_endpoint(self, metric_dict):
    """Upload the benchmark metrics as JSON to a user-supplied HTTP endpoint.

    Args:
        metric_dict: dict of collected runtime metrics to serialize and send.

    No-op when `--upload_metrics_endpoint` was not set. This is a
    best-effort operation: any failure is logged and swallowed so the
    benchmark run itself is never aborted by an upload problem.
    """

    try:

        # Bug fix: the original referenced an undefined local `endpoint`;
        # the actual value lives on the parsed arguments.
        endpoint = self._args.upload_metrics_endpoint
        if endpoint is None:
            return

        json_string = generate_json_metrics(
            metrics=metric_dict,
            args=vars(self._args),
        )

        headers = {"Content-Type": "application/json"}

        # Bug fix: the original called json.dumps(data) on an undefined
        # `data` variable; `json_string` is already the serialized payload,
        # so send it directly as the request body.
        # NOTE(review): assumes `requests` is imported at module level —
        # the import is not visible in this diff; confirm.
        response = requests.put(
            endpoint,
            data=json_string,
            headers=headers
        )
        response.raise_for_status()

    except Exception as e:
        # Bug fix: error message previously said "export to JSON"
        # (copy-pasted from the JSON-export path); this is the upload path.
        logging.error(f"An exception occurred during metrics upload: {e}")
225+
208226
def _get_graph_func(self):
209227
"""Retreives a frozen SavedModel and applies TF-TRT
210228
use_tftrt: bool, if true use TensorRT

tftrt/benchmarking-python/benchmark_utils.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
33
# -*- coding: utf-8 -*-
44

5+
import json
56
import time
67

78
import numpy as np
@@ -114,6 +115,21 @@ def _format(tensor):
114115
return predictions, expected
115116

116117

118+
def generate_json_metrics(metrics, args):
    """Serialize benchmark results and runtime arguments into a JSON string.

    Args:
        metrics: dict of benchmark result values.
        args: dict of the runtime arguments the benchmark ran with.

    Returns:
        A deterministically ordered (sort_keys=True), 4-space-indented JSON
        string with top-level keys "results" and "runtime_arguments".
        Objects that are not natively JSON-serializable fall back to their
        `__dict__` representation.
    """
    payload = {
        "results": metrics,
        "runtime_arguments": args,
    }
    return json.dumps(
        payload,
        default=lambda o: o.__dict__,
        sort_keys=True,
        indent=4,
    )
131+
132+
117133
class DataAggregator(object):
118134

119135
def __init__(self, postprocess_model_outputs_fn, args):

0 commit comments

Comments
 (0)