From 8fe6940e1a033d64f3d6a3dd6213e861811b12c8 Mon Sep 17 00:00:00 2001
From: jiahangxu
Date: Wed, 1 Sep 2021 11:42:25 +0800
Subject: [PATCH] refactor nn_meter_cli

---
 README.md                       |  12 +-
 demo.py                         | 198 --------------------------------
 docs/usage.md                   |  12 +-
 nn_meter/nn_meter.py            | 166 --------------------------
 nn_meter/nn_meter_cli.py        | 190 ++++++++++++++++++++++++++++++
 setup.py                        |   2 +-
 tests/integration_test.py       |   8 +-
 tests/integration_test_torch.py |  12 +-
 8 files changed, 215 insertions(+), 385 deletions(-)
 delete mode 100644 demo.py
 create mode 100644 nn_meter/nn_meter_cli.py

diff --git a/README.md b/README.md
index 7ba67ebc..701b3611 100644
--- a/README.md
+++ b/README.md
@@ -92,16 +92,16 @@ After installation, a command named `nn-meter` is enabled. To predict the latenc
 
 ```bash
 # for Tensorflow (*.pb) file
-nn-meter --predictor <hardware> [--predictor-version <version>] --tensorflow <pb-file_or_folder>
+nn-meter lat_pred --predictor <hardware> [--predictor-version <version>] --tensorflow <pb-file_or_folder>
 
 # for ONNX (*.onnx) file
-nn-meter --predictor <hardware> [--predictor-version <version>] --onnx <onnx-file_or_folder>
+nn-meter lat_pred --predictor <hardware> [--predictor-version <version>] --onnx <onnx-file_or_folder>
 
 # for torch model from torchvision model zoo (str)
-nn-meter --predictor <hardware> [--predictor-version <version>] --torchvision <model-name> <model-name>...
+nn-meter lat_pred --predictor <hardware> [--predictor-version <version>] --torchvision <model-name> <model-name>...
 
 # for nn-Meter IR (*.json) file
-nn-meter --predictor <hardware> [--predictor-version <version>] --nn-meter-ir <json-file_or_folder>
+nn-meter lat_pred --predictor <hardware> [--predictor-version <version>] --nn-meter-ir <json-file_or_folder>
 ```
 
 The `--predictor-version <version>` argument is optional. When the predictor version is not specified by users, nn-meter will use the latest version of the predictor.
@@ -116,10 +116,10 @@ Furthermore, users may be interested to convert tensorflow pb-file or onnx file
 
 ```bash
 # for Tensorflow (*.pb) file
-nn-meter getir --tensorflow <pb-file> [--output <output-name>]
+nn-meter get_ir --tensorflow <pb-file> [--output <output-name>]
 
 # for ONNX (*.onnx) file
-nn-meter getir --onnx <onnx-file> [--output <output-name>]
+nn-meter get_ir --onnx <onnx-file> [--output <output-name>]
 ```
 
 The output name defaults to `/path/to/input/file/<input_file_name>_<model_type>_ir.json` if not specified by users.
diff --git a/demo.py b/demo.py
deleted file mode 100644
index 2063b010..00000000
--- a/demo.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT license.
-
-from nn_meter.utils.utils import try_import_torchvision_models
-from nn_meter.nn_meter import apply_latency_predictor, get_nnmeter_ir
-from nn_meter import list_latency_predictors
-import argparse
-import os
-import sys
-import logging
-from glob import glob
-from functools import partial, partialmethod
-
-logging.KEYINFO = 22
-logging.addLevelName(logging.KEYINFO, 'KEYINFO')
-logging.Logger.keyinfo = partialmethod(logging.Logger.log, logging.KEYINFO)
-logging.keyinfo = partial(logging.log, logging.KEYINFO)
-
-logging.RESULT = 25
-logging.addLevelName(logging.RESULT, 'RESULT')
-logging.Logger.result = partialmethod(logging.Logger.log, logging.RESULT)
-logging.result = partial(logging.log, logging.RESULT)
-
-
-def test_ir_graphs(predictor, ppath="data/testmodels"):
-    # will remove this to examples once we have the pip package
-    from glob import glob
-    from nn_meter import download_from_url
-
-    url = "https://github.com/microsoft/nn-Meter/releases/download/v1.0-data/ir_graphs.zip"
-    download_from_url(url, ppath)
-    models = glob(os.path.join(ppath, "**.json"))
-    print(models)
-    for model in models:
-        latency = predictor.predict(model)  # in unit of ms
-        logging.info(os.path.basename(model), latency)
-
-
-def test_pb_models(predictor, ppath="data/testmodels"):
-    # will remove this to examples once we have the pip package
-    from glob import glob
-    from nn_meter import download_from_url
-
-    url = "https://github.com/microsoft/nn-Meter/releases/download/v1.0-data/pb_models.zip"
-    download_from_url(url, ppath)
-    models = glob(os.path.join(ppath, "**.pb"))
-    for model in models:
-        latency = predictor.predict(model)  # in unit of ms
-        logging.info(os.path.basename(model), latency)
-
-
-def test_onnx_models(predictor, ppath="data/testmodels"):
-    # will remove this to examples once we have the pip package
-    from glob import glob
-    from nn_meter import download_from_url
-
-    url = "https://github.com/microsoft/nn-Meter/releases/download/v1.0-data/onnx_models.zip"
-    download_from_url(url, ppath)
-    models = glob(os.path.join(ppath, "**.onnx"))
-    for model in models:
-        latency = predictor.predict(model)  # in unit of ms
-        logging.info(os.path.basename(model), latency)
-
-
-def test_pytorch_models(args, predictor):
-    # will remove this to examples once we have the pip package
-    models = try_import_torchvision_models()
-
-    resnet18 = models.resnet18()
-    alexnet = models.alexnet()
-    vgg16 = models.vgg16()
-    squeezenet = models.squeezenet1_0()
-    densenet161 = models.densenet161()
-    inception_v3 = models.inception_v3()
-    googlenet = models.googlenet()
-    shufflenet_v2 = models.shufflenet_v2_x1_0()
-    mobilenet_v2 = models.mobilenet_v2()  # noqa: F841
-    resnext50_32x4d = models.resnext50_32x4d()
-    wide_resnet50_2 = models.wide_resnet50_2()
-    mnasnet = models.mnasnet1_0()
-    models = []
-    models.append(alexnet)
-    models.append(resnet18)
-    models.append(vgg16)
-    models.append(squeezenet)
-    models.append(densenet161)
-    models.append(inception_v3)
-    models.append(googlenet)
-    models.append(shufflenet_v2)
-    models.append(resnext50_32x4d)
-    models.append(wide_resnet50_2)
-    models.append(mnasnet)
-    logging.info("start to test")
-    for model in models:
-        latency = predictor.predict(
-            model, model_type="torch", input_shape=(1, 3, 224, 224)
-        )  # the resulting latency is in unit of ms
-        logging.info(model.__class__.__name__, latency)
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser('nn-meter')
-
-    # Usage 1: list predictors
-    parser.add_argument(
-        '--list-predictors',
-        help='list all supported predictors',
-        action='store_true',
-        default=False
-    )
-
-    # Usage 2: latency predictors
-    parser.add_argument(
-        "--predictor",
-        type=str,
-        help="name of target predictor (hardware)"
-    )
-    parser.add_argument(
-        "--predictor-version",
-        type=float,
-        default=None,
-        help="the version of the latency predictor (If not specified, use the lateast version)",
-    )
-    group = parser.add_mutually_exclusive_group()
-    group.add_argument(
-        "--tensorflow",
-        type=str,
-        help="Path to input Tensorflow model (*.pb)"
-    )
-    group.add_argument(
-        "--onnx",
-        type=str,
-        help="Path to input ONNX model (*.onnx)"
-    )
-    group.add_argument(
-        "--nn-meter-ir",
-        type=str,
-        help="Path to input nn-Meter IR model (*.json)"
-    )
-    group.add_argument(
-        "--torchvision",        # --torchvision only can support the model object. The argument specifies the name
-        type=str,               # of the model, and we will look for the model in torchvision model zoo.
-        nargs='+',
-        help="Name of the input torch model from the torchvision model zoo"
-    )
-
-    # Usage 3: get nn-meter-ir model from tensorflow pbfile or onnx file
-    # Usags: nn-meter getir --tensorflow <pb-file>
-    subprasers = parser.add_subparsers(dest='getir')
-    getir = subprasers.add_parser(
-        'getir',
-        help='specify a model type to convert to nn-meter ir graph'
-    )
-    getir.add_argument(
-        "--tensorflow",
-        type = str,
-        help="Path to input Tensorflow model (*.pb)"
-    )
-    getir.add_argument(
-        "--onnx",
-        type=str,
-        help="Path to input ONNX model (*.onnx)"
-    )
-    getir.add_argument(
-        "-o", "--output",
-        type=str,
-        help="Path to save the output nn-meter ir graph for tensorflow and onnx (*.json)"
-    )
-
-    # Other utils
-    parser.add_argument(
-        "-v", "--verbose",
-        help="increase output verbosity",
-        action="store_true"
-    )
-
-    # parse args
-    args = parser.parse_args()
-    if args.verbose:
-        logging.basicConfig(stream=sys.stdout, format="%(message)s", level=logging.INFO)
-    else:
-        logging.basicConfig(stream=sys.stdout, format="%(message)s", level=logging.KEYINFO)
-
-    # Usage 1
-    if args.list_predictors:
-        preds = list_latency_predictors()
-        logging.keyinfo("Supported latency predictors:")
-        for p in preds:
-            logging.result(f"[Predictor] {p['name']}: version={p['version']}")
-
-    # Usage 2
-    if not args.getir:
-        _ = apply_latency_predictor(args)
-
-    # Usage 3
-    if args.getir:
-        get_nnmeter_ir(args)
-
diff --git a/docs/usage.md b/docs/usage.md
index 1ffe4bfd..4b6fad7e 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -36,16 +36,16 @@ After installation, a command named `nn-meter` is enabled. To predict the latenc
 
 ```bash
 # for Tensorflow (*.pb) file
-nn-meter --predictor <hardware> [--predictor-version <version>] --tensorflow <pb-file_or_folder>
+nn-meter lat_pred --predictor <hardware> [--predictor-version <version>] --tensorflow <pb-file_or_folder>
 
 # for ONNX (*.onnx) file
-nn-meter --predictor <hardware> [--predictor-version <version>] --onnx <onnx-file_or_folder>
+nn-meter lat_pred --predictor <hardware> [--predictor-version <version>] --onnx <onnx-file_or_folder>
 
 # for torch model from torchvision model zoo (str)
-nn-meter --predictor <hardware> [--predictor-version <version>] --torchvision <model-name> <model-name>...
+nn-meter lat_pred --predictor <hardware> [--predictor-version <version>] --torchvision <model-name> <model-name>...
 
 # for nn-Meter IR (*.json) file
-nn-meter --predictor <hardware> [--predictor-version <version>] --nn-meter-ir <json-file_or_folder>
+nn-meter lat_pred --predictor <hardware> [--predictor-version <version>] --nn-meter-ir <json-file_or_folder>
 ```
 
 The `--predictor-version <version>` argument is optional. When the predictor version is not specified by users, nn-meter will use the latest version of the predictor.
@@ -60,10 +60,10 @@ Furthermore, users may be interested to convert tensorflow pb-file or onnx file
 
 ```bash
 # for Tensorflow (*.pb) file
-nn-meter getir --tensorflow <pb-file> [--output <output-name>]
+nn-meter get_ir --tensorflow <pb-file> [--output <output-name>]
 
 # for ONNX (*.onnx) file
-nn-meter getir --onnx <onnx-file> [--output <output-name>]
+nn-meter get_ir --onnx <onnx-file> [--output <output-name>]
 ```
 
 The output name defaults to `/path/to/input/file/<input_file_name>_<model_type>_ir.json` if not specified by users.
diff --git a/nn_meter/nn_meter.py b/nn_meter/nn_meter.py
index 550a0ae5..c6cde85f 100644
--- a/nn_meter/nn_meter.py
+++ b/nn_meter/nn_meter.py
@@ -98,71 +98,6 @@ def load_latency_predictor(predictor_name: str, predictor_version: float = None)
     return nnMeter(kernel_predictors, fusionrule)
 
 
-def apply_latency_predictor(args):
-    """apply latency predictor to predict model latency according to the command line interface arguments
-    """
-    # specify model type
-    if args.tensorflow:
-        input_model, model_type, model_suffix = args.tensorflow, "pb", ".pb"
-    elif args.onnx:
-        input_model, model_type, model_suffix = args.onnx, "onnx", ".onnx"
-    elif args.nn_meter_ir:
-        input_model, model_type, model_suffix = args.nn_meter_ir, "nnmeter-ir", ".json"
-    elif args.torchvision:  # torch model name from torchvision model zoo
-        input_model_list, model_type = args.torchvision, "torch"
-
-    # load predictor
-    predictor = load_latency_predictor(args.predictor, args.predictor_version)
-
-    # specify model for prediction
-    if not args.torchvision:  # input of tensorflow, onnx, nnmeter-ir and nni-ir is file name, while input of torchvision is string list
-        input_model_list = []
-        if os.path.isfile(input_model):
-            input_model_list = [input_model]
-        elif os.path.isdir(input_model):
-            input_model_list = glob(os.path.join(input_model, "**" + model_suffix))
-            input_model_list.sort()
-            logging.info(f'Found {len(input_model_list)} model in {input_model}. Start prediction ...')
-        else:
-            logging.error(f'Cannot find any model satisfying the arguments.')
-
-    # predict latency
-    result = {}
-    for model in input_model_list:
-        latency = predictor.predict(model, model_type)  # in unit of ms
-        result[os.path.basename(model)] = latency
-        logging.result(f'[RESULT] predict latency for {os.path.basename(model)}: {latency} ms')
-
-    return result
-
-
-def get_nnmeter_ir(args):
-    """convert pb file or onnx file to nn-Meter IR graph according to the command line interface arguments
-    """
-    import json
-    from nn_meter.utils.graph_tool import NumpyEncoder
-    if args.tensorflow:
-        graph = model_file_to_graph(args.tensorflow, 'pb')
-        filename = args.output if args.output else args.tensorflow.replace(".pb", "_pb_ir.json")
-    elif args.onnx:
-        graph = model_file_to_graph(args.onnx, 'onnx')
-        filename = args.output if args.output else args.onnx.replace(".onnx", "_onnx_ir.json")
-    else:
-        raise ValueError(f"Unsupported model.")
-
-    if not str.endswith(filename, '.json'): filename += '.json'
-    with open(filename, "w+") as fp:
-        json.dump(graph,
-                  fp,
-                  indent=4,
-                  skipkeys=True,
-                  sort_keys=True,
-                  cls=NumpyEncoder,
-                  )
-
-    logging.result(f'The nn-meter ir graph has been saved. Saved path: {os.path.abspath(filename)}')
-
-
 class nnMeter:
     def __init__(self, predictors, fusionrule):
         self.kernel_predictors = predictors
@@ -206,104 +141,3 @@ def predict(
         py = nn_predict(self.kernel_predictors, self.kd.kernels)  # in unit of ms
         logging.info(f"Predict latency: {py} ms")
         return py
-
-
-def nn_meter_cli():
-    parser = argparse.ArgumentParser('nn-meter')
-
-    # Usage 1: list predictors
-    parser.add_argument(
-        "--list-predictors",
-        help='list all supported predictors',
-        action='store_true',
-        default=False
-    )
-
-    # Usage 2: latency predictors
-    parser.add_argument(
-        "--predictor",
-        type=str,
-        help="name of target predictor (hardware)"
-    )
-    parser.add_argument(
-        "--predictor-version",
-        type=float,
-        help="the version of the latency predictor (If not specified, use the lateast version)",
-        default=None
-    )
-    group = parser.add_mutually_exclusive_group()
-    group.add_argument(
-        "--tensorflow",
-        type=str,
-        help="Path to input Tensorflow model (*.pb)"
-    )
-    group.add_argument(
-        "--onnx",
-        type=str,
-        help="Path to input ONNX model (*.onnx)"
-    )
-    group.add_argument(
-        "--nn-meter-ir",
-        type=str,
-        help="Path to input nn-Meter IR model (*.json)"
-    )
-    group.add_argument(
-        "--torchvision",        # --torchvision only can support the model object. The argument specifies
-        type=str,               # the name of the model, and we will look for the model in torchvision model zoo.
-        nargs='+',
-        help="Name of the input torch model from the torchvision model zoo"
-    )
-
-    # Usage 3: get nn-meter-ir model from tensorflow pbfile or onnx file
-    # Usags: nn-meter getir --tensorflow <pb-file>
-    subprasers = parser.add_subparsers(dest='getir')
-    getir = subprasers.add_parser(
-        "getir",
-        help='specify a model type to convert to nn-meter ir graph'
-    )
-    getir.add_argument(
-        "--tensorflow",
-        type = str,
-        help="Path to input Tensorflow model (*.pb)"
-    )
-    getir.add_argument(
-        "--onnx",
-        type=str,
-        help="Path to input ONNX model (*.onnx)"
-    )
-    getir.add_argument(
-        "-o", "--output",
-        type=str,
-        help="Path to save the output nn-meter ir graph for tensorflow and onnx (*.json), default to be /path/to/input/file/<input_file_name>_ir.json"
-    )
-
-    # Other utils
-    parser.add_argument(
-        "-v", "--verbose",
-        help="increase output verbosity",
-        action="store_true"
-    )
-
-    # parse args
-    args = parser.parse_args()
-    if args.verbose:
-        logging.basicConfig(stream=sys.stdout, format="(nn-Meter) %(message)s", level=logging.INFO)
-    else:
-        logging.basicConfig(stream=sys.stdout, format="(nn-Meter) %(message)s", level=logging.KEYINFO)
-
-    # Usage 1
-    if args.list_predictors:
-        preds = list_latency_predictors()
-        logging.keyinfo("Supported latency predictors:")
-        for p in preds:
-            logging.result(f"[Predictor] {p['name']}: version={p['version']}")
-        return
-
-    # Usage 2
-    if not args.getir:
-        _ = apply_latency_predictor(args)
-
-    # Usage 3
-    if args.getir:
-        get_nnmeter_ir(args)
-
diff --git a/nn_meter/nn_meter_cli.py b/nn_meter/nn_meter_cli.py
new file mode 100644
index 00000000..55dccd27
--- /dev/null
+++ b/nn_meter/nn_meter_cli.py
@@ -0,0 +1,190 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from glob import glob
+import os
+import sys
+import argparse
+import logging
+from nn_meter.nn_meter import *
+
+__user_config_folder__ = os.path.expanduser('~/.nn_meter/config')
+__user_data_folder__ = os.path.expanduser('~/.nn_meter/data')
+
+__predictors_cfg_filename__ = 'predictors.yaml'
+
+
+def list_latency_predictors_cli():
+    preds = list_latency_predictors()
+    logging.keyinfo("Supported latency predictors:")
+    for p in preds:
+        logging.result(f"[Predictor] {p['name']}: version={p['version']}")
+    return
+
+
+def apply_latency_predictor_cli(args):
+    """apply latency predictor to predict model latency according to the command line interface arguments
+    """
+    if not args.predictor:
+        logging.keyinfo('You must specify a predictor. Use "nn-meter --list-predictors" to see all supported predictors.')
+        return
+
+    # specify model type
+    if args.tensorflow:
+        input_model, model_type, model_suffix = args.tensorflow, "pb", ".pb"
+    elif args.onnx:
+        input_model, model_type, model_suffix = args.onnx, "onnx", ".onnx"
+    elif args.nn_meter_ir:
+        input_model, model_type, model_suffix = args.nn_meter_ir, "nnmeter-ir", ".json"
+    elif args.torchvision:  # torch model name from torchvision model zoo
+        input_model_list, model_type = args.torchvision, "torch"
+
+    # load predictor
+    predictor = load_latency_predictor(args.predictor, args.predictor_version)
+
+    # specify model for prediction
+    if not args.torchvision:  # input of tensorflow, onnx, nnmeter-ir and nni-ir is file name, while input of torchvision is string list
+        input_model_list = []
+        if os.path.isfile(input_model):
+            input_model_list = [input_model]
+        elif os.path.isdir(input_model):
+            input_model_list = glob(os.path.join(input_model, "**" + model_suffix))
+            input_model_list.sort()
+            logging.info(f'Found {len(input_model_list)} model(s) in {input_model}. Start prediction ...')
+        else:
+            logging.error(f'Cannot find any model satisfying the arguments.')
+
+    # predict latency
+    result = {}
+    for model in input_model_list:
+        latency = predictor.predict(model, model_type)  # in unit of ms
+        result[os.path.basename(model)] = latency
+        logging.result(f'[RESULT] predict latency for {os.path.basename(model)}: {latency} ms')
+
+    return result
+
+
+def get_nnmeter_ir_cli(args):
+    """convert pb file or onnx file to nn-Meter IR graph according to the command line interface arguments
+    """
+    import json
+    from nn_meter.utils.graph_tool import NumpyEncoder
+    if args.tensorflow:
+        graph = model_file_to_graph(args.tensorflow, 'pb')
+        filename = args.output if args.output else args.tensorflow.replace(".pb", "_pb_ir.json")
+    elif args.onnx:
+        graph = model_file_to_graph(args.onnx, 'onnx')
+        filename = args.output if args.output else args.onnx.replace(".onnx", "_onnx_ir.json")
+    else:
+        raise ValueError(f"Unsupported model.")
+
+    if not str.endswith(filename, '.json'): filename += '.json'
+    with open(filename, "w+") as fp:
+        json.dump(graph,
+                  fp,
+                  indent=4,
+                  skipkeys=True,
+                  sort_keys=True,
+                  cls=NumpyEncoder,
+                  )
+
+    logging.result(f'The nn-meter ir graph has been saved. Saved path: {os.path.abspath(filename)}')
+
+
+def nn_meter_info(args):
+    if args.list_predictors:
+        list_latency_predictors_cli()
+    else:
+        logging.keyinfo('please run "nn-meter {positional argument} --help" to see nn-meter guidance')
+
+
+def nn_meter_cli():
+    parser = argparse.ArgumentParser('nn-meter', description='please run "nn-meter {positional argument} --help" to see nn-meter guidance')
+    parser.set_defaults(func=nn_meter_info)
+
+    # optional arguments
+    parser.add_argument(
+        "-v", "--verbose",
+        help="increase output verbosity",
+        action="store_true"
+    )
+    parser.add_argument(
+        '--list-predictors',
+        help='list all supported predictors',
+        action='store_true',
+        default=False
+    )
+
+    # create subparsers for args with sub values
+    subparsers = parser.add_subparsers()
+
+    # Usage 1: latency predictors
+    lat_pred = subparsers.add_parser('lat_pred', help='apply latency predictor to predict model latency')
+    lat_pred.add_argument(
+        "--predictor",
+        type=str,
+        help="name of target predictor (hardware)"
+    )
+    lat_pred.add_argument(
+        "--predictor-version",
+        type=float,
+        help="the version of the latency predictor (if not specified, use the latest version)",
+        default=None
+    )
+    group = lat_pred.add_mutually_exclusive_group()
+    group.add_argument(
+        "--tensorflow",
+        type=str,
+        help="path to input Tensorflow model (*.pb file or folder)"
+    )
+    group.add_argument(
+        "--onnx",
+        type=str,
+        help="path to input ONNX model (*.onnx file or folder)"
+    )
+    group.add_argument(
+        "--nn-meter-ir",
+        type=str,
+        help="path to input nn-Meter IR model (*.json file or folder)"
+    )
+    group.add_argument(
+        "--torchvision",        # --torchvision only supports models from the torchvision model zoo. The argument
+        type=str,               # specifies the model name(s), which are looked up in the torchvision model zoo.
+        nargs='+',
+        help="name of the input torch model from the torchvision model zoo"
+    )
+    lat_pred.set_defaults(func=apply_latency_predictor_cli)
+
+    # Usage 2: get nn-meter-ir model from tensorflow pbfile or onnx file
+    # Usage: nn-meter get_ir --tensorflow <pb-file>
+    get_ir = subparsers.add_parser(
+        'get_ir',
+        help='specify a model type to convert to nn-meter ir graph'
+    )
+    group2 = get_ir.add_mutually_exclusive_group()
+    group2.add_argument(
+        "--tensorflow",
+        type=str,
+        help="path to input Tensorflow model (*.pb)"
+    )
+    group2.add_argument(
+        "--onnx",
+        type=str,
+        help="path to input ONNX model (*.onnx)"
+    )
+    get_ir.add_argument(
+        "-o", "--output",
+        type=str,
+        help="path to save the output nn-meter ir graph for tensorflow and onnx (*.json), defaults to /path/to/input/file/<input_file_name>_ir.json"
+    )
+    get_ir.set_defaults(func=get_nnmeter_ir_cli)
+
+    # parse args
+    args = parser.parse_args()
+    if args.verbose:
+        logging.basicConfig(stream=sys.stdout, format="(nn-Meter) %(message)s", level=logging.INFO)
+    else:
+        logging.basicConfig(stream=sys.stdout, format="(nn-Meter) %(message)s", level=logging.KEYINFO)
+    args.func(args)
+
+
+if __name__ == '__main__':
+    nn_meter_cli()
diff --git a/setup.py b/setup.py
index 474e3d00..8b446fa1 100644
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,7 @@
         'nn_meter': ['configs/*.yaml', 'kerneldetection/fusionlib/*.json'],
     },
     entry_points={
-        'console_scripts': ['nn-meter=nn_meter.nn_meter:nn_meter_cli'],
+        'console_scripts': ['nn-meter=nn_meter.nn_meter_cli:nn_meter_cli'],
     },
     install_requires=[
         'numpy', 'tqdm', 'networkx', 'requests', 'protobuf', 'PyYAML', 'scikit_learn', 'packaging'
diff --git a/tests/integration_test.py b/tests/integration_test.py
index 56716516..1df9664e 100644
--- a/tests/integration_test.py
+++ b/tests/integration_test.py
@@ -82,7 +82,7 @@ def integration_test(model_type, url, ppath, output_name = "tests/test_result.tx
     try:
         since = time.time()
         # print(f'nn-meter --{model_type} {ppath} --predictor {pred_name} --predictor-version {pred_version}')
-        result = subprocess.check_output(['nn-meter', f'--{model_type}', f'{ppath}', '--predictor', f'{pred_name}', '--predictor-version', f'{pred_version}'])
+        result = subprocess.check_output(['nn-meter', 'lat_pred', f'--{model_type}', f'{ppath}', '--predictor', f'{pred_name}', '--predictor-version', f'{pred_version}'])
         runtime = time.time() - since
     except NotImplementedError:
         logging.error(f"Meets ERROR when checking --{model_type} {ppath} --predictor {pred_name} --predictor-version {pred_version}")
@@ -98,13 +98,13 @@ def integration_test(model_type, url, ppath, output_name = "tests/test_result.tx
 def check_getir_module(model_type, ppath):
     for model in get_models(model_type, ppath):
         try:
-            _ = subprocess.check_output(['nn-meter', 'getir', f'--{model_type}', model])
-            _ = subprocess.check_output(['nn-meter', 'getir', f'--{model_type}', model, '--output', f'temp.json'])
+            _ = subprocess.check_output(['nn-meter', 'get_ir', f'--{model_type}', model])
+            _ = subprocess.check_output(['nn-meter', 'get_ir', f'--{model_type}', model, '--output', f'temp.json'])
             if os.path.exists('temp.json'):
                 os.remove('temp.json')
             break  # test just one file to avoid time cosuming
         except NotImplementedError:
-            logging.error("Meets ERROR when checking getir --{model_type} {ppath}'")
+            logging.error(f"Meets ERROR when checking get_ir --{model_type} {ppath}")
 
 
 if __name__ == "__main__":
diff --git a/tests/integration_test_torch.py b/tests/integration_test_torch.py
index 7ed92296..4f716cbc 100644
--- a/tests/integration_test_torch.py
+++ b/tests/integration_test_torch.py
@@ -46,7 +46,7 @@ def integration_test_onnx_based_torch(model_type, model_list, output_name = "tes
     try:
         since = time.time()
         # print(f'nn-meter --torchvision ' + " ".join(model_list) + f' --predictor {pred_name} --predictor-version {pred_version}')
-        result = subprocess.check_output(['nn-meter', f'--torchvision'] + model_list + ['--predictor', f'{pred_name}', '--predictor-version', f'{pred_version}'])
+        result = subprocess.check_output(['nn-meter', 'lat_pred', '--torchvision'] + model_list + ['--predictor', f'{pred_name}', '--predictor-version', f'{pred_version}'])
         runtime = time.time() - since
     except NotImplementedError:
         logging.error("Meets ERROR when checking --torchvision {model_string} --predictor {pred_name} --predictor-version {pred_version}")
@@ -100,7 +100,14 @@ def integration_test_nni_based_torch(output_name = "tests/test_result_nni_based_
 
     check_package_status()
 
+    if not args.apply_onnx and not args.apply_nni:
+        args.apply_onnx = True
+        args.apply_nni = True
+
     # check torch model
+    if args.apply_nni:
+        # check NNI-based torch converter
+        integration_test_nni_based_torch()
     if args.apply_onnx:
         # check ONNX-based torch converter
         integration_test_onnx_based_torch(
@@ -109,7 +116,4 @@ def integration_test_nni_based_torch(output_name = "tests/test_result_nni_based_
             'resnet18', 'alexnet', 'vgg16', 'squeezenet', 'densenet161', 'inception_v3', 'googlenet',
             'shufflenet_v2', 'mobilenet_v2', 'resnext50_32x4d', 'wide_resnet50_2', 'mnasnet']
         )
-    elif args.apply_nni:
-        # check NNI-based torch converter
-        integration_test_nni_based_torch()
\ No newline at end of file
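For a quick end-to-end check of the refactored entry point, the new subcommands can be exercised the same way the updated integration tests do. Below is a minimal smoke-test sketch, assuming nn-meter is installed with this patch applied (so `nn-meter` resolves to `nn_meter.nn_meter_cli:nn_meter_cli`); the predictor name `cortexA76cpu_tflite21` and the input path `model.pb` are illustrative assumptions, not part of this patch:

```python
# Smoke test for the refactored nn-meter CLI (a sketch, not part of the patch).
# Assumes nn-meter is installed with this refactor and that at least one
# predictor (here assumed to be "cortexA76cpu_tflite21") is available.
import subprocess

# Top-level flag, dispatched to nn_meter_info -> list_latency_predictors_cli.
print(subprocess.check_output(["nn-meter", "--list-predictors"], text=True))

# New `lat_pred` subcommand: predict latency for a torchvision model by name.
print(subprocess.check_output(
    ["nn-meter", "lat_pred",
     "--predictor", "cortexA76cpu_tflite21",  # assumed predictor name
     "--torchvision", "resnet18"],
    text=True))

# New `get_ir` subcommand: convert a TensorFlow pb file to nn-Meter IR.
# "model.pb" is a hypothetical input path used only for illustration.
print(subprocess.check_output(
    ["nn-meter", "get_ir", "--tensorflow", "model.pb",
     "--output", "model_ir.json"],
    text=True))
```

Because each subparser registers its handler through `set_defaults(func=...)`, a bare `nn-meter` invocation falls through to `nn_meter_info`, which prints the guidance message unless `--list-predictors` is given.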