From d466f26a01c52d6bd9225ae179953267fac7a883 Mon Sep 17 00:00:00 2001
From: jcwchen
Date: Thu, 13 Jul 2023 08:44:21 -0700
Subject: [PATCH] check_path

Signed-off-by: jcwchen
---
 workflow_scripts/check_model.py        | 5 ++---
 workflow_scripts/ort_test_dir_utils.py | 3 ---
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/workflow_scripts/check_model.py b/workflow_scripts/check_model.py
index ae2bfaf80..33dce8001 100644
--- a/workflow_scripts/check_model.py
+++ b/workflow_scripts/check_model.py
@@ -16,8 +16,8 @@ def has_vnni_support():
 
 def run_onnx_checker(model_path):
     model = onnx.load(model_path)
-    onnx.checker.check_model(model, full_check=True)
     del model
+    onnx.checker.check_model(model_path, full_check=True)
 
 
 def ort_skip_reason(model_path):
@@ -48,8 +48,7 @@ def run_backend_ort(model_path, test_data_set=None, tar_gz_path=None):
     # based on the build flags) when instantiating InferenceSession.
     # For example, if NVIDIA GPU is available and ORT Python package is built with CUDA, then call API as following:
     # onnxruntime.InferenceSession(path/to/model, providers=["CUDAExecutionProvider"])
-    sess = onnxruntime.InferenceSession(model_path)
-    del sess
+    onnxruntime.InferenceSession(model_path)
     # Get model name without .onnx
     model_name = os.path.basename(os.path.splitext(model_path)[0])
     if model_name is None:
diff --git a/workflow_scripts/ort_test_dir_utils.py b/workflow_scripts/ort_test_dir_utils.py
index 66d07d500..91f241e6a 100644
--- a/workflow_scripts/ort_test_dir_utils.py
+++ b/workflow_scripts/ort_test_dir_utils.py
@@ -153,7 +153,6 @@ def save_data(prefix, name_data_map, model_info):
     name_output_map = {}
     for name, data in zip(output_names, outputs):
         name_output_map[name] = data
-    del sess
 
     save_data("output", name_output_map, model_outputs)
 
@@ -263,8 +262,6 @@
                 print("Mismatch for {}:\nExpected:{}\nGot:{}".format(output_names[idx], expected, actual))
                 failed = True
     if failed:
-        del sess
         raise ValueError("FAILED due to output mismatch.")
     else:
         print("PASS")
-    del sess