From 124164eabd227bd265abbf545f93155d83535596 Mon Sep 17 00:00:00 2001
From: yuchengliu1
Date: Wed, 10 Jul 2024 00:38:29 +0800
Subject: [PATCH] revert unnecessary change

---
 test/xpu/run_test_with_skip.py | 36 ------------------------------------
 test/xpu/xpu_test_utils.py     |  4 ++++
 2 files changed, 4 insertions(+), 36 deletions(-)

diff --git a/test/xpu/run_test_with_skip.py b/test/xpu/run_test_with_skip.py
index fed045172..e64ac1d0a 100644
--- a/test/xpu/run_test_with_skip.py
+++ b/test/xpu/run_test_with_skip.py
@@ -256,26 +256,6 @@ def launch_test(test_case, skip_list=None, exe_list=None):
     "test_python_ref_executor__refs_pow_executor_aten_xpu_complex32", # Didn't align with CUDA, Unexpected success
     "test_compare_cpu_nn_functional_grid_sample_xpu_float32", # AssertionError: Tensor-likes are not close!
     "test_dtypes_nn_functional_batch_norm_without_cudnn_xpu", # AssertionError: The supported dtypes for nn.functional.batch_norm on device type xpu are incorrect!
-
-    # AssertionError: Tensor-likes are not close!
-    "test_compare_cpu__refs_std_mean_xpu_float32",
-    "test_compare_cpu__refs_var_mean_xpu_float32",
-    "test_compare_cpu_std_mean_xpu_float32",
-    "test_compare_cpu_var_mean_xpu_float32",
-    "test_noncontiguous_samples_std_mean_xpu_float32",
-    "test_noncontiguous_samples_std_xpu_float32",
-    "test_noncontiguous_samples_var_mean_xpu_float32",
-    "test_noncontiguous_samples_var_xpu_float32",
-    "test_out__refs_std_xpu_float32",
-    "test_out__refs_var_xpu_float32",
-    "test_out_std_xpu_float32",
-    "test_out_var_xpu_float32",
-    "test_out_warning__refs_std_xpu",
-    "test_out_warning__refs_var_xpu",
-    "test_out_warning_std_xpu",
-    "test_out_warning_var_xpu",
-    "test_variant_consistency_eager_std_xpu_float32",
-    "test_variant_consistency_eager_var_xpu_float32",
     # Unexpected success
     "test_errors_histogramdd_xpu",
     "test_noncontiguous_samples__batch_norm_with_update_xpu_float32",
@@ -283,22 +263,6 @@ def launch_test(test_case, skip_list=None, exe_list=None):
     "test_out_warning_logcumsumexp_xpu",
     "test_python_ref__refs_mul_xpu_complex32",
     "test_python_ref_torch_fallback__refs_mul_xpu_complex32",
-    # AssertionError: tensor(False, device='xpu:0') is not true : Reference result was farther from the precise computation than the torch result
-    "test_python_ref__refs_std_mean_xpu_float32",
-    "test_python_ref__refs_std_xpu_bfloat16",
-    "test_python_ref__refs_std_xpu_float16",
-    "test_python_ref_executor__refs_std_mean_executor_aten_xpu_bfloat16",
-    "test_python_ref_executor__refs_std_mean_executor_aten_xpu_float16",
-    "test_python_ref__refs_var_mean_xpu_float32",
-    "test_python_ref_torch_fallback__refs_std_mean_xpu_bfloat16",
-    "test_python_ref_torch_fallback__refs_std_mean_xpu_float16",
-    "test_python_ref__refs_var_xpu_float32",
-    "test_python_ref_executor__refs_std_executor_aten_xpu_bfloat16",
-    "test_python_ref_executor__refs_std_executor_aten_xpu_float16",
-    "test_python_ref_executor__refs_std_mean_executor_aten_xpu_float32",
-    "test_python_ref_torch_fallback__refs_var_mean_xpu_bfloat16",
-    "test_python_ref_torch_fallback__refs_var_mean_xpu_float16",
-    "test_python_ref_torch_fallback__refs_var_mean_xpu_float32",
     # Jiterator is only supported on CUDA and ROCm GPUs, none are available.
     "_jiterator_",
     # https://github.com/intel/torch-xpu-ops/issues/157
diff --git a/test/xpu/xpu_test_utils.py b/test/xpu/xpu_test_utils.py
index 389f676f8..e175891df 100644
--- a/test/xpu/xpu_test_utils.py
+++ b/test/xpu/xpu_test_utils.py
@@ -157,6 +157,10 @@
     "addr",
     "cdist",
     "nn.functional.group_norm",
+    "nn.functional.batch_norm",
+    "native_batch_norm",
+    "_native_batch_norm_legit",
+    "_batch_norm_with_update",
     "bincount",
     "renorm",
     "lerp",