From 9ed0a1aed5e9735e26afb7330688e85299bbc589 Mon Sep 17 00:00:00 2001
From: "Cheng, Penghui"
Date: Fri, 20 Dec 2024 20:59:03 +0800
Subject: [PATCH] Extend UT test_nonzero_static_large to XPU device and skip
 some cases (#1161)

1. Extend the UT test_nonzero_static_large to the XPU device.
2. Skip some cases with rounding/trunc div accuracy issues.
3. Skip some cases for the unsupported TunableOp feature.

---------

Signed-off-by: Cheng
Signed-off-by: Cheng, Penghui
---
 test/xpu/extended/skip_list_arc.py    | 16 ++++++++++++
 test/xpu/extended/skip_list_common.py |  4 +++
 test/xpu/skip_list_common.py          | 16 +++++++++++-
 test/xpu/test_unary_ufuncs_xpu.py     | 35 ++++++++++++++++++++++++++-
 4 files changed, 69 insertions(+), 2 deletions(-)

diff --git a/test/xpu/extended/skip_list_arc.py b/test/xpu/extended/skip_list_arc.py
index e1e701b84..c8e26ccf3 100644
--- a/test/xpu/extended/skip_list_arc.py
+++ b/test/xpu/extended/skip_list_arc.py
@@ -7,5 +7,21 @@
         "test_compare_cpu_bincount_xpu_int64",
         "test_compare_cpu_bincount_xpu_int8",
         "test_compare_cpu_bincount_xpu_uint8",
+        # RuntimeError: Kernel is incompatible with all devices in devs
+        # https://github.com/intel/torch-xpu-ops/issues/1150
+        "test_compare_cpu_logcumsumexp_xpu_float16",
+        "test_compare_cpu_logcumsumexp_xpu_float32",
+        "test_compare_cpu_nn_functional_pdist_xpu_float32",
+        "test_compare_cpu_tril_indices_xpu_int32",
+        "test_compare_cpu_tril_indices_xpu_int64",
+        "test_compare_cpu_triu_indices_xpu_int32",
+        "test_compare_cpu_triu_indices_xpu_int64",
+        "test_backward_logcumsumexp_xpu_float32",
+        "test_backward_nn_functional_pdist_xpu_float32",
+        "test_forward_ad_logcumsumexp_xpu_float32",
+        "test_operator_logcumsumexp_xpu_float32",
+        "test_operator_nn_functional_pdist_xpu_float32",
+        "test_view_replay_logcumsumexp_xpu_float32",
+        "test_view_replay_nn_functional_pdist_xpu_float32",
     ),
 }
diff --git a/test/xpu/extended/skip_list_common.py b/test/xpu/extended/skip_list_common.py
index 6b5fd653e..643d631eb 100644
--- a/test/xpu/extended/skip_list_common.py
+++ b/test/xpu/extended/skip_list_common.py
@@ -194,5 +194,9 @@
         # Greatest absolute difference: 0.0625 at index (1,) (up to 0.001 allowed)
         # Greatest relative difference: 0.00640869140625 at index (1,) (up to 0.001 allowed)
         "test_compare_cpu_xlogy_xpu_bfloat16",
+        "test_compare_cpu_div_trunc_rounding_xpu_float64",
+        "test_compare_cpu_div_trunc_rounding_xpu_float16",
+        "test_compare_cpu_div_floor_rounding_xpu_float16",
+        "test_compare_cpu_div_floor_rounding_xpu_bfloat16",
     ),
 }
diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py
index 670a88f53..52a93d91b 100644
--- a/test/xpu/skip_list_common.py
+++ b/test/xpu/skip_list_common.py
@@ -649,6 +649,14 @@
         "test_python_ref__refs_square_xpu_complex64",
         "test_python_ref_torch_fallback__refs_square_xpu_complex64",
         "test_python_ref_torch_fallback__refs_exp_xpu_complex128",
+
+        # Failed on the rolling driver, passed on preci
+        "test_python_ref__refs_div_trunc_rounding_xpu_float64",
+        "test_python_ref_executor__refs_div_trunc_rounding_executor_aten_xpu_float64",
+        "test_python_ref_torch_fallback__refs_div_trunc_rounding_xpu_float64",
+
+        # TODO: passes when PyTorch is built from source; investigate
+        "test_python_ref__refs_log2_xpu_complex128",
     ),

     "test_binary_ufuncs_xpu.py": (
@@ -1136,6 +1144,7 @@
         # Greatest relative difference: 1.9145216356264427e-05 at index (463, 204) (up to 1.3e-06 allowed)
         "test_reference_numerics_normal__refs_asinh_xpu_complex64",
         "test_reference_numerics_normal_asinh_xpu_complex64",
+        "test_batch_vs_slicing__refs_sigmoid_xpu_complex128",
         # Unexpected success: CUDA uses thrust::sqrt and has an accuracy issue; XPU uses std::sqrt and does not.
         "test_reference_numerics_large_rsqrt_xpu_complex32",
         # Numeric difference
@@ -1514,6 +1523,8 @@
         # XPU does not support tunable.
         "test_bmm_tunableop_rocm_xpu_float32",
         "test_numeric_check_leak_tunableop_rocm_xpu_float32",
+        "test_dump_results_on_exit_tunableop_xpu_float32",
+        "test_rotating_buffer_tunableop_xpu_float32",
         # CUDA bias cases added in latest PyTorch
         # AttributeError: module 'torch._C' has no attribute '_cuda_tunableop_enable'
         "test_matmul_check_entries_tunableop_xpu_float16",
@@ -3230,7 +3241,10 @@

     "test_type_promotion_xpu.py": None,

-    "test_distributions_xpu.py": None,
+    "test_distributions_xpu.py": (
+        # TODO: Passed on the LTS driver version, but failed on the rolling driver version
+        "test_gamma_gpu_sample_xpu",
+    ),

     "test_optim_xpu.py": (
         # oneDNN issues
diff --git a/test/xpu/test_unary_ufuncs_xpu.py b/test/xpu/test_unary_ufuncs_xpu.py
index 0e05a8e7c..a6c12a2ad 100644
--- a/test/xpu/test_unary_ufuncs_xpu.py
+++ b/test/xpu/test_unary_ufuncs_xpu.py
@@ -1,6 +1,7 @@
 # Owner(s): ["module: intel"]

-from torch.testing._internal.common_device_type import instantiate_device_type_tests
+import torch
+from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyXPU
 from torch.testing._internal.common_utils import run_tests

 try:
@@ -11,6 +12,38 @@
 with XPUPatchForImport(False):
     from test_unary_ufuncs import TestUnaryUfuncs

+    @onlyXPU
+    def _nonzero_static_large(self, device):
+        # large enough to have multiple iters per SM even on H100
+        # with 132 sms
+        size_inp = 1024 * 16 * 132 + 1024 * 16
+        x = torch.zeros(size_inp, device=device)
+        # unique indices
+        indices = torch.randperm(size_inp, device=device)[: size_inp // 2]
+        sorted, _ = torch.sort(indices)
+        x[sorted] = 1
+        res = torch.nonzero_static(x, size=size_inp // 2).view(-1)
+        self.assertEqual(res, sorted)
+        # no oob writes
+        out = torch.full((size_inp,), 10, device=device, dtype=torch.int64)
+        res = torch.nonzero_static(x, size=size_inp // 4, out=out[: size_inp // 2])
+        self.assertEqual(out[: size_inp // 4], sorted[: size_inp // 4])
+        self.assertEqual(
+            out[size_inp // 4 :],
+            torch.tensor(10, device="xpu").expand_as(out[size_inp // 4 :]),
+        )
+        # correct fill for 2d
+        x = x.view(2, size_inp // 2)
+        ref = x.nonzero()
+        res = x.nonzero_static(size=size_inp // 2 + 2)
+        self.assertEqual(res.shape, [size_inp // 2 + 2, 2])
+        self.assertEqual(ref, res[: size_inp // 2])
+        self.assertEqual(
+            res[size_inp // 2 :],
+            torch.tensor(-1, device="xpu").expand_as(res[size_inp // 2 :]),
+        )
+    TestUnaryUfuncs.test_nonzero_static_large = _nonzero_static_large
+
 instantiate_device_type_tests(TestUnaryUfuncs, globals(), only_for=("xpu"), allow_xpu=True)

 if __name__ == "__main__":
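
For reviewers unfamiliar with torch.nonzero_static, a minimal sketch of the padding contract the new test asserts. It assumes an XPU-enabled PyTorch build (substitute device="cpu" otherwise), and the run command is illustrative: the test name follows the <name>_<device> suffix that instantiate_device_type_tests generates.

    import torch

    # nonzero_static always returns exactly `size` rows: the indices of the
    # nonzero elements come first, and any remaining rows are padded with
    # `fill_value` (default -1); the same fill the test checks in its 2D case.
    x = torch.tensor([0.0, 1.0, 0.0, 1.0], device="xpu")  # assumes an XPU device
    res = torch.nonzero_static(x, size=4, fill_value=-1)
    # res: tensor([[1], [3], [-1], [-1]]), shape (size, x.dim()) == (4, 1)

    # Illustrative invocation of the generated XPU test:
    #   python test/xpu/test_unary_ufuncs_xpu.py -k test_nonzero_static_large_xpu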