Skip to content

Commit

Permalink
Undo skip test cases of CPU fallback failures (#657)
Browse files Browse the repository at this point in the history
Some test cases failed due to missing XPU implementations. These
cases have been re-evaluated and removed from the skip list when they passed.

---------

Co-authored-by: Feng Yuan <[email protected]>
  • Loading branch information
hjhee and fengyuan14 authored Jul 31, 2024
1 parent 894843c commit ced287f
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 32 deletions.
20 changes: 13 additions & 7 deletions test/xpu/extended/run_test_with_skip.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,9 +69,6 @@
# TestCompositeCompliance
# CPU fallback fails
# Require implementing aten::embedding_renorm_
"test_forward_ad_nn_functional_embedding_xpu_float32",
"test_backward_nn_functional_embedding_xpu_float32",
"test_forward_ad_nn_functional_embedding_xpu_float32",
"test_view_replay_nn_functional_embedding_xpu_float32",

# TestCompositeCompliance::test_cow_input
Expand Down Expand Up @@ -133,10 +130,19 @@
# When XPU uses original data type, the case passes.
"test_compare_cpu_logit_xpu_bfloat16",

# Not implemented operators, aten::upsample_linear1d, aten::upsample_bilinear2d,
# aten::upsample_trilinear3d,
"nn_functional_interpolate_bilinear",
"nn_functional_interpolate_trilinear",
# precision error
# Mismatched elements: 1 / 24 (4.2%)
# Greatest absolute difference: 0.03125 at index (0, 1, 0, 1) (up to 0.001 allowed)
# Greatest relative difference: 0.0048828125 at index (0, 1, 0, 1) (up to 0.001 allowed)
"test_compare_cpu_nn_functional_interpolate_bilinear_xpu_bfloat16",

# RuntimeError: "compute_index_ranges_weights" not implemented for 'Half'
"test_compare_cpu_nn_functional_interpolate_bilinear_xpu_float16",

# AssertionError: False is not true : Argument 0 during forward call unexpectedly materializes. Either set `supports_cow_input_no_materialize_forward=False...
"test_cow_input_nn_functional_interpolate_bilinear_xpu_float32",
"test_cow_input_nn_functional_interpolate_linear_xpu_float32",
"test_cow_input_nn_functional_interpolate_trilinear_xpu_float32",

#The results of XPU and CUDA are consistent, but the results of CPU and CUDA are inconsistent
"test_compare_cpu_nn_functional_interpolate_linear_xpu_bfloat16",
Expand Down
30 changes: 5 additions & 25 deletions test/xpu/run_test_with_skip.py
Original file line number Diff line number Diff line change
Expand Up @@ -755,9 +755,6 @@ def launch_test(test_case, skip_list=None, exe_list=None):
# The following dtypes did not work in backward but are listed by the OpInfo: {torch.float16}.
"test_dtypes_nn_functional_pad_replicate_negative_xpu",
"test_dtypes_nn_functional_pad_replicate_xpu",
# Falls back to cpu's implementation but uses the dtypes claimed by xpu, AssertionError: The supported dtypes for nn.functional.interpolate on device type xpu are incorrect!
# https://github.com/intel/torch-xpu-ops/issues/468
"test_dtypes_nn_functional_interpolate_bilinear_xpu",

# Op impl aligns with CUDA on the supported dtypes.
# RuntimeError: "avg_pool2d_xpu" not implemented for 'Long'.
Expand All @@ -779,9 +776,6 @@ def launch_test(test_case, skip_list=None, exe_list=None):
"test_dtypes_unique_consecutive_xpu",
"test_dtypes_unique_xpu",

# torch.complex32 - "sinh_cpu" not implemented for 'ComplexHalf'
"test_dtypes_cosh_xpu",

# RuntimeError: Expected both inputs to be Half, Float or Double tensors but got BFloat16 and BFloat16.
# Polar's backward is calculated using complex(), which does not support bfloat16. CUDA fails with same error.
"test_dtypes_polar_xpu",
Expand Down Expand Up @@ -1327,18 +1321,10 @@ def launch_test(test_case, skip_list=None, exe_list=None):
# AssertionError: False is not true
"test_ctc_loss_cudnn_xpu", # want "xpu" in function name
"test_ctc_loss_cudnn_tensor", # want "xpu" in function name
# NotImplementedError: Could not run 'aten::batch_norm_stats' with arguments from the 'CPU' backend.
"test_sync_batchnorm_accuracy_cuda",
# NotImplementedError: Could not run 'aten::batch_norm_backward_elemt' with arguments from the 'CPU' backend.
"test_sync_batchnorm_backward_elemt",
# RuntimeError: "smooth_l1_backward_cpu_out" not implemented for 'Half'
"test_SmoothL1Loss_no_batch_dim_mean_cuda_half",
"test_SmoothL1Loss_no_batch_dim_none_cuda_half",
"test_SmoothL1Loss_no_batch_dim_sum_cuda_half",
# RuntimeError: "mse_backward_cpu_out" not implemented for 'Half'
"test_MSELoss_no_batch_dim_mean_cuda_half",
"test_MSELoss_no_batch_dim_none_cuda_half",
"test_MSELoss_no_batch_dim_sum_cuda_half",
# RuntimeError: "multilabel_margin_loss_forward_out_frame" not implemented for 'Half'
"test_MultiLabelMarginLoss_no_batch_dim_mean_cuda_half",
"test_MultiLabelMarginLoss_no_batch_dim_none_cuda_half",
Expand Down Expand Up @@ -1492,13 +1478,7 @@ def launch_test(test_case, skip_list=None, exe_list=None):
# AssertionError: Jiterator is only supported on CUDA and ROCm GPUs, none are available.
"_jiterator_",
# CPU Fallback fails: Tensor-likes are not close!
"test_reference_numerics_extremal__refs_acos_xpu_complex128",
"test_reference_numerics_extremal__refs_nn_functional_tanhshrink_xpu_complex64",
"test_reference_numerics_extremal_acos_xpu_complex128",
"test_reference_numerics_extremal_nn_functional_tanhshrink_xpu_complex64",
"test_reference_numerics_normal__refs_nn_functional_tanhshrink_xpu_complex64",
"test_reference_numerics_normal_nn_functional_tanhshrink_xpu_complex64",
"test_reference_numerics_large__refs_tanh_xpu_complex32",
"test_reference_numerics_large_tanh_xpu_complex32",
# For extreme value processing, Numpy and XPU results are inconsistent
# std operations get different behavior on std::complex operands for extremal cases
Expand Down Expand Up @@ -1535,19 +1515,19 @@ def launch_test(test_case, skip_list=None, exe_list=None):
"test_reference_numerics_large_asinh_xpu_complex64",
"test_reference_numerics_large_asinh_xpu_complex32",

# Mismatched elements: 1 / 943593 (0.0%)
# Greatest absolute difference: 1.3363442121772096e-05 at index (742, 249) (up to 1e-05 allowed)
# Greatest relative difference: 8.852276550896931e-06 at index (742, 249) (up to 1.3e-06 allowed)
"test_reference_numerics_normal_nn_functional_tanhshrink_xpu_complex64",

# AssertionError: Tensor-likes are not close!
# exceeded maximum allowed difference
# Greatest absolute difference: 6.266784475883469e-05 at index (463, 204) (up to 1e-05 allowed)
# Greatest relative difference: 1.9145216356264427e-05 at index (463, 204) (up to 1.3e-06 allowed)
"test_reference_numerics_normal__refs_asinh_xpu_complex64",
"test_reference_numerics_normal_asinh_xpu_complex64",

# CPU Fallback fails
# New ATen operators fails on CPU Fallback.
# E.g. aten::special_spherical_bessel_j0, aten::special_airy_ai.
"_special_",
# Failed: Unexpected success
"test_reference_numerics_large__refs_rsqrt_xpu_complex32",
"test_reference_numerics_large_rsqrt_xpu_complex32",

# Numeric difference
Expand Down

0 comments on commit ced287f

Please sign in to comment.