Commit

im2col/col2im: Enable bool to align with PyTorch claimed data types. (#894)

All "RuntimeError: "im2col_xpu" not implemented for 'Bool'" failures are expected to be fixed by this PR.
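
As a rough user-level illustration (not part of the commit; the device string and tensor shape are assumptions), the failure surfaces through torch.nn.functional.unfold, whose XPU backend is the im2col kernel patched below:

import torch
import torch.nn.functional as F

# Hedged repro sketch: unfold (im2col) on a bool tensor on an XPU device.
# Before this change the XPU dispatch macro did not cover Bool, so this call
# raised: RuntimeError: "im2col_xpu" not implemented for 'Bool'.
x = torch.randint(0, 2, (1, 1, 8, 8), dtype=torch.bool, device="xpu")
patches = F.unfold(x, kernel_size=3)   # routes to im2col_xpu
print(patches.shape, patches.dtype)    # expected: torch.Size([1, 9, 36]) torch.bool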

---------

Co-authored-by: chunhuanMeng <[email protected]>
daisyden and chunhuanMeng authored Sep 12, 2024
1 parent 987ad12 commit c6981a2
Showing 3 changed files with 6 additions and 19 deletions.
3 changes: 2 additions & 1 deletion src/ATen/native/xpu/sycl/Col2ImKernel.cpp
@@ -212,9 +212,10 @@ void col2im_kernel(
output.resize_({batch_size, n_output_plane, output_height, output_width});
output.zero_();

-AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
+AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3(
at::ScalarType::BFloat16,
at::ScalarType::Half,
+at::ScalarType::Bool,
input.scalar_type(),
"col2im_xpu",
[&] {
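The col2im path above is reached from Python through torch.nn.functional.fold. A minimal hedged sketch of the now-enabled bool case (device string and shapes are assumptions, not taken from the commit):

import torch
import torch.nn.functional as F

# Hedged sketch: fold (col2im) on a bool tensor, dispatching to the col2im_xpu
# kernel patched above. With Bool added to the dispatch list, this should run
# instead of raising the analogous "not implemented for 'Bool'" error.
cols = torch.ones(1, 9, 36, dtype=torch.bool, device="xpu")
img = F.fold(cols, output_size=(8, 8), kernel_size=3)   # routes to col2im_xpu
print(img.shape, img.dtype)   # expected: torch.Size([1, 1, 8, 8]) torch.bool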
4 changes: 2 additions & 2 deletions src/ATen/native/xpu/sycl/Im2ColKernel.cpp
@@ -211,8 +211,8 @@ void im2col_kernel(
output.resize_({batch_size, n_output_plane, output_length});
output.zero_();

-AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
-kHalf, kBFloat16, input.scalar_type(), "im2col_xpu", [&] {
+AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3(
+kHalf, kBFloat16, kBool, input.scalar_type(), "im2col_xpu", [&] {
Tensor input_n;
Tensor output_n;

18 changes: 2 additions & 16 deletions test/xpu/skip_list_common.py
@@ -672,14 +672,7 @@
# So far CUDA doesn't support Half, so that XPU fails as we aligned claimed dtypes with CUDA in test infra.
# https://github.com/intel/torch-xpu-ops/issues/623
"test_dtypes_nextafter_xpu",

-# Bool is involved in latest PyTorch. XPU impl of unfold doesn't support it.
-# https://github.com/intel/torch-xpu-ops/issues/887
-"test_dtypes_nn_functional_unfold_xpu",
-"test_non_standard_bool_values_nn_functional_unfold_xpu_bool",
-"test_compare_cpu_nn_functional_unfold_xpu_bool",
-"test_non_standard_bool_values_nn_functional_unfold_xpu_bool",


# AssertionError: The supported dtypes for argsort on device type xpu are incorrect!
# The following dtypes worked in forward but are not listed by the OpInfo: {torch.bool}.
# CUDA does not have torch.bool support on argsort.
@@ -2112,10 +2105,6 @@
"test_reduction_all_prod_layout2_xpu_float64",
"test_reduction_all_sum_layout2_xpu_float16",
"test_reduction_all_sum_layout2_xpu_float64",

-# Bool is involved in latest PyTorch. XPU impl of unfold doesn't support it.
-# https://github.com/intel/torch-xpu-ops/issues/887
-"test_nn_unfold_xpu",
),

"nn/test_packed_sequence_xpu.py": (
@@ -3493,9 +3482,6 @@
"test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool1d_xpu_bfloat16",
"test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool1d_xpu_float",
"test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool2d_xpu_bfloat16",
"test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool2d_xpu_float",
"test_meta_outplace_nn_functional_unfold_xpu_bool",
"test_dispatch_meta_outplace_nn_functional_unfold_xpu_bool",
"test_dispatch_symbolic_meta_outplace_nn_functional_unfold_xpu_bool",
"test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool2d_xpu_float",
),
}
