fix UT test_unary_op_out_casting_xpu_int64_float32 (#872)
fix UT test_type_promotion.py::TestTypePromotionXPU::test_unary_op_out_casting_xpu_int64_float32.

---------

Co-authored-by: Daisy, Deng <[email protected]>
huaiyuzh and daisyden authored Sep 11, 2024
1 parent 1206590 commit 987ad12
Showing 2 changed files with 18 additions and 2 deletions.
6 changes: 4 additions & 2 deletions src/ATen/native/xpu/UnaryOps.cpp
@@ -1028,11 +1028,12 @@ Tensor& XPUNativeFunctions::ceil_(Tensor& self) {
 }
 
 Tensor& XPUNativeFunctions::ceil_out(const Tensor& self, Tensor& out) {
-  auto iter = ceil_meta(self, out);
+
   if (c10::isIntegralType(self.scalar_type(), /*includeBool=*/false)) {
     out.copy_(self);
     return out;
   }
+  auto iter = ceil_meta(self, out);
   native::xpu::ceil_kernel(iter);
   return out;
 }
@@ -1134,11 +1135,12 @@ Tensor& XPUNativeFunctions::floor_(Tensor& self) {
 }
 
 Tensor& XPUNativeFunctions::floor_out(const Tensor& self, Tensor& out) {
-  auto iter = meta_floor(self, out);
   if (c10::isIntegralType(self.scalar_type(), /*includeBool=*/false)) {
     out.copy_(self);
     return out;
   }
+  auto iter = meta_floor(self, out);
+
   native::xpu::floor_kernel(iter);
   return out;
 }
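In both ceil_out and floor_out the change moves the TensorIterator construction (ceil_meta / meta_floor) after the integral early-return, so an integer input is handled by out.copy_(self), which also performs the dtype conversion into out, before any iterator is built. Below is a minimal sketch of the dtype pair named in the fixed UT; the "xpu" device string assumes a torch-xpu-ops build is available, and the snippet is illustrative only, not part of the commit:

import torch

# int64 input with a float32 `out=` tensor, the combination in
# test_unary_op_out_casting_xpu_int64_float32.
t = torch.tensor(1, dtype=torch.int64, device="xpu")
out = torch.empty((), dtype=torch.float32, device="xpu")

# ceil/floor of an integral tensor reduce to a copy of the input; with the change
# above, the XPU path performs that copy before building the elementwise iterator.
torch.ceil(t, out=out)
torch.floor(t, out=out)
print(out, out.dtype)
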
14 changes: 14 additions & 0 deletions test/xpu/skip_list_common.py
@@ -673,6 +673,13 @@
         # https://github.com/intel/torch-xpu-ops/issues/623
         "test_dtypes_nextafter_xpu",
 
+        # Bool is involved in latest PyTorch. XPU impl of unfold doesn't support it.
+        # https://github.com/intel/torch-xpu-ops/issues/887
+        "test_dtypes_nn_functional_unfold_xpu",
+        "test_non_standard_bool_values_nn_functional_unfold_xpu_bool",
+        "test_compare_cpu_nn_functional_unfold_xpu_bool",
+        "test_non_standard_bool_values_nn_functional_unfold_xpu_bool",
+
         # AssertionError: The supported dtypes for argsort on device type xpu are incorrect!
         # The following dtypes worked in forward but are not listed by the OpInfo: {torch.bool}.
         # CUDA does not have torch.bool support on argsort.
@@ -2105,6 +2112,10 @@
         "test_reduction_all_prod_layout2_xpu_float64",
         "test_reduction_all_sum_layout2_xpu_float16",
         "test_reduction_all_sum_layout2_xpu_float64",
+
+        # Bool is involved in latest PyTorch. XPU impl of unfold doesn't support it.
+        # https://github.com/intel/torch-xpu-ops/issues/887
+        "test_nn_unfold_xpu",
     ),
 
     "nn/test_packed_sequence_xpu.py": (
@@ -3483,5 +3494,8 @@
         "test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool1d_xpu_float",
         "test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool2d_xpu_bfloat16",
         "test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool2d_xpu_float",
+        "test_meta_outplace_nn_functional_unfold_xpu_bool",
+        "test_dispatch_meta_outplace_nn_functional_unfold_xpu_bool",
+        "test_dispatch_symbolic_meta_outplace_nn_functional_unfold_xpu_bool",
     ),
 }
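All of the nn_functional_unfold skips added above trace back to the same gap noted in the comments: recent upstream PyTorch exercises torch.bool for nn.functional.unfold, while the XPU implementation does not support it yet (https://github.com/intel/torch-xpu-ops/issues/887). A rough sketch of the call those tests cover, not taken from the repository, with the unsupported bool variant left commented out:

import torch
import torch.nn.functional as F

# F.unfold (im2col) expects a 4-D NCHW input.
x = torch.randint(0, 2, (1, 1, 4, 4)).bool()

patches = F.unfold(x.float(), kernel_size=2)  # floating-point inputs are broadly supported
# F.unfold(x, kernel_size=2)                  # bool input: the case the skipped tests exercise
print(patches.shape)                          # torch.Size([1, 4, 9])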
