diff --git a/src/ATen/native/xpu/UnaryOps.cpp b/src/ATen/native/xpu/UnaryOps.cpp index 895c641ac..f1bb12ba3 100644 --- a/src/ATen/native/xpu/UnaryOps.cpp +++ b/src/ATen/native/xpu/UnaryOps.cpp @@ -1028,11 +1028,12 @@ Tensor& XPUNativeFunctions::ceil_(Tensor& self) { } Tensor& XPUNativeFunctions::ceil_out(const Tensor& self, Tensor& out) { + auto iter = ceil_meta(self, out); + if (c10::isIntegralType(self.scalar_type(), /*includeBool=*/false)) { out.copy_(self); return out; } - auto iter = ceil_meta(self, out); native::xpu::ceil_kernel(iter); return out; } @@ -1134,11 +1135,12 @@ Tensor& XPUNativeFunctions::floor_(Tensor& self) { } Tensor& XPUNativeFunctions::floor_out(const Tensor& self, Tensor& out) { + auto iter = meta_floor(self, out); if (c10::isIntegralType(self.scalar_type(), /*includeBool=*/false)) { out.copy_(self); return out; } - auto iter = meta_floor(self, out); + native::xpu::floor_kernel(iter); return out; } diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py index 6b5df037b..9c7c297c4 100644 --- a/test/xpu/skip_list_common.py +++ b/test/xpu/skip_list_common.py @@ -673,6 +673,12 @@ # https://github.com/intel/torch-xpu-ops/issues/623 "test_dtypes_nextafter_xpu", + # Bool is involved in latest PyTorch. XPU impl of unfold doesn't support it. + # https://github.com/intel/torch-xpu-ops/issues/887 + "test_dtypes_nn_functional_unfold_xpu", + "test_non_standard_bool_values_nn_functional_unfold_xpu_bool", + "test_compare_cpu_nn_functional_unfold_xpu_bool", + # AssertionError: The supported dtypes for argsort on device type xpu are incorrect! # The following dtypes worked in forward but are not listed by the OpInfo: {torch.bool}. # CUDA does not have torch.bool support on argsort. 
@@ -2105,6 +2112,10 @@ "test_reduction_all_prod_layout2_xpu_float64", "test_reduction_all_sum_layout2_xpu_float16", "test_reduction_all_sum_layout2_xpu_float64", + + # Bool is involved in latest PyTorch. XPU impl of unfold doesn't support it. + # https://github.com/intel/torch-xpu-ops/issues/887 + "test_nn_unfold_xpu", ), "nn/test_packed_sequence_xpu.py": ( @@ -3483,5 +3494,8 @@ "test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool1d_xpu_float", "test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool2d_xpu_bfloat16", "test_dispatch_symbolic_meta_outplace_nn_functional_adaptive_max_pool2d_xpu_float", + "test_meta_outplace_nn_functional_unfold_xpu_bool", + "test_dispatch_meta_outplace_nn_functional_unfold_xpu_bool", + "test_dispatch_symbolic_meta_outplace_nn_functional_unfold_xpu_bool", ), }