diff --git a/src/ATen/native/xpu/PinnedMemoryAllocator.cpp b/src/ATen/native/xpu/PinnedMemoryAllocator.cpp
deleted file mode 100644
index 8d04d93e1..000000000
--- a/src/ATen/native/xpu/PinnedMemoryAllocator.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-#include
-#include
-#include
-#include
-
-namespace at {
-
-// Note: The user must call is_pinned(device='xpu') to explicitly call here.
-bool XPUNativeFunctions::is_pinned(
-    const Tensor& self,
-    c10::optional<Device> device) {
-  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
-      !device.has_value() || device->type() == c10::DeviceType::XPU);
-
-  return at::detail::getXPUHooks().isPinnedPtr(self.storage().data());
-}
-
-// Note: The user must call tensor.pin_memory(device='xpu') to explicitly call
-// here.
-Tensor XPUNativeFunctions::_pin_memory(
-    const Tensor& self,
-    c10::optional<Device> device) {
-  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
-      !device.has_value() || device->type() == c10::DeviceType::XPU);
-
-  auto* allocator = at::xpu::getPinnedMemoryAllocator();
-  auto storage = c10::Storage(
-      c10::Storage::use_byte_size_t(),
-      at::detail::computeStorageNbytes(
-          self.sizes(), self.strides(), self.dtype().itemsize()),
-      allocator,
-      /*resizable=*/false);
-  auto tensor = at::cpu::empty({0}, self.options())
-                    .set_(storage, 0, self.sizes(), self.strides());
-  tensor.copy_(self);
-  return tensor;
-}
-
-} // namespace at
diff --git a/test/xpu/test_torch_xpu.py b/test/xpu/test_torch_xpu.py
index afb4b28a8..b82a8ec67 100644
--- a/test/xpu/test_torch_xpu.py
+++ b/test/xpu/test_torch_xpu.py
@@ -8572,31 +8572,17 @@ def test_new(self) -> None:
         # TypeError would be better
         self.assertRaises(RuntimeError, lambda: x.new(z.storage()))
 
-    @unittest.skipIf(PYTORCH_CUDA_MEMCHECK, "is_pinned uses failure to detect pointer property")
     def test_pin_memory(self):
         x = torch.randn(3, 5)
         self.assertFalse(x.is_pinned())
-        if not torch.cuda.is_available() or not torch.xpu.is_available():
-            self.assertRaises(RuntimeError, lambda: x.pin_memory())
-        else:
-            if torch.xpu.is_available():
-                device = 'xpu'
-                self.assertFalse(x.is_pinned(device))
-                pinned = x.pin_memory(device)
-                self.assertTrue(pinned.is_pinned(device))
-                self.assertEqual(pinned, x)
-                self.assertNotEqual(pinned.data_ptr(), x.data_ptr())
-                # test that pin_memory on already pinned tensor has no effect
-                self.assertIs(pinned, pinned.pin_memory(device))
-                self.assertEqual(pinned.data_ptr(), pinned.pin_memory(device).data_ptr())
-            else:
-                pinned = x.pin_memory()
-                self.assertTrue(pinned.is_pinned())
-                self.assertEqual(pinned, x)
-                self.assertNotEqual(pinned.data_ptr(), x.data_ptr())
-                # test that pin_memory on already pinned tensor has no effect
-                self.assertIs(pinned, pinned.pin_memory())
-                self.assertEqual(pinned.data_ptr(), pinned.pin_memory().data_ptr())
+        if torch.cuda.is_available() or torch.xpu.is_available():
+            pinned = x.pin_memory()
+            self.assertTrue(pinned.is_pinned())
+            self.assertEqual(pinned, x)
+            self.assertNotEqual(pinned.data_ptr(), x.data_ptr())
+            # test that pin_memory on already pinned tensor has no effect
+            self.assertIs(pinned, pinned.pin_memory())
+            self.assertEqual(pinned.data_ptr(), pinned.pin_memory().data_ptr())
diff --git a/yaml/xpu_functions.yaml b/yaml/xpu_functions.yaml
index 54c5c31ff..cd6f80b84 100644
--- a/yaml/xpu_functions.yaml
+++ b/yaml/xpu_functions.yaml
@@ -539,8 +539,6 @@ supported:
   - sgn.out
   - sgn_
   - _cdist_forward
-  - _pin_memory
-  - is_pinned
   - is_set_to
   - bucketize.Tensor
   - bucketize.Tensor_out
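
For reference, a minimal Python sketch of the behaviour the simplified `test_pin_memory` now asserts: pinning goes through the plain `Tensor.pin_memory()` call with no explicit `device='xpu'` argument. This is only an illustration drawn from the test above; it assumes a build where `torch.cuda.is_available()` or `torch.xpu.is_available()` returns `True`.

```python
import torch

x = torch.randn(3, 5)
assert not x.is_pinned()  # a fresh CPU tensor starts out unpinned

# Guard mirrors the updated test: only exercise pinning when a
# CUDA- or XPU-enabled build is available.
if torch.cuda.is_available() or torch.xpu.is_available():
    pinned = x.pin_memory()                    # no device= argument needed
    assert pinned.is_pinned()
    assert torch.equal(pinned, x)              # same values ...
    assert pinned.data_ptr() != x.data_ptr()   # ... in a new page-locked buffer
    # pinning an already-pinned tensor is a no-op that returns the same tensor
    assert pinned.pin_memory() is pinned
    assert pinned.pin_memory().data_ptr() == pinned.data_ptr()
```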