Add early exit in sparse_async_cumsum op (pytorch#2213)
Summary:
Pull Request resolved: pytorch#2213

This diff adds an early exit to the sparse_async_cumsum ops: when the input tensor has zero elements, the ops return an empty or zero tensor of the appropriate shape instead of launching the CUB scan.
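For illustration, a minimal sketch of the new zero-size semantics, assuming the ops are exposed through the torch.ops.fbgemm namespace as fbgemm_gpu registers them (this snippet is not part of the diff):

    import torch

    empty = torch.empty(0, dtype=torch.long, device="cuda")

    # Inclusive/exclusive cumsum: an empty input now yields an empty output.
    out = torch.ops.fbgemm.asynchronous_inclusive_cumsum(empty)
    assert out.numel() == 0

    # Complete cumsum prepends a leading zero, so a zero-size 1-D input
    # still yields a single-element output holding 0.
    out = torch.ops.fbgemm.asynchronous_complete_cumsum(empty)
    assert out.tolist() == [0]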

This is a reworked version of the previous diff D51999938, which failed some tests
(https://www.internalfb.com/intern/test/281475091287900/)
and was reverted in D52099677.

Reviewed By: jasonjk-park

Differential Revision: D52155358

fbshipit-source-id: 31e9e921b0c71946af6114043e66d962ee63666d
Mark Eremeev authored and facebook-github-bot committed Jan 11, 2024
1 parent cf86516 commit 5300974
Showing 2 changed files with 21 additions and 7 deletions.
22 changes: 18 additions & 4 deletions fbgemm_gpu/src/sparse_ops/sparse_async_cumsum.cu
@@ -14,9 +14,13 @@ namespace fbgemm_gpu {

 DLL_PUBLIC Tensor asynchronous_inclusive_cumsum_gpu(const Tensor& t_in) {
   TENSOR_ON_CUDA_GPU(t_in);

   at::cuda::OptionalCUDAGuard device_guard;
   device_guard.set_index(t_in.get_device());

+  if (t_in.numel() == 0) {
+    return at::empty_like(t_in);
+  }
+
   size_t temp_storage_bytes = 0;
   TORCH_CHECK(t_in.is_contiguous());
   TORCH_CHECK(t_in.dtype() == at::kInt || t_in.dtype() == at::kLong);
@@ -55,9 +59,13 @@ DLL_PUBLIC Tensor asynchronous_inclusive_cumsum_gpu(const Tensor& t_in) {

 DLL_PUBLIC Tensor asynchronous_exclusive_cumsum_gpu(const Tensor& t_in) {
   TENSOR_ON_CUDA_GPU(t_in);

   at::cuda::OptionalCUDAGuard device_guard;
   device_guard.set_index(t_in.get_device());

+  if (t_in.numel() == 0) {
+    return at::empty_like(t_in);
+  }
+
   size_t temp_storage_bytes = 0;
   TORCH_CHECK(t_in.is_contiguous());
   TORCH_CHECK(t_in.dtype() == at::kInt || t_in.dtype() == at::kLong);
@@ -96,7 +104,6 @@ DLL_PUBLIC Tensor asynchronous_exclusive_cumsum_gpu(const Tensor& t_in) {

 DLL_PUBLIC Tensor asynchronous_complete_cumsum_gpu(const Tensor& t_in) {
   TENSOR_ON_CUDA_GPU(t_in);
-
   at::cuda::OptionalCUDAGuard device_guard;
   device_guard.set_index(t_in.get_device());
   size_t temp_storage_bytes = 0;
@@ -106,10 +113,13 @@ DLL_PUBLIC Tensor asynchronous_complete_cumsum_gpu(const Tensor& t_in) {
   if (t_in.dim() == 1) {
     // CUB only handles up to INT_MAX elements.
     TORCH_CHECK(t_in.numel() < std::numeric_limits<int32_t>::max());

     auto t_out = at::empty({t_in.numel() + 1}, t_in.options());
     t_out[0].zero_();

+    if (t_in.numel() == 0) {
+      return t_out;
+    }
+
     AT_DISPATCH_INDEX_TYPES(
         t_in.scalar_type(), "cub_inclusive_sum_wrapper1", [&] {
           AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceScan::InclusiveSum(
@@ -146,6 +156,10 @@ DLL_PUBLIC Tensor asynchronous_complete_cumsum_gpu(const Tensor& t_in) {
   TORCH_CHECK(num_entries < std::numeric_limits<int32_t>::max());
   auto t_out = at::zeros({num_vecs, num_entries + 1}, t_in.options());

+  if (t_in.numel() == 0) {
+    return t_out;
+  }
+
   AT_DISPATCH_INDEX_TYPES(
       t_in.scalar_type(), "cub_inclusive_sum_wrapper1", [&] {
         AT_CUDA_CHECK(FBGEMM_GPU_CUB_NS_PREFIX cub::DeviceScan::InclusiveSum(
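In the 2-D branch, t_out is allocated with at::zeros({num_vecs, num_entries + 1}, ...) before the early exit, so a zero-size input still gets an output with the expected leading-zero column. A hedged sketch of that behavior (op name and namespace assumed, as above):

    import torch

    # A (4, 0) input: num_vecs = 4, num_entries = 0.
    t_in = torch.empty(4, 0, dtype=torch.int, device="cuda")
    out = torch.ops.fbgemm.asynchronous_complete_cumsum(t_in)

    # The early exit returns the pre-zeroed (num_vecs, num_entries + 1) tensor.
    assert out.shape == (4, 1)
    assert bool(out.eq(0).all())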
6 changes: 3 additions & 3 deletions fbgemm_gpu/test/sparse_ops_test.py
@@ -595,7 +595,7 @@ def test_block_bucketize_sparse_features_long_indices(
         torch.testing.assert_close(new_indices_gpu.cpu(), new_indices_cpu)

     @given(
-        n=st.integers(min_value=1, max_value=100),
+        n=st.integers(min_value=0, max_value=10),
         long_index=st.booleans(),
     )
     @settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
@@ -659,8 +659,8 @@ def test_cumsum(self, n: int, long_index: bool) -> None:
     )

     @given(
-        n=st.integers(min_value=1, max_value=600),
-        b=st.integers(min_value=1, max_value=10),
+        n=st.integers(min_value=0, max_value=60),
+        b=st.integers(min_value=0, max_value=10),
         long_index=st.booleans(),
     )
     @settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
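Widening min_value to 0 lets hypothesis generate empty inputs, which drives the tests through the new early-exit branches. A hedged sketch (not the actual test body) of the edge case this adds:

    import torch

    # n == 0 is now a legal draw from the strategy.
    x = torch.randint(low=0, high=100, size=(0,), dtype=torch.long)
    zc = torch.ops.fbgemm.asynchronous_complete_cumsum(x.cuda())

    # Complete cumsum of an empty tensor is a single leading zero.
    torch.testing.assert_close(
        zc.cpu(), torch.cat([torch.zeros(1, dtype=torch.long), x.cumsum(0)])
    )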
