
Commit 43dea76

eqy authored and pytorchmergebot committed
[CUDA] Switch to at::empty_like in adaptive_avg_pool3d_backward_cuda (pytorch#100202)
Same as pytorch#100138: `gradInput` is already zeroed out, so the extra fill from `at::zeros_like` is redundant. Also clean up includes after pytorch#100138. CC @ngimel @ptrblck

Pull Request resolved: pytorch#100202
Approved by: https://github.com/ngimel
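A minimal sketch of the rationale, assuming (per the message above) that the backward template zero-fills `gradInput` itself before accumulating: `at::zeros_like` is just `at::empty_like` plus a separate fill kernel, so pre-zeroing a buffer the callee zeroes anyway costs one redundant kernel launch. The function name `sketch` is illustrative, not from this PR.

```cpp
#include <ATen/ATen.h>

// Illustrative only: zeros_like == empty_like + an extra zero-fill kernel.
void sketch(const at::Tensor& input) {
  // Allocates and launches a fill kernel to write zeros:
  auto a = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  // Allocates only; contents are garbage until something writes them:
  auto b = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  b.zero_();  // stands in for the zero-fill the backward template is assumed to do
  TORCH_CHECK(at::equal(a, b));  // same result, one fewer kernel on the real path
}
```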
1 parent 380ccfd commit 43dea76


2 files changed: +2 −3 lines


Diff for: aten/src/ATen/native/cuda/AdaptiveAveragePooling3d.cu (+2 −2)

@@ -15,7 +15,7 @@
 #include <ATen/ops/adaptive_avg_pool3d_backward_native.h>
 #include <ATen/ops/adaptive_avg_pool3d_native.h>
 #include <ATen/ops/empty.h>
-#include <ATen/ops/zeros_like.h>
+#include <ATen/ops/empty_like.h>
 #endif

 #include <ATen/native/AdaptivePooling.h>
@@ -537,7 +537,7 @@ Tensor adaptive_avg_pool3d_backward_cuda(
   // See Note [Writing Nondeterministic Operations]
   // Nondeterministic because of atomicAdd usage
   globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_cuda");
-  auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
+  auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
   adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input);
   return gradInput;
 }
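For context on why the buffer must still be zeroed somewhere: the backward pass scatters gradient contributions with `atomicAdd`, so uninitialized memory would corrupt the sums, and floating-point atomics are also why the op is flagged nondeterministic. Below is a hypothetical kernel in that shape, not the one in this file; `scatter_grad` and `target_index` are made-up names.

```cpp
// Hypothetical scatter-accumulate kernel: grad_input must already be
// zeroed (by the backward template, per the commit message) at launch.
__global__ void scatter_grad(float* grad_input, const float* grad_output,
                             const int* target_index, int n_out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n_out) {
    // Overlapping pooling windows map several outputs onto one input
    // cell, so the add must be atomic; the order of float atomic adds
    // varies run to run, which changes rounding and hence the result.
    atomicAdd(&grad_input[target_index[i]], grad_output[i]);
  }
}
```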

Diff for: aten/src/ATen/native/cuda/DilatedMaxPool3d.cu (−1)

@@ -21,7 +21,6 @@
 #include <ATen/ops/empty.h>
 #include <ATen/ops/max_pool3d_with_indices_native.h>
 #include <ATen/ops/max_pool3d_with_indices_backward_native.h>
-#include <ATen/ops/zeros_like.h>
 #endif

 namespace at::native {
