Add ATen::adaptive_avg_pool3d forward/backward (#851)
- [x] ATen::adaptive_avg_pool3d forward
- [x] ATen::adaptive_avg_pool3d backward

---------

Co-authored-by: Yutao Xu <[email protected]>
chunhuanMeng and xytintel authored Oct 22, 2024
1 parent 4034556 commit 88b029e
Showing 8 changed files with 766 additions and 102 deletions.
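
For context, adaptive average pooling computes a data-dependent window for each output element rather than using a fixed kernel and stride: along each pooled dimension of input size I and output size O, output index o averages input indices [floor(o*I/O), ceil((o+1)*I/O)). The sketch below illustrates that bin arithmetic only; the helper names are illustrative and not part of this commit.

#include <cstdint>

// Start (inclusive) of the input range feeding output bin o.
static int64_t bin_start(int64_t o, int64_t in_size, int64_t out_size) {
  return (o * in_size) / out_size; // floor(o * in / out)
}

// End (exclusive) of the input range feeding output bin o.
static int64_t bin_end(int64_t o, int64_t in_size, int64_t out_size) {
  return ((o + 1) * in_size + out_size - 1) / out_size; // ceil((o + 1) * in / out)
}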
101 changes: 1 addition & 100 deletions src/ATen/native/xpu/AdaptiveAveragePooling2d.cpp
@@ -13,81 +13,6 @@
#include <ATen/native/xpu/sycl/AdaptiveAveragePooling2dKernels.h>

namespace at {

namespace {

static c10::SymInt _safe_size(c10::SymIntArrayRef sizes, c10::IntArrayRef dim) {
  c10::SymInt size = 1;
  if (sizes.empty()) {
    return 1;
  }
  for (auto d : dim) {
    d = at::maybe_wrap_dim(d, static_cast<int64_t>(sizes.size()));
    size *= sizes[d];
  }
  return size;
}

Tensor unsqueeze_multiple(
    const Tensor& t,
    OptionalIntArrayRef opt_dim,
    size_t n_dims) {
  if (opt_dim.has_value()) {
    IntArrayRef dim = opt_dim.value();
    auto dim_size = dim.size();
    // Optimisation for two common cases
    if (dim_size == 0) {
      return t;
    } else if (dim_size == 1) {
      return t.unsqueeze(dim[0]);
    }
  }
  auto dims_to_unsqueeze = at::dim_list_to_bitset(opt_dim, n_dims);
  Tensor res = t;
  for (const auto i : c10::irange(n_dims)) {
    if (dims_to_unsqueeze[i]) {
      res = res.unsqueeze(static_cast<int64_t>(i));
    }
  }
  return res;
}

Tensor sum_backward(
    const Tensor& grad,
    c10::SymIntArrayRef sizes,
    OptionalIntArrayRef opt_dims,
    bool keepdim) {
  if (!keepdim && !sizes.empty()) {
    if (opt_dims.has_value() && !opt_dims.value().empty()) {
      return unsqueeze_multiple(grad, opt_dims, sizes.size())
          .expand_symint(sizes);
    }
  }
  return grad.expand_symint(sizes);
}

Tensor mean_backward(
    const Tensor& grad,
    const Tensor& input,
    c10::SymIntArrayRef shape,
    OptionalIntArrayRef opt_dim,
    c10::SymInt numel,
    bool keepdim) {
  bool is_all_reduce = !opt_dim.has_value() || opt_dim.value().empty();
  auto n =
      is_all_reduce ? std::move(numel) : _safe_size(shape, opt_dim.value());

  Tensor grad_input =
      sum_backward(grad, shape, opt_dim, keepdim) / std::move(n);

  if (input.suggest_memory_format() == at::MemoryFormat::ChannelsLast) {
    grad_input = grad_input.contiguous(input.suggest_memory_format());
  }

  return grad_input;
}
} // namespace

namespace native {
Tensor adaptive_avg_pool2d_backward_xpu(
    const Tensor& grad_output,
@@ -104,16 +29,6 @@ Tensor adaptive_avg_pool2d_backward_xpu(
      (input.ndimension() == 3 || input.ndimension() == 4),
      "non-empty 3D or 4D (batch mode) tensor expected for input");

  if (grad_output.size(-1) == 1 && grad_output.size(-2) == 1) {
    return mean_backward(
        grad_output,
        input,
        input.sym_sizes().vec(),
        {-1, -2},
        input.sym_numel(),
        true);
  }

  globalContext().alertNotDeterministic("_adaptive_avg_pool2d_backward");

  Tensor grad_input;
@@ -153,21 +68,7 @@ Tensor& adaptive_avg_pool2d_out_xpu(
"empty");
}

  if (output_size[0] == 1 && output_size[1] == 1) {
    if (output.numel() == 0) {
      output = input.mean({-1, -2}, /* keepdim = */ true);
    } else {
      at::mean_out(output, input, {-1, -2}, true, std::nullopt);
    }
    if (input.suggest_memory_format() == at::MemoryFormat::ChannelsLast) {
      // assert ndim == 4, since ndim = 3 doesn't give channels_last
      const auto n = input.sym_size(0);
      const auto c = input.sym_size(1);
      output.as_strided__symint({n, c, 1, 1}, {c, 1, c, c});
    }
  } else {
    xpu::adaptive_avg_pool2d_kernel(output, input, output_size);
  }
  xpu::adaptive_avg_pool2d_kernel(output, input, output_size);
  return output;
}

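The branches deleted above were host-side fast paths: pooling to a 1x1 output (and its backward) is just a mean over the spatial dimensions, so the old code dispatched to at::mean / mean_backward instead of the pooling kernel. This commit routes both cases through the XPU kernel unconditionally. A minimal sketch of the identity the fast path relied on, assuming a PyTorch build with a working XPU device:

#include <ATen/ATen.h>

void check_global_pool_identity() {
  // Global adaptive pooling equals a keepdim mean over H and W.
  at::Tensor x = at::randn({2, 3, 8, 8}, at::device(at::kXPU));
  at::Tensor pooled = at::adaptive_avg_pool2d(x, {1, 1});
  at::Tensor averaged = x.mean({-2, -1}, /*keepdim=*/true);
  TORCH_CHECK(at::allclose(pooled, averaged)); // agree within float tolerance
}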
46 changes: 46 additions & 0 deletions src/ATen/native/xpu/AdaptiveAveragePooling3d.cpp
@@ -0,0 +1,46 @@
#include <ATen/core/Tensor.h>
#include <ATen/native/AdaptivePooling.h>
#include <ATen/native/xpu/sycl/AdaptiveAveragePooling3dKernels.h>

#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <xpu/ATen/ops/adaptive_avg_pool3d_backward_native.h>
#include <xpu/ATen/ops/adaptive_avg_pool3d_native.h>

namespace at::native {

Tensor& adaptive_avg_pool3d_out_xpu(
    const Tensor& input,
    IntArrayRef output_size,
    Tensor& output) {
  at::native::xpu::adaptive_avg_pool3d_kernel(output, input, output_size);
  return output;
}

Tensor adaptive_avg_pool3d_xpu(const Tensor& input, IntArrayRef output_size) {
  auto output = at::empty({0}, input.options());
  at::native::xpu::adaptive_avg_pool3d_kernel(output, input, output_size);
  return output;
}

Tensor& adaptive_avg_pool3d_backward_out_xpu(
    const Tensor& gradOutput_,
    const Tensor& input,
    Tensor& gradInput) {
  globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_xpu");
  at::native::xpu::adaptive_avg_pool3d_backward_kernel(
      gradInput, gradOutput_, input);
  return gradInput;
}

Tensor adaptive_avg_pool3d_backward_xpu(
    const Tensor& gradOutput_,
    const Tensor& input) {
  globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_xpu");
  auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  at::native::xpu::adaptive_avg_pool3d_backward_kernel(
      gradInput, gradOutput_, input);
  return gradInput;
}

} // namespace at::native
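
A minimal usage sketch for the new entry points, assuming an XPU-enabled libtorch build; shapes are illustrative. Note that the backward pass calls alertNotDeterministic, so it will raise when deterministic algorithms are enforced.

#include <torch/torch.h>

void run_pool3d_example() {
  // N=2, C=4, D=8, H=16, W=16 input on the XPU device.
  auto input = torch::randn({2, 4, 8, 16, 16},
                            torch::device(torch::kXPU).requires_grad(true));
  // Pool to a fixed 4x4x4 output volume regardless of input extent.
  auto out = torch::adaptive_avg_pool3d(input, {4, 4, 4});
  out.sum().backward(); // reaches adaptive_avg_pool3d_backward_xpu
}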
2 changes: 0 additions & 2 deletions src/ATen/native/xpu/XPUFallback.template
@@ -153,8 +153,6 @@ TORCH_LIBRARY_IMPL(aten, XPU, m) {
*/
TORCH_LIBRARY_IMPL(aten, XPU, m) {
  std::vector<std::string> fallback_list = {
      "_adaptive_avg_pool3d",
      "_adaptive_avg_pool3d_backward",
      "_cdist_backward",
      "cholesky",
      "cholesky_inverse",
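Dropping "_adaptive_avg_pool3d" and "_adaptive_avg_pool3d_backward" from fallback_list stops these ops from being routed through the CPU fallback, so the dispatcher now reaches the native XPU kernels registered above. Conceptually, each name in the list is bound to a boxed fallback handler; this is a simplified sketch, not the exact template code, and cpu_fallback_fn is a stand-in name:

// Assumed shape of the registration loop inside TORCH_LIBRARY_IMPL(aten, XPU, m).
for (const auto& op_name : fallback_list) {
  m.impl(
      op_name.c_str(),
      torch::CppFunction::makeFromBoxedFunction<&cpu_fallback_fn>());
}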