Remove unnecessary operator registrations (#689)
Explicit registration of the scalar-overload operators bypasses the
device-agnostic code path, which in turn misses the necessary
redispatching (e.g. `ZeroTensor`) on the XPU backend and causes a
segmentation fault in the XPU SYCL kernel due to a null data pointer.
Remove them to align with other backends.

Signed-off-by: majing <[email protected]>
majing921201 authored Aug 6, 2024
1 parent e6dee02 commit 718bc42
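
For illustration, a minimal sketch of the failure mode described above, assuming a libtorch build with the XPU backend (e.g. torch-xpu-ops). The snippet is not part of the commit; the comments paraphrase the commit message rather than asserting dispatcher internals:

#include <ATen/ATen.h>

int main() {
  // _efficientzerotensor allocates no storage, so the tensor's data
  // pointer is null; correct results rely on ZeroTensor redispatching.
  at::Tensor z = at::_efficientzerotensor(
      {2, 2}, at::TensorOptions().device(at::kXPU).dtype(at::kFloat));

  // Per the commit message: with mul.Scalar registered explicitly for
  // XPU, this call skipped the device-agnostic path, the ZeroTensor
  // redispatch was missed, and the SYCL kernel dereferenced the null
  // pointer. With the registration removed, the scalar overload falls
  // through to the device-agnostic kernel, which wraps the scalar and
  // redispatches to mul.Tensor, where ZeroTensor is handled.
  at::Tensor out = z.mul(3.0);
  (void)out;
  return 0;
}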
Showing 2 changed files with 0 additions and 70 deletions.
61 changes: 0 additions & 61 deletions src/ATen/native/xpu/BinaryOps.cpp
@@ -49,31 +49,6 @@ Tensor& XPUNativeFunctions::add_out(
   return out;
 }
 
-Tensor XPUNativeFunctions::add(
-    const Tensor& self,
-    const Scalar& other,
-    const Scalar& alpha) {
-  auto wrapper = native::wrapped_scalar_tensor(other);
-  return XPUNativeFunctions::add(self, wrapper, alpha);
-}
-
-Tensor& XPUNativeFunctions::add_(
-    Tensor& self,
-    const Scalar& other,
-    const Scalar& alpha) {
-  auto wrapper = native::wrapped_scalar_tensor(other);
-  return XPUNativeFunctions::add_(self, wrapper, alpha);
-}
-
-Tensor& XPUNativeFunctions::add_out(
-    const Tensor& self,
-    const Scalar& other,
-    const Scalar& alpha,
-    Tensor& out) {
-  auto wrapper = native::wrapped_scalar_tensor(other);
-  return XPUNativeFunctions::add_out(self, wrapper, alpha, out);
-}
-
 Tensor XPUNativeFunctions::sub(
     const Tensor& self,
     const Tensor& other,
@@ -131,24 +106,6 @@ Tensor& XPUNativeFunctions::mul_out(
   return out;
 }
 
-Tensor XPUNativeFunctions::mul(const Tensor& self, const Scalar& other) {
-  auto wrapper = native::wrapped_scalar_tensor(other);
-  return XPUNativeFunctions::mul(self, wrapper);
-}
-
-Tensor& XPUNativeFunctions::mul_(Tensor& self, const Scalar& other) {
-  auto wrapper = native::wrapped_scalar_tensor(other);
-  return XPUNativeFunctions::mul_(self, wrapper);
-}
-
-Tensor& XPUNativeFunctions::mul_out(
-    const Tensor& self,
-    const Scalar& other,
-    Tensor& out) {
-  auto wrapper = native::wrapped_scalar_tensor(other);
-  return XPUNativeFunctions::mul_out(self, wrapper, out);
-}
-
 Tensor XPUNativeFunctions::div(const Tensor& self, const Tensor& other) {
   Tensor out;
   TensorIterator iter;
@@ -174,24 +131,6 @@ Tensor& XPUNativeFunctions::div_out(
   return out;
 }
 
-Tensor XPUNativeFunctions::div(const Tensor& self, const Scalar& other) {
-  auto wrapper = native::wrapped_scalar_tensor(other);
-  return XPUNativeFunctions::div(self, wrapper);
-}
-
-Tensor& XPUNativeFunctions::div_(Tensor& self, const Scalar& other) {
-  auto wrapper = native::wrapped_scalar_tensor(other);
-  return XPUNativeFunctions::div_(self, wrapper);
-}
-
-Tensor& XPUNativeFunctions::div_out(
-    const Tensor& self,
-    const Scalar& other,
-    Tensor& out) {
-  auto wrapper = native::wrapped_scalar_tensor(other);
-  return XPUNativeFunctions::div_out(self, wrapper, out);
-}
-
 static inline TensorIterator meta_func_div_Tensor_mode(
     const Tensor& self,
     const Tensor& other,
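
With these overloads gone, dispatch for add.Scalar/mul.Scalar/div.Scalar falls through to PyTorch's device-agnostic composite kernels, roughly as in the sketch below (an assumption paraphrasing upstream pytorch/aten/src/ATen/native/BinaryOps.cpp, not code from this repository). The important difference is that the inner call re-enters the dispatcher instead of calling backend code directly, so keys such as ZeroTensor are redispatched before any XPU SYCL kernel runs:

// Sketch (assumption): device-agnostic Scalar-overload kernels in
// at::native, paraphrased from upstream BinaryOps.cpp.
Tensor mul(const Tensor& self, const Scalar& other) {
  // at::mul re-enters the dispatcher, unlike the removed XPU overloads,
  // which called XPUNativeFunctions::mul directly.
  return at::mul(self, wrapped_scalar_tensor(other));
}

Tensor& mul_(Tensor& self, const Scalar& other) {
  return self.mul_(wrapped_scalar_tensor(other));
}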
9 changes: 0 additions & 9 deletions yaml/xpu_functions.yaml
@@ -6,9 +6,6 @@ supported:
 - add.Tensor
 - add_.Tensor
 - add.out
-- add.Scalar
-- add_.Scalar
-- add.Scalar_out
 - _adaptive_avg_pool2d_backward
 - adaptive_avg_pool2d.out
 - _adaptive_avg_pool2d
@@ -28,15 +25,9 @@ supported:
 - mul.Tensor
 - mul_.Tensor
 - mul.out
-- mul.Scalar
-- mul_.Scalar
-- mul.Scalar_out
 - div.Tensor
 - div_.Tensor
 - div.out
-- div.Scalar
-- div_.Scalar
-- div.Scalar_out
 - div.Tensor_mode
 - div_.Tensor_mode
 - div.out_mode
