[Compatible] Update Pinned PyTorch Nightly 1.13.0.dev20220801 #39

Open · wants to merge 4 commits into base pt-nightly-compatible
raf_native_functions.yaml: 2 changes (0 additions, 2 deletions)

@@ -74,8 +74,6 @@ supported:
 - _index_put_impl_
 - inverse
 - isnan
-- kl_div
-- kl_div_backward
 - kthvalue
 - log
 - log10
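Note on the kl_div removal (here and in the matching lowering removal in aten_raf_type.cpp below): dropping the two ops from the supported list presumably relies on the newer nightly decomposing kl_div into primitives the backend already lowers; that is an inference from the diff, not something it states. A minimal Python sketch of the documented pointwise form, handy for spot-checking that a decomposed path stays numerically equivalent (tensors and shapes are illustrative only):

    import torch
    import torch.nn.functional as F

    inp = torch.randn(3, 5).log_softmax(dim=-1)  # log-probabilities
    tgt = torch.randn(3, 5).softmax(dim=-1)      # probabilities

    # Documented pointwise form for log_target=False: target * (log(target) - input);
    # reduction="mean" then averages over all elements.
    manual = (tgt * (tgt.log() - inp)).mean()
    assert torch.allclose(manual, F.kl_div(inp, tgt, reduction="mean"))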
ratex/csrc/aten_raf_type.cpp: 42 changes (12 additions, 30 deletions)

@@ -1437,21 +1437,6 @@ at::Tensor LazyNativeFunctions::isnan(const at::Tensor& self) {
   return bridge::AtenFromLtcTensor(LazyTensor::isnan(bridge::raf_backend::GetLtcTensor(self)));
 }
 
-at::Tensor LazyNativeFunctions::kl_div(const at::Tensor& self, const at::Tensor& target,
-                                       int64_t reduction, bool log_target) {
-  LTC_FN_COUNTER("raf::");
-  return at::native::kl_div(self, target, reduction, log_target);
-}
-
-at::Tensor LazyNativeFunctions::kl_div_backward(const at::Tensor& grad_output,
-                                                const at::Tensor& self, const at::Tensor& target,
-                                                int64_t reduction, bool log_target) {
-  LTC_FN_COUNTER("raf::");
-  return bridge::AtenFromLtcTensor(LazyTensor::kl_div_backward(
-      bridge::raf_backend::GetLtcTensor(grad_output), bridge::raf_backend::GetLtcTensor(self),
-      bridge::raf_backend::GetLtcTensor(target), reduction, log_target));
-}
-
 std::tuple<at::Tensor, at::Tensor> LazyNativeFunctions::kthvalue(const at::Tensor& self, int64_t k,
                                                                  int64_t dim, bool keepdim) {
   LTC_FN_COUNTER("raf::");

@@ -1460,7 +1445,6 @@ std::tuple<at::Tensor, at::Tensor> LazyNativeFunctions::kthvalue(const at::Tenso
                          bridge::AtenFromLtcTensor(std::get<1>(results)));
 }
 
-
 at::Tensor LazyNativeFunctions::le(const at::Tensor& self, const at::Scalar& other) {
   LTC_FN_COUNTER("raf::");
   return bridge::AtenFromLtcTensor(LazyTensor::le(bridge::raf_backend::GetLtcTensor(self), other));
@@ -1715,7 +1699,6 @@ at::Tensor LazyNativeFunctions::max_unpool2d(const at::Tensor& self, const at::T
                                      lazy_tensors::util::ToVector<int64_t>(output_size)));
 }
 
-
 at::Tensor LazyNativeFunctions::max_unpool3d(const at::Tensor& self, const at::Tensor& indices,
                                              at::IntArrayRef output_size, at::IntArrayRef stride,
                                              at::IntArrayRef padding) {

@@ -1725,7 +1708,6 @@ at::Tensor LazyNativeFunctions::max_unpool3d(const at::Tensor& self, const at::T
                                      lazy_tensors::util::ToVector<int64_t>(output_size)));
 }
 
-
 at::Tensor LazyNativeFunctions::mean(const at::Tensor& self, c10::optional<at::ScalarType> dtype) {
   LTC_FN_COUNTER("raf::");
   LazyTensor self_tensor = bridge::raf_backend::GetLtcTensor(self);
@@ -1734,12 +1716,12 @@ at::Tensor LazyNativeFunctions::mean(const at::Tensor& self, c10::optional<at::S
                                                     /*keep_reduced_dimensions=*/false, dtype));
 }
 
-at::Tensor LazyNativeFunctions::mean(const at::Tensor& self, at::IntArrayRef dim, bool keepdim,
-                                     c10::optional<at::ScalarType> dtype) {
+at::Tensor LazyNativeFunctions::mean(const at::Tensor& self, at::OptionalIntArrayRef dim,
+                                     bool keepdim, c10::optional<at::ScalarType> dtype) {
   LTC_FN_COUNTER("raf::");
-  return bridge::AtenFromLtcTensor(LazyTensor::mean(bridge::raf_backend::GetLtcTensor(self),
-                                                    lazy_tensors::util::ToVector<int64_t>(dim),
-                                                    /*keep_reduced_dimensions=*/keepdim, dtype));
+  return bridge::AtenFromLtcTensor(LazyTensor::mean(
+      bridge::raf_backend::GetLtcTensor(self), lazy_tensors::util::ToVector<int64_t>(dim.value()),
+      /*keep_reduced_dimensions=*/keepdim, dtype));
 }
 
 at::Tensor LazyNativeFunctions::min(const at::Tensor& self) {
@@ -2547,12 +2529,12 @@ at::Tensor LazyNativeFunctions::sum(const at::Tensor& self, c10::optional<at::Sc
                                                    /*keep_reduced_dimensions=*/false, dtype));
 }
 
-at::Tensor LazyNativeFunctions::sum(const at::Tensor& self, at::IntArrayRef dim, bool keepdim,
-                                    c10::optional<at::ScalarType> dtype) {
+at::Tensor LazyNativeFunctions::sum(const at::Tensor& self, at::OptionalIntArrayRef dim,
+                                    bool keepdim, c10::optional<at::ScalarType> dtype) {
   LTC_FN_COUNTER("raf::");
-  return bridge::AtenFromLtcTensor(LazyTensor::sum(bridge::raf_backend::GetLtcTensor(self),
-                                                   lazy_tensors::util::ToVector<int64_t>(dim),
-                                                   keepdim, dtype));
+  return bridge::AtenFromLtcTensor(
+      LazyTensor::sum(bridge::raf_backend::GetLtcTensor(self),
+                      lazy_tensors::util::ToVector<int64_t>(dim.value()), keepdim, dtype));
 }
 
 std::tuple<at::Tensor, at::Tensor, at::Tensor> LazyNativeFunctions::svd(const at::Tensor& self,
@@ -2762,8 +2744,8 @@ at::Tensor LazyNativeFunctions::upsample_nearest2d(
 }
 
 at::Tensor LazyNativeFunctions::upsample_nearest2d_backward(
-    const at::Tensor& grad_output, at::OptionalIntArrayRef output_size,
-    at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+    const at::Tensor& grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size,
+    c10::optional<at::ArrayRef<double>> scale_factors) {
   LTC_FN_COUNTER("raf::");
   LazyTensor grad_output_tensor = bridge::raf_backend::GetLtcTensor(grad_output);
   if (grad_output_tensor.GetDevice().hw_type != DeviceType::TPU) {
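For context on the mean/sum changes above: in this nightly the dim-taking overloads accept an optional dim list, hence at::OptionalIntArrayRef, and the new lowerings unwrap it with dim.value(), which assumes the lazy path never reaches them with an empty optional. A short, illustrative check of the user-visible behavior this tracks, assuming a torch build recent enough to accept dim=None on these overloads (not code from the PR):

    import torch

    x = torch.randn(2, 3, 4)

    # dim=None reduces over all dimensions, which is what makes the dim
    # argument optional at the C++ boundary.
    assert torch.sum(x, dim=None).shape == torch.Size([])
    assert torch.allclose(torch.mean(x, dim=None), x.mean())

    # An explicit dim list still reduces only the listed dimensions.
    assert torch.sum(x, dim=[0, 2]).shape == torch.Size([3])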
ratex/jit/script.py: 3 changes (2 additions, 1 deletion)

@@ -260,7 +260,8 @@ def wrapper(*args, **kwargs):
     # TODO: use torch.jit.script
     assert len(args) == 1, f"Only support single input for now, but got {len(args)}"
     assert not kwargs, "Do not support kwargs yet"
-    shape_n_dtype = (list(args[0].shape), str(args[0].dtype).rsplit(".", maxsplit=1)[-1])
+    arg0_meta = args[0].to("meta")
+    shape_n_dtype = (list(arg0_meta.shape), str(arg0_meta.dtype).rsplit(".", maxsplit=1)[-1])
     cache_key = (hash_torch_module(module), str(shape_n_dtype))
     if cache_key in JIT_CACHE:
         # Cache hit.
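A standalone illustration of the ratex/jit/script.py change above: converting the example input to the meta device yields a tensor that keeps shape and dtype but owns no data, so building the cache key cannot force a transfer or materialization of a lazy tensor. The tensor below is made up for the example:

    import torch

    arg0 = torch.empty(4, 8, dtype=torch.float16)
    arg0_meta = arg0.to("meta")

    # Shape and dtype survive the conversion; storage does not.
    shape_n_dtype = (list(arg0_meta.shape), str(arg0_meta.dtype).rsplit(".", maxsplit=1)[-1])
    assert shape_n_dtype == ([4, 8], "float16")
    assert arg0_meta.device.type == "meta"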
ratex/lazy_tensors/computation_client/debug_macros.h: 14 changes (7 additions, 7 deletions)

@@ -15,13 +15,13 @@
 
 #define LTC_ERROR() LOG(ERROR)
 #define LTC_CHECK(c) CHECK(c)
-#define LTC_CHECK_OK(c) CHECK(c.ok())
-#define LTC_CHECK_EQ(a, b) CHECK_EQ(a, b)
-#define LTC_CHECK_NE(a, b) CHECK_NE(a, b)
-#define LTC_CHECK_LE(a, b) CHECK_LE(a, b)
-#define LTC_CHECK_GE(a, b) CHECK_GE(a, b)
-#define LTC_CHECK_LT(a, b) CHECK_LT(a, b)
-#define LTC_CHECK_GT(a, b) CHECK_GT(a, b)
+#define LTC_CHECK_OK(c) TORCH_CHECK(c.ok())
+#define LTC_CHECK_EQ(a, b) TORCH_CHECK_EQ(a, b)
+#define LTC_CHECK_NE(a, b) TORCH_CHECK_NE(a, b)
+#define LTC_CHECK_LE(a, b) TORCH_CHECK_LE(a, b)
+#define LTC_CHECK_GE(a, b) TORCH_CHECK_GE(a, b)
+#define LTC_CHECK_LT(a, b) TORCH_CHECK_LT(a, b)
+#define LTC_CHECK_GT(a, b) TORCH_CHECK_GT(a, b)
 
 template <typename T>
 T ConsumeValue(lazy_tensors::StatusOr<T>&& status) {
scripts/pinned_torch_nightly.txt: 2 changes (1 addition, 1 deletion)

@@ -1 +1 @@
-1.13.0.dev20220629
+1.13.0.dev20220801
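A trivial way to confirm a local environment matches the new pin, reading the expected version from the file this hunk edits (assumes the working directory is the repository root):

    from pathlib import Path

    import torch

    pinned = Path("scripts/pinned_torch_nightly.txt").read_text().strip()
    # Nightly wheels report versions such as "1.13.0.dev20220801+cu116".
    assert torch.__version__.startswith(pinned), (pinned, torch.__version__)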