Merge branch 'main' into unexpected_xfail
yuchengliu1 authored Aug 5, 2024
2 parents 84bcf77 + 2f75c47 commit 62bff24
Showing 27 changed files with 813 additions and 142 deletions.
2 changes: 1 addition & 1 deletion .github/ci_commit_pins/torchbench.txt
@@ -1 +1 @@
-bb5294090a397b15fadf10cd2172f9bd9c461f9a
+03cde49eba0580ed17f9ae2250832fd8af4ed756
1 change: 1 addition & 0 deletions .github/workflows/_linux_ut.yml
@@ -29,6 +29,7 @@ on:
         default: 'linux.idc.xpu'
         description: Runner label
 
+permissions: read-all
 
 jobs:
   Torch-XPU-UT-Tests:
34 changes: 32 additions & 2 deletions cmake/BuildFlags.cmake
@@ -1,5 +1,27 @@
 # Setup building flags for SYCL device and host codes.
 
+function(CHECK_SYCL_FLAG FLAG VARIABLE_NAME)
+  set(TEMP_DIR "${CMAKE_BINARY_DIR}/temp")
+  file(MAKE_DIRECTORY ${TEMP_DIR})
+  set(TEST_SRC_FILE "${TEMP_DIR}/check_options.cpp")
+  set(TEST_EXE_FILE "${TEMP_DIR}/check_options.out")
+  file(WRITE ${TEST_SRC_FILE} "#include <iostream>\nint main() { std::cout << \"Checking compiler options ...\" << std::endl; return 0; }\n")
+  execute_process(
+    COMMAND ${SYCL_COMPILER} -fsycl ${TEST_SRC_FILE} -o ${TEST_EXE_FILE} ${FLAG}
+    WORKING_DIRECTORY ${TEMP_DIR}
+    OUTPUT_VARIABLE output
+    ERROR_VARIABLE output
+    RESULT_VARIABLE result
+    TIMEOUT 60
+  )
+  if(result EQUAL 0)
+    set(${VARIABLE_NAME} TRUE PARENT_SCOPE)
+  else()
+    set(${VARIABLE_NAME} FALSE PARENT_SCOPE)
+  endif()
+  file(REMOVE_RECURSE ${TEMP_DIR})
+endfunction()
+
 # Support GCC on Linux and MSVC on Windows at the moment.
 if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
   # # -- Host flags (SYCL_CXX_FLAGS)
@@ -64,8 +86,16 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "MSVC"
     set(SYCL_KERNEL_OPTIONS ${SYCL_KERNEL_OPTIONS} -D_GLIBCXX_USE_CXX11_ABI=${GLIBCXX_USE_CXX11_ABI})
   endif()
 
-  set(SYCL_KERNEL_OPTIONS ${SYCL_KERNEL_OPTIONS} -fsycl-fp64-conv-emu)
+  CHECK_SYCL_FLAG("-fsycl-fp64-conv-emu" SUPPORTS_FP64_CONV_EMU)
+  if(SUPPORTS_FP64_CONV_EMU)
+    set(SYCL_KERNEL_OPTIONS ${SYCL_KERNEL_OPTIONS} -fsycl-fp64-conv-emu)
+  else()
+    message(WARNING "The compiler does not support the '-fsycl-fp64-conv-emu' flag, \
+so it will be disabled. On platforms without native FP64 support, \
+running operations with the FP64 data type will raise a 'Required aspect fp64 is not supported on the device' \
+or a 'Native API failed' runtime error.")
+  endif()
 
   set(SYCL_FLAGS ${SYCL_FLAGS} ${SYCL_KERNEL_OPTIONS})
 
   set(TORCH_XPU_OPS_FLAGS ${SYCL_HOST_FLAGS})
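
Note: CHECK_SYCL_FLAG works by try-compiling a trivial SYCL program with the candidate flag appended. The probe it writes to disk is equivalent to the standalone file below; the driver invocation in the comment assumes icpx as ${SYCL_COMPILER}, which is not spelled out in this diff.

// Standalone equivalent of the probe source CHECK_SYCL_FLAG writes to
// ${CMAKE_BINARY_DIR}/temp/check_options.cpp. The check passes iff a
// command like the following exits with status 0 (icpx is an assumption;
// any compiler configured as ${SYCL_COMPILER} is exercised the same way):
//
//   icpx -fsycl check_options.cpp -o check_options.out -fsycl-fp64-conv-emu
//
#include <iostream>

int main() {
  std::cout << "Checking compiler options ..." << std::endl;
  return 0;
}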
6 changes: 1 addition & 5 deletions src/ATen/native/xpu/GroupNorm.cpp
@@ -60,10 +60,6 @@ std::tuple<Tensor, Tensor, Tensor> XPUNativeFunctions::native_group_norm(
   // repeated check so expanded weights can call native_group_norm directly but
   // save mean and variance from forward
   check_group_norm_inputs(X, gamma, beta, C, group);
-  auto memory_format = X.device().is_cpu() ? X.suggest_memory_format()
-                                           : at::MemoryFormat::Contiguous;
-
-  TORCH_CHECK(X.is_contiguous(memory_format));
 
   bool mixed_type = at::native::is_mixed_type(X, gamma, beta);
   if (mixed_type) {
@@ -76,7 +72,7 @@ std::tuple<Tensor, Tensor, Tensor> XPUNativeFunctions::native_group_norm(
       c10::nullopt /* layout */,
       c10::nullopt /* device */,
       c10::nullopt /* pin_memory */,
-      memory_format);
+      MemoryFormat::Contiguous);
   const auto dtype = at::native::param_scalar_type(X, mixed_type);
   Tensor mean = at::empty({N, group}, X.options().dtype(dtype));
   Tensor rstd = at::empty({N, group}, X.options().dtype(dtype));
3 changes: 2 additions & 1 deletion src/ATen/native/xpu/ReduceOps.cpp
@@ -320,7 +320,8 @@ inline TensorIterator get_allany_iter(
     const Tensor& result,
     OptionalIntArrayRef dims,
     bool keepdim) {
-  return meta::make_reduction(self, result, dims, keepdim, self.scalar_type());
+  return meta::make_reduction_from_out_ty(
+      self, result, dims, keepdim, result.scalar_type());
 }
 
 template <int identity, typename Stub>
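
Note: make_reduction_from_out_ty builds the reduction iterator around the output's scalar type rather than the input's, which is what all/any want: the result is bool (or uint8 for the legacy out= overload), so inputs should be loaded and compared in the output type instead of being accumulated as self.scalar_type(). This appears to mirror the structured meta implementation the CPU and CUDA backends use for these reductions.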
18 changes: 10 additions & 8 deletions src/ATen/native/xpu/sycl/BatchNormKernels.cpp
@@ -263,7 +263,8 @@ static inline void group_reduce(
   // uint32_t SIMD = sg.get_local_range()[0];
 #pragma unroll
   for (int i = 1; i < SIMD; i <<= 1) {
-    val = bin_op(val, static_cast<accscalar_t>(sg.shuffle_down(val, i)));
+    val = bin_op(
+        val, static_cast<accscalar_t>(sycl::shift_group_left(sg, val, i)));
   }
   if (sub_group_num == 1) {
     if (lane_id == 0) {
@@ -294,7 +295,8 @@ static inline void group_reduce(
   }
 #pragma unroll
   for (int i = 1; i < SIMD; i <<= 1) {
-    val = bin_op(val, static_cast<accscalar_t>(sg.shuffle_down(val, i)));
+    val = bin_op(
+        val, static_cast<accscalar_t>(sycl::shift_group_left(sg, val, i)));
     if (i >= ((sub_group_num + 1) >> 1))
       break;
   }
@@ -450,10 +452,10 @@ struct BatchNormCollectStatisticsKernelFunctor
     // one value per subgroup
 #pragma unroll
     for (int i = 1; i < SIMD; i <<= 1) {
-      stat_accscalar_t o_avg = sg.shuffle_xor(avg, i);
-      int o_n = sg.shuffle_xor(n, i);
+      stat_accscalar_t o_avg = sycl::permute_group_by_xor(sg, avg, i);
+      int o_n = sycl::permute_group_by_xor(sg, n, i);
       stat_accscalar_t factor = 1.0 / fmaxf(1.0, n + o_n);
-      var_n += sg.shuffle_xor(var_n, i) +
+      var_n += sycl::permute_group_by_xor(sg, var_n, i) +
           (avg - o_avg) * (avg - o_avg) * n * o_n * factor;
       avg = (n * avg + o_n * o_avg) * factor;
       n += o_n;
@@ -481,10 +483,10 @@ struct BatchNormCollectStatisticsKernelFunctor
     }
 #pragma unroll
     for (int i = 1; i < SIMD; i <<= 1) {
-      stat_accscalar_t o_avg = sg.shuffle_xor(avg, i);
-      int o_n = sg.shuffle_xor(n, i);
+      stat_accscalar_t o_avg = sycl::permute_group_by_xor(sg, avg, i);
+      int o_n = sycl::permute_group_by_xor(sg, n, i);
       stat_accscalar_t factor = 1.0f / fmaxf(1.0f, n + o_n);
-      var_n += sg.shuffle_xor(var_n, i) +
+      var_n += sycl::permute_group_by_xor(sg, var_n, i) +
          (avg - o_avg) * (avg - o_avg) * n * o_n * factor;
       avg = (n * avg + o_n * o_avg) * factor;
       n += o_n;
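
Note: the pattern behind these changes is SYCL 2020's move from sub-group member shuffles to free-function group algorithms: sg.shuffle_down(v, d) becomes sycl::shift_group_left(sg, v, d) and sg.shuffle_xor(v, d) becomes sycl::permute_group_by_xor(sg, v, d). A minimal self-contained sketch of the same shift-left tree reduction, not part of this commit, assuming a SYCL 2020 compiler and a power-of-two sub-group size:

#include <sycl/sycl.hpp>
#include <iostream>

int main() {
  sycl::queue q;
  constexpr size_t N = 64; // one work-group; sub-group size is hardware-chosen
  float* in = sycl::malloc_shared<float>(N, q);
  float* partial = sycl::malloc_shared<float>(N, q); // one slot per sub-group
  for (size_t i = 0; i < N; ++i) { in[i] = 1.0f; partial[i] = 0.0f; }

  q.parallel_for(sycl::nd_range<1>{N, N}, [=](sycl::nd_item<1> item) {
     auto sg = item.get_sub_group();
     float val = in[item.get_global_id(0)];
     // Same tree reduction as group_reduce above: after log2(size) steps,
     // lane 0 of each sub-group holds the sum of all its lanes.
     for (unsigned i = 1; i < sg.get_local_range()[0]; i <<= 1) {
       val += sycl::shift_group_left(sg, val, i);
     }
     if (sg.leader()) partial[sg.get_group_id()[0]] = val;
   }).wait();

  float sum = 0.0f;
  for (size_t i = 0; i < N; ++i) sum += partial[i];
  std::cout << "sum = " << sum << std::endl; // expect 64
  sycl::free(in, q);
  sycl::free(partial, q);
  return 0;
}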
18 changes: 18 additions & 0 deletions src/ATen/native/xpu/sycl/CumprodKernel.cpp
@@ -21,4 +21,22 @@ void launch_cumprod_kernel(
       });
 }
 
+static c10::MaybeOwned<Tensor> contiguous_out_arg(const Tensor& tensor) {
+  if (tensor.is_contiguous()) {
+    return c10::MaybeOwned<Tensor>::borrowed(tensor);
+  }
+  return c10::MaybeOwned<Tensor>::owned(
+      at::empty(tensor.sizes(), tensor.options()));
+}
+
+void cumprod_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
+  auto result_ = contiguous_out_arg(result);
+
+  launch_cumprod_kernel(*result_, self, dim);
+
+  if (!result.is_same(*result_)) {
+    result.copy_(*result_);
+  }
+}
+
 } // namespace at::native::xpu
18 changes: 18 additions & 0 deletions src/ATen/native/xpu/sycl/CumsumKernel.cpp
@@ -21,4 +21,22 @@ void launch_cumsum_kernel(
       });
 }
 
+static c10::MaybeOwned<Tensor> contiguous_out_arg(const Tensor& tensor) {
+  if (tensor.is_contiguous()) {
+    return c10::MaybeOwned<Tensor>::borrowed(tensor);
+  }
+  return c10::MaybeOwned<Tensor>::owned(
+      at::empty(tensor.sizes(), tensor.options()));
+}
+
+void cumsum_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
+  auto result_ = contiguous_out_arg(result);
+
+  launch_cumsum_kernel(*result_, self, dim);
+
+  if (!result.is_same(*result_)) {
+    result.copy_(*result_);
+  }
+}
+
 } // namespace at::native::xpu
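
Note: both new wrappers share a borrow-or-copy idiom: if the destination is already contiguous, the kernel writes into it directly; otherwise it writes into a contiguous scratch tensor and copies back once. A rough plain-C++ sketch of the idea follows; the names here are hypothetical stand-ins, while c10::MaybeOwned is the real ATen type used above.

#include <iostream>
#include <vector>

// Simplified stand-in for c10::MaybeOwned<Tensor>: either borrows the
// caller's buffer or owns a contiguous temporary of the same size.
struct MaybeOwnedBuf {
  std::vector<float>* borrowed = nullptr;
  std::vector<float> owned;
  std::vector<float>& get() { return borrowed ? *borrowed : owned; }
  bool is_borrowed() const { return borrowed != nullptr; }
};

MaybeOwnedBuf contiguous_out_arg(std::vector<float>& out, bool is_contiguous) {
  MaybeOwnedBuf b;
  if (is_contiguous) {
    b.borrowed = &out; // fast path: the kernel writes in place
  } else {
    b.owned.assign(out.size(), 0.0f); // contiguous scratch, copied back later
  }
  return b;
}

int main() {
  std::vector<float> result(4);
  auto tmp = contiguous_out_arg(result, /*is_contiguous=*/false);
  for (size_t i = 0; i < tmp.get().size(); ++i) {
    tmp.get()[i] = static_cast<float>(i); // stands in for launch_cumsum_kernel
  }
  if (!tmp.is_borrowed()) {
    result = tmp.get(); // mirrors result.copy_(*result_)
  }
  for (float v : result) std::cout << v << ' ';
  std::cout << '\n';
  return 0;
}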
2 changes: 1 addition & 1 deletion src/ATen/native/xpu/sycl/DistanceKernels.cpp
@@ -120,7 +120,7 @@ scalar_t subgroup_reduce_agg_without_broadcast_impl(
 
 #pragma unroll
   for (int offset = (SG_SIZE >> 1); offset > 0; offset >>= 1) {
-    F::agg(value, sg.shuffle_down(value, offset));
+    F::agg(value, sycl::shift_group_left(sg, value, offset));
   }
   return value;
 }
20 changes: 10 additions & 10 deletions src/ATen/native/xpu/sycl/GroupNormKernels.cpp
@@ -3,10 +3,10 @@
 #include <ATen/Dispatch.h>
 #include <ATen/OpMathType.h>
 #include <ATen/native/CanUse32BitIndexMath.h>
-#include <ATen/native/SharedReduceOps.h>
 #include <ATen/native/TensorIterator.h>
 #include <ATen/native/xpu/sycl/GroupReduceUtils.h>
 #include <ATen/native/xpu/sycl/Loops.h>
+#include <ATen/native/xpu/sycl/SharedReduceOps.h>
 #include <comm/MemoryFormat.h>
 #include <comm/XPUMathCompat.h>
 
@@ -18,23 +18,23 @@ template <
     typename index_t,
     typename res_t>
 struct WelfordOpsXPU
-    : public at::native::WelfordOps<scalar_t, acc_scalar_t, index_t, res_t> {
+    : public WelfordOps<scalar_t, acc_scalar_t, index_t, res_t> {
   sycl::nd_item<1>& item;
 
  public:
-  using acc_t = typename at::native::
-      WelfordOps<scalar_t, acc_scalar_t, index_t, res_t>::acc_t;
+  using acc_t =
+      typename WelfordOps<scalar_t, acc_scalar_t, index_t, res_t>::acc_t;
   inline acc_t shfl_down(acc_t acc, int offset) const {
     auto sg = item.get_sub_group();
     return {
-        sg.shuffle_down(acc.mean, offset),
-        sg.shuffle_down(acc.m2, offset),
-        sg.shuffle_down(acc.n, offset),
-        sg.shuffle_down(acc.nf, offset)};
+        sycl::shift_group_left(sg, acc.mean, offset),
+        sycl::shift_group_left(sg, acc.m2, offset),
+        sycl::shift_group_left(sg, acc.n, offset),
+        sycl::shift_group_left(sg, acc.nf, offset)};
   }
 
   WelfordOpsXPU(acc_scalar_t correction, bool take_sqrt, sycl::nd_item<1>& item)
-      : at::native::WelfordOps<scalar_t, acc_scalar_t, index_t, res_t>(
+      : WelfordOps<scalar_t, acc_scalar_t, index_t, res_t>(
            correction,
            take_sqrt),
        item(item) {}
@@ -43,7 +43,7 @@ struct WelfordOpsXPU
 template <typename T, int SIMD>
 struct GNRowwiseMomentsFunctor : public __SYCL_KER_CONFIG_CONVENTION__ {
   using T_ACC = acc_type_device<T, kXPU>;
-  using WelfordType = at::native::WelfordData<T_ACC, int64_t>;
+  using WelfordType = WelfordData<T_ACC, int64_t>;
   using WelfordOp =
       WelfordOpsXPU<T_ACC, T_ACC, int64_t, std::pair<T_ACC, T_ACC>>;
 
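
Note: WelfordOpsXPU::shfl_down shifts per-lane Welford partials (mean, M2, count) across the sub-group so they can be merged pairwise, and the BatchNorm butterfly above applies the same pairwise combination inline (Chan et al.): n = n_a + n_b, mean = (n_a*mean_a + n_b*mean_b)/n, M2 = M2_a + M2_b + (mean_b - mean_a)^2 * n_a * n_b / n. A host-side sketch checking that merge rule, with hypothetical names, not part of the commit:

#include <cstdio>
#include <vector>

struct Welford { double n = 0, avg = 0, m2 = 0; };

// Pairwise combination of two partial (n, mean, M2) triples; identical in
// form to the in-kernel update
//   var_n += o_var_n + (avg - o_avg)^2 * n * o_n / (n + o_n)
Welford merge(Welford a, Welford b) {
  Welford r;
  r.n = a.n + b.n;
  if (r.n == 0) return r;
  double delta = b.avg - a.avg;
  r.avg = (a.n * a.avg + b.n * b.avg) / r.n;
  r.m2 = a.m2 + b.m2 + delta * delta * a.n * b.n / r.n;
  return r;
}

int main() {
  std::vector<double> xs = {1, 2, 3, 4, 5, 6, 7, 8};
  // Accumulate each half sequentially (classic Welford), then merge.
  Welford left, right;
  for (int i = 0; i < 4; ++i) {
    left.n += 1; double d = xs[i] - left.avg;
    left.avg += d / left.n; left.m2 += d * (xs[i] - left.avg);
  }
  for (int i = 4; i < 8; ++i) {
    right.n += 1; double d = xs[i] - right.avg;
    right.avg += d / right.n; right.m2 += d * (xs[i] - right.avg);
  }
  Welford all = merge(left, right);
  std::printf("mean=%f var=%f\n", all.avg, all.m2 / all.n); // mean=4.5 var=5.25
  return 0;
}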
12 changes: 8 additions & 4 deletions src/ATen/native/xpu/sycl/Norm.h
@@ -39,8 +39,10 @@ static inline void norm_group_reduce(
   // uint32_t SIMD = sg.get_local_range()[0];
 #pragma unroll
   for (int i = 1; i < SIMD; i <<= 1) {
-    sum1 = bin_op(sum1, static_cast<accscalar_t>(sg.shuffle_down(sum1, i)));
-    sum2 = bin_op(sum2, static_cast<accscalar_t>(sg.shuffle_down(sum2, i)));
+    sum1 = bin_op(
+        sum1, static_cast<accscalar_t>(sycl::shift_group_left(sg, sum1, i)));
+    sum2 = bin_op(
+        sum2, static_cast<accscalar_t>(sycl::shift_group_left(sg, sum2, i)));
   }
   if (sub_group_num == 1) {
     sum1 = sycl::group_broadcast(sg, sum1, 0);
@@ -73,8 +75,10 @@ static inline void norm_group_reduce(
   }
 #pragma unroll
   for (int i = 1; i < SIMD; i <<= 1) {
-    sum1 = bin_op(sum1, static_cast<accscalar_t>(sg.shuffle_down(sum1, i)));
-    sum2 = bin_op(sum2, static_cast<accscalar_t>(sg.shuffle_down(sum2, i)));
+    sum1 = bin_op(
+        sum1, static_cast<accscalar_t>(sycl::shift_group_left(sg, sum1, i)));
+    sum2 = bin_op(
+        sum2, static_cast<accscalar_t>(sycl::shift_group_left(sg, sum2, i)));
     if (i >= ((sub_group_num + 1) >> 1))
       break;
   }
(The remaining 16 of the 27 changed files were not loaded and are omitted here.)
