Input alignment #323

Open
wants to merge 10 commits into sycl-develop
71 changes: 52 additions & 19 deletions examples/sycl/00_pvc_gemm/00_pvc_gemm.cpp
@@ -82,6 +82,11 @@ using namespace cute;

///////////////////////////////////////////////////////////////////////////////////////////////////

// The alignment requirement in bytes on the inner dimension that will work for both PVC and BMG
Collaborator:

Sorry for a slightly cumbersome late suggestion - this didn't occur to me on my first review, but I wonder if we want to make this a dedicated example? Or, at least, apply it to an example other than 00_pvc_gemm. Given this will be many developers' first intro to cutlass-sycl, it would be nice not to hit them in the face with potentially daunting alignment & padding calculations straight away.

Collaborator Author:

Maybe, but I am unsure whether this is a big enough change to warrant its own example. The alternative is that we hit them with many inputs being unsupported. I am not sure; I would prefer to hear other opinions on this.

constexpr int AlignmentInner = 16;
// The alignment requirement in bytes on the outer dimension that will work for both PVC and BMG
constexpr int AlignmentPtr = 64;

// Command line options parsing
struct Options {
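
For readers new to these constraints, the arithmetic behind the two constants above is simple; the following standalone sketch assumes 2-byte bfloat16 operands (the default element type in this example) and purely illustrative shape values, and reimplements the rounding rule locally so it does not depend on the CUTLASS headers:

#include <cstdio>

// Same rule as cute::round_up: round x up to the next multiple of a.
constexpr int round_up(int x, int a) { return (x + a - 1) / a * a; }

int main() {
  constexpr int AlignmentInner = 16;  // bytes required on the contiguous (inner) dimension
  constexpr int AlignmentPtr   = 64;  // bytes required at the start of each row / batch
  constexpr int ElemSize       = 2;   // sizeof(bfloat16_t)

  constexpr int AlignElem      = AlignmentInner / ElemSize;      // 8 elements
  constexpr int AlignmentOuter = AlignmentPtr / AlignmentInner;  // 4 rows

  // Illustrative problem size: M = 100, N = 63, K = 50, all RowMajor.
  int K_A   = round_up(50,  AlignElem);       // 56: padded leading dimension of A
  int N_BCD = round_up(63,  AlignElem);       // 64: padded leading dimension of B, C, D
  int M_ACD = round_up(100, AlignmentOuter);  // 100: already a multiple of 4
  int K_B   = round_up(50,  AlignmentOuter);  // 52: padded outer extent of B

  std::printf("K_A=%d N_BCD=%d M_ACD=%d K_B=%d\n", K_A, N_BCD, M_ACD, K_B);
  return 0;
}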

@@ -157,12 +162,18 @@ struct ExampleRunner {

using CollectiveEpilogue = typename Gemm::CollectiveEpilogue;
using ElementC = typename Gemm::ElementC;
using ElementD = typename Gemm::ElementD;
using ElementOutput = typename CollectiveEpilogue::ElementOutput;
using ElementCompute = typename CollectiveEpilogue::ElementCompute;
using ElementAccumulator = typename CollectiveEpilogue::ElementAccumulator;

using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape;

static constexpr int AlignElemA = AlignmentInner / sizeof(ElementA);
static constexpr int AlignElemB = AlignmentInner / sizeof(ElementB);
static constexpr int AlignElemC = AlignmentInner / sizeof(ElementC);
static constexpr int AlignElemD = AlignmentInner / sizeof(ElementD);

//
// Data members
//
@@ -186,11 +197,23 @@

bool verify(const ProblemShapeType& problem_size, ElementCompute alpha, ElementCompute beta) {
auto [M, N, K, L] = problem_size;

cutlass::TensorRef ref_A(block_A.get(), LayoutA::packed({M, K}));
cutlass::TensorRef ref_B(block_B.get(), LayoutB::packed({K, N}));
cutlass::TensorRef ref_C(block_C.get(), LayoutC::packed({M, N}));
cutlass::TensorRef ref_D(block_ref_D.get(), LayoutD::packed({M, N}));

// Padded values
// The inner dimension is padded. Since this example is all RowMajor,
// we require the following:
int N_B = cute::round_up(N, AlignElemB);
int N_C = cute::round_up(N, AlignElemC);
int N_D = cute::round_up(N, AlignElemD);
int K_A = cute::round_up(K, AlignElemA);

int AlignmentOuter = AlignmentPtr / AlignmentInner;
int M_ACD = cute::round_up(M, AlignmentOuter);
int K_B = cute::round_up(K, AlignmentOuter);

cutlass::TensorRef ref_A(block_A.get(), LayoutA(K_A));
cutlass::TensorRef ref_B(block_B.get(), LayoutB(N_B));
cutlass::TensorRef ref_C(block_C.get(), LayoutC(N_C));
cutlass::TensorRef ref_D(block_ref_D.get(), LayoutD(N_D));

cutlass::reference::device::GemmComplex(
{M, N, K},
@@ -204,10 +227,10 @@
ref_D,
ElementAccumulator(0),
L, // batch_count
M * K, // batch_stride_A
K * N, // batch_stride_B
M * N, // batch_stride_C
M * N // batch_stride_D
M_ACD * K_A, // batch_stride_A
K_B * N_B, // batch_stride_B
M_ACD * N_C, // batch_stride_C
M_ACD * N_D // batch_stride_D
);

// CUTLASS on SYCL uses the compatibility library syclcompat for e.g. default in-order queue
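
The batch strides passed to the reference GemmComplex above have to match how the padded buffers are laid out. A minimal addressing sketch under the same illustrative numbers as before (M_ACD = 100, K_A = 56); offset_A is a hypothetical helper for this note, not part of the PR:

#include <cassert>
#include <cstdint>

// Element offset of A(batch, m, k) in a row-major buffer padded to M_ACD x K_A.
int64_t offset_A(int64_t batch, int64_t m, int64_t k, int64_t M_ACD, int64_t K_A) {
  return batch * M_ACD * K_A  // batch_stride_A, as passed to GemmComplex
       + m * K_A              // row stride = padded leading dimension
       + k;                   // contiguous inner dimension
}

int main() {
  // With M_ACD = 100 and K_A = 56, the second batch starts 5600 elements into block_A.
  assert(offset_A(1, 0, 0, 100, 56) == 5600);
  return 0;
}
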
@@ -225,17 +248,27 @@
auto problem_shape_MNKL = cute::append<4>(problem_size, 1);
auto [M, N, K, L] = problem_shape_MNKL;

// Padded values
int N_B = cute::round_up(N, AlignElemB);
int N_C = cute::round_up(N, AlignElemC);
int N_D = cute::round_up(N, AlignElemD);
int K_A = cute::round_up(K, AlignElemA);

int AlignmentOuter = AlignmentPtr / AlignmentInner;
int M_ACD = cute::round_up(M, AlignmentOuter);
int K_B = cute::round_up(K, AlignmentOuter);

// Complete the stride by combining static layout info (StrideA) with runtime size info (M,K,L)
stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L));
stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L));
stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L));
stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L));

block_A.reset(M * K * L);
block_B.reset(K * N * L);
block_C.reset(M * N * L);
block_D.reset(M * N * L);
block_ref_D.reset(M * N * L);
stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M_ACD, K_A, L));
stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N_B, K_B, L));
stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M_ACD, N_C, L));
stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M_ACD, N_D, L));

block_A.reset(M_ACD * K_A * L);
block_B.reset(K_B * N_B * L);
block_C.reset(M_ACD * N_C * L);
block_D.reset(M_ACD * N_D * L);
block_ref_D.reset(M_ACD * N_D * L);

initialize_block(block_A, seed + 2023);
initialize_block(block_B, seed + 2022);
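
The key point of the block above is that only the strides and allocation sizes carry the padding; the problem shape itself stays at the logical (M, N, K, L). The sketch below mirrors, without the CUTLASS headers, what make_cute_packed_stride is expected to produce for a row-major A when fed the padded extents, assuming the usual packed-stride convention (row stride equals the padded inner extent, batch stride equals the padded matrix size); the helper name packed_row_major and the numbers are illustrative assumptions:

#include <cstdint>
#include <cstdio>

struct RowMajorStride { int64_t stride_m, stride_k, stride_l; };

// Packed row-major stride for an (M x K x L) tensor with padded extents.
RowMajorStride packed_row_major(int64_t m_pad, int64_t k_pad) {
  return { k_pad,           // distance between consecutive rows
           1,               // inner dimension stays contiguous
           m_pad * k_pad }; // distance between consecutive batches (assumes L > 1)
}

int main() {
  // Padded extents from the earlier sketch: M_ACD = 100, K_A = 56.
  RowMajorStride dA = packed_row_major(100, 56);
  std::printf("dA = (%lld, %lld, %lld)\n",
              (long long)dA.stride_m, (long long)dA.stride_k, (long long)dA.stride_l);
  // The matching allocation is block_A.reset(M_ACD * K_A * L) elements.
  return 0;
}
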
28 changes: 25 additions & 3 deletions include/cutlass/gemm/kernel/xe_gemm.hpp
@@ -162,11 +162,33 @@ class GemmUniversal<
auto m = get<0>(args.problem_shape);
auto n = get<1>(args.problem_shape);
auto k = get<2>(args.problem_shape);
auto l = get<3>(args.problem_shape);
bool is_batch = l > 1;
// all these requirements are in bytes
constexpr int inner_alignment_requirement = 16;
constexpr int outer_alignment_requirement = 64;
constexpr int width_alignment_requirement = 4;

auto check_stride = [is_batch](auto stride, int el_size){
auto a = get<0>(stride);
auto b = get<1>(stride);
auto valid_is_unit = a == _1{} || b == _1{};
auto inner = a == _1{} ? b : a;
Comment on lines +175 to +176
Collaborator:

Suggested change
auto valid_is_unit = a == _1{} || b == _1{};
auto inner = a == _1{} ? b : a;
auto valid_is_unit = a == 1 || b == 1;
auto inner = a == 1 ? b : a;

Collaborator Author:

Why would you prefer that changed? If a check can be made at compile time, why not do it?

auto valid_inner = inner % (inner_alignment_requirement / el_size) == 0;
auto valid_outer = !is_batch || get<2>(stride) % (outer_alignment_requirement / el_size) == 0;
return valid_is_unit && valid_inner && valid_outer;
};
bool strides_valid = check_stride(args.mainloop.dA, sizeof(ElementA)) &&
check_stride(args.mainloop.dB, sizeof(ElementB)) &&
check_stride(args.epilogue.dC, sizeof(ElementC)) &&
check_stride(args.epilogue.dD, sizeof(ElementD));
// TODO(codeplay): base *_valid on the atom shapes
bool m_valid = m > 0;
bool n_valid = n > 0 && n % 4 == 0;
bool k_valid = k > 0 && k % get<2>(TileShape{}) == 0;
bool shape_implementable = (m_valid && n_valid && k_valid);
bool n_valid = n > 0 && n % (width_alignment_requirement / sizeof(ElementB)) == 0 &&
n % (width_alignment_requirement / sizeof(ElementC)) == 0 &&
n % (width_alignment_requirement / sizeof(ElementD)) == 0;
bool k_valid = k > 0 && k % (width_alignment_requirement / sizeof(ElementA)) == 0;
bool shape_implementable = m_valid && n_valid && k_valid && strides_valid;

bool mode_implementable = args.mode == GemmUniversalMode::kGemm ||
(args.mode == GemmUniversalMode::kBatched && rank(ProblemShape{}) == 4);
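
Restated outside the kernel for readability, the new stride check boils down to the following. This standalone sketch uses plain integers, whereas the real code compares against cute's _1 so that statically-known unit strides are checked at compile time; the example values are assumptions:

#include <cstdio>

// Standalone restatement of check_stride() for a (stride0, stride1, batch_stride) triple.
bool check_stride(long s0, long s1, long s_batch, int el_size, bool is_batch) {
  constexpr int inner_alignment_requirement = 16;  // bytes
  constexpr int outer_alignment_requirement = 64;  // bytes
  bool is_unit  = s0 == 1 || s1 == 1;                                    // one mode must be contiguous
  long inner    = s0 == 1 ? s1 : s0;                                     // the leading dimension
  bool inner_ok = inner % (inner_alignment_requirement / el_size) == 0;  // 8 elements for bf16
  bool outer_ok = !is_batch || s_batch % (outer_alignment_requirement / el_size) == 0;  // 32 elements for bf16
  return is_unit && inner_ok && outer_ok;
}

int main() {
  // Row-major bf16 A with leading dimension 56 and batch stride 5600 passes;
  // a leading dimension of 50 (not a multiple of 8) is rejected.
  std::printf("%d %d\n",
              check_stride(56, 1, 5600, /*el_size=*/2, /*is_batch=*/true),
              check_stride(50, 1, 5000, /*el_size=*/2, /*is_batch=*/true));
  return 0;
}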