Commit

address review comments
binarman committed Sep 10, 2024
1 parent b4463d4 commit cebb7e9
Showing 2 changed files with 11 additions and 11 deletions.
@@ -1,32 +1,32 @@
// RUN: triton-opt %s --split-input-file --decompose-unsupported-amd-conversions=arch=gfx942 | FileCheck %s

-// CHECK: #[[DST_ENC:.+]] = #triton_gpu.blocked<{{.*}}>
-// CHECK: #[[SRC_ENC:.+]] = #triton_gpu.amd_mfma<{{.*}}>
-// CHECK: #[[TMP_ENC:.+]] = #triton_gpu.amd_mfma<{{.*}}>
+// CHECK-DAG: #[[DST_ENC:.+]] = #triton_gpu.blocked<{{.*}}>
+// CHECK-DAG: #[[SRC_ENC:.+]] = #triton_gpu.amd_mfma<{{.*}}>
+// CHECK-DAG: #[[TMP_ENC:.+]] = #triton_gpu.amd_mfma<{{.*}}>
// CHECK: large_tensor_conversion
#src = #triton_gpu.amd_mfma<{versionMajor = 3, versionMinor = 0, warpsPerCTA = [1, 4], instrShape = [32, 32], isTransposed = false}>
#dst = #triton_gpu.blocked<{sizePerThread = [4, 1], threadsPerWarp = [8, 8], warpsPerCTA = [4, 1], order = [1, 0]}>
module attributes {"triton_gpu.num-ctas" = 1 : i32, "triton_gpu.num-warps" = 4 : i32, "triton_gpu.threads-per-warp" = 64 : i32} {
tt.func @large_tensor_conversion(%arg0: tensor<128x128xf32, #src>) {
-// CHECK: %0 = triton_gpu.convert_layout %arg0 : tensor<128x128xf32, #[[SRC_ENC]]> -> tensor<128x128xf32, #[[TMP_ENC]]>
-// CHECK: %1 = triton_gpu.convert_layout %0 : tensor<128x128xf32, #[[TMP_ENC]]> -> tensor<128x128xf32, #[[DST_ENC]]>
+// CHECK: %[[TMP:.*]] = triton_gpu.convert_layout {{.*}} : tensor<128x128xf32, #[[SRC_ENC]]> -> tensor<128x128xf32, #[[TMP_ENC]]>
+// CHECK: %{{.*}} = triton_gpu.convert_layout %[[TMP]] : tensor<128x128xf32, #[[TMP_ENC]]> -> tensor<128x128xf32, #[[DST_ENC]]>
%0 = triton_gpu.convert_layout %arg0 : tensor<128x128xf32, #src> -> tensor<128x128xf32, #dst>
tt.return
}
}

// -----

-// CHECK: #[[DST_ENC:.+]] = #triton_gpu.blocked<{{.*}}>
-// CHECK: #[[SRC_ENC:.+]] = #triton_gpu.amd_mfma<{{.*}}>
-// CHECK: #[[TMP_ENC:.+]] = #triton_gpu.amd_mfma<{{.*}}>
+// CHECK-DAG: #[[DST_ENC:.+]] = #triton_gpu.blocked<{{.*}}>
+// CHECK-DAG: #[[SRC_ENC:.+]] = #triton_gpu.amd_mfma<{{.*}}>
+// CHECK-DAG: #[[TMP_ENC:.+]] = #triton_gpu.amd_mfma<{{.*}}>
// CHECK: large_tensor_3d_conversion
#src = #triton_gpu.amd_mfma<{versionMajor = 3, versionMinor = 0, warpsPerCTA = [2, 1, 2], instrShape = [32, 32], isTransposed = false}>
#dst = #triton_gpu.blocked<{sizePerThread = [1, 1, 1], threadsPerWarp = [1, 64, 1], warpsPerCTA = [2, 2, 1], order = [2, 1, 0]}>
module attributes {"triton_gpu.num-ctas" = 1 : i32, "triton_gpu.num-warps" = 4 : i32, "triton_gpu.threads-per-warp" = 64 : i32} {
tt.func @large_tensor_3d_conversion(%arg0: tensor<2x128x64xf32, #src>) {
-// CHECK: %0 = triton_gpu.convert_layout %arg0 : tensor<2x128x64xf32, #[[SRC_ENC]]> -> tensor<2x128x64xf32, #[[TMP_ENC]]>
-// CHECK: %1 = triton_gpu.convert_layout %0 : tensor<2x128x64xf32, #[[TMP_ENC]]> -> tensor<2x128x64xf32, #[[DST_ENC]]>
+// CHECK: %[[TMP:.*]] = triton_gpu.convert_layout {{.*}} : tensor<2x128x64xf32, #[[SRC_ENC]]> -> tensor<2x128x64xf32, #[[TMP_ENC]]>
+// CHECK: {{.*}} = triton_gpu.convert_layout %[[TMP]] : tensor<2x128x64xf32, #[[TMP_ENC]]> -> tensor<2x128x64xf32, #[[DST_ENC]]>
%0 = triton_gpu.convert_layout %arg0 : tensor<2x128x64xf32, #src> -> tensor<2x128x64xf32, #dst>
tt.return
}
@@ -99,7 +99,7 @@ struct DecomposeUnsupportedAMDConversions
return;
}

-auto currLDSUsage = mlir::triton::AMD::getCvtOpLDSUsage(cvtOp);
+auto currLDSUsage = triton::AMD::getCvtOpLDSUsage(cvtOp);
if (currLDSUsage <= sharedMemoryLimit) {
return;
}
