forked from intel/mlir-extensions
Commit
Showing 96 changed files with 1,034 additions and 1,885 deletions.
@@ -1 +1 @@
-08a61eb01172054fc5f8c78ff527f01d9768569b
+add6b2f35f2bcf1f59a2ab2d5b3dab124fe0895a
@@ -1,35 +1,14 @@
From 94cc2bb6a778cad3b762244d6d78ecf2e19b5372 Mon Sep 17 00:00:00 2001
From: Md Abdullah Shahneous Bari <[email protected]>
Date: Fri, 26 Apr 2024 20:20:28 +0000
Subject: [PATCH 1/7] Add-support-for-VectorAnyINTEL-capability

Allow vector of any lengths between [2-2^63-1].
VectorAnyINTEL capability (part of "SPV_INTEL_vector_compute" extension)
relaxes the length constraint on SPIR-V vector sizes from 2,3, and 4.

Also add support for following:

- Add support for capability inferred extension requirement checking.
If a capability is a requirement, the respective extension that implements
it should also become an extension requirement, there were no support for
that check, as a result, the extension requirement had to be added separately.
This separate requirement addition causes problem when a feature is enabled by
multiple capability, and one of the capability is part of an extension. E.g.,
vector size of 16 can be enabled by both "Vector16" and "vectorAnyINTEL"
capability, however, only "vectorAnyINTEL" has an extension requirement
("SPV_INTEL_vector_compute"). Since the process of adding capability
and extension requirement are independent, there is no way, to handle
cases like this. Therefore, for cases like this, enable adding capability
requirement initially, then do the check for capability inferred extension.

- Add support for optionally skipping capability and extension requirement
From 45b150c9a0c4e4bd60c153e5142da17fd6cde6da Mon Sep 17 00:00:00 2001
From: izamyati <[email protected]>
Date: Tue, 24 Sep 2024 17:42:02 -0500
Subject: [PATCH] Add support for VectorAnyINTEL capability

---
.../mlir/Dialect/SPIRV/IR/SPIRVBase.td | 9 +-
mlir/include/mlir/IR/CommonTypeConstraints.td | 86 ++++++++++++
mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp | 7 +-
mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp | 24 +++-
.../SPIRV/Transforms/SPIRVConversion.cpp | 132 +++++++++++++++---
.../SPIRV/Transforms/SPIRVConversion.cpp | 126 +++++++++++++++---
.../arith-to-spirv-unsupported.mlir | 4 +-
.../ArithToSPIRV/arith-to-spirv.mlir | 34 +++++
.../FuncToSPIRV/types-to-spirv.mlir | 17 ++-
@@ -42,13 +21,13 @@ requirement initially, then do the check for capability inferred extension.
mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir | 34 ++---
mlir/test/Target/SPIRV/arithmetic-ops.mlir | 6 +-
mlir/test/Target/SPIRV/ocl-ops.mlir | 6 +
17 files changed, 319 insertions(+), 68 deletions(-)
17 files changed, 316 insertions(+), 65 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
index 6ec97e17c5dc..75e42c024553 100644
index 3b7da9b44a08..ddaeb13ef253 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
@@ -4138,7 +4138,12 @@ def SPIRV_Int32 : TypeAlias<I32, "Int32">;
@@ -4142,7 +4142,12 @@ def SPIRV_Int32 : TypeAlias<I32, "Int32">;
def SPIRV_Float32 : TypeAlias<F32, "Float32">;
def SPIRV_Float : FloatOfWidths<[16, 32, 64]>;
def SPIRV_Float16or32 : FloatOfWidths<[16, 32]>;
@@ -62,8 +41,8 @@ index 6ec97e17c5dc..75e42c024553 100644
[SPIRV_Bool, SPIRV_Integer, SPIRV_Float]>;
// Component type check is done in the type parser for the following SPIR-V
// dialect-specific types so we use "Any" here.
@@ -4189,7 +4194,7 @@ class SPIRV_JointMatrixOfType<list<Type> allowedTypes> :
"Joint Matrix">;
@@ -4185,7 +4190,7 @@ class SPIRV_CoopMatrixOfType<list<Type> allowedTypes> :
"Cooperative Matrix">;

class SPIRV_VectorOf<Type type> :
- VectorOfLengthAndType<[2, 3, 4, 8,16], [type]>;
@@ -72,10 +51,10 @@ index 6ec97e17c5dc..75e42c024553 100644
class SPIRV_ScalarOrVectorOf<Type type> :
AnyTypeOf<[type, SPIRV_VectorOf<type>]>;
diff --git a/mlir/include/mlir/IR/CommonTypeConstraints.td b/mlir/include/mlir/IR/CommonTypeConstraints.td
index af4f13dc0936..28d49d9e91f0 100644
index 211385245555..671ec270efe0 100644
--- a/mlir/include/mlir/IR/CommonTypeConstraints.td
+++ b/mlir/include/mlir/IR/CommonTypeConstraints.td
@@ -608,6 +608,92 @@ class ScalableVectorOfRankAndLengthAndType<list<int> allowedRanks,
@@ -637,6 +637,92 @@ class ScalableVectorOfRankAndLengthAndType<list<int> allowedRanks,
ScalableVectorOfLength<allowedLengths>.summary,
"::mlir::VectorType">;

@@ -169,7 +148,7 @@ index af4f13dc0936..28d49d9e91f0 100644
// Negative values for `n` index in reverse.
class ShapedTypeWithNthDimOfSize<int n, list<int> allowedSizes> : Type<
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
index 72488d6e5d0b..b38f20458d32 100644
index 48be287ef833..aec6d64209dd 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
@@ -187,9 +187,12 @@ static Type parseAndVerifyType(SPIRVDialect const &dialect,
@@ -188,7 +167,7 @@ index 72488d6e5d0b..b38f20458d32 100644
return Type();
}
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp
index 3f25696aa5eb..2d64fea0dc26 100644
index 337df3a5a65f..542c6beba2e4 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp
@@ -100,9 +100,11 @@ bool CompositeType::classof(Type type) {
@@ -206,7 +185,7 @@ index 3f25696aa5eb..2d64fea0dc26 100644
}

Type CompositeType::getElementType(unsigned index) const {
@@ -170,7 +172,21 @@ void CompositeType::getCapabilities(
@@ -164,7 +166,21 @@ void CompositeType::getCapabilities(
.Case<VectorType>([&](VectorType type) {
auto vecSize = getNumElements();
if (vecSize == 8 || vecSize == 16) {
@@ -230,10 +209,10 @@ index 3f25696aa5eb..2d64fea0dc26 100644
capabilities.push_back(ref);
}
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
index 4072608dc8f8..3fc675632970 100644
index d833ec9309ba..36840582a114 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
@@ -43,9 +43,13 @@ using namespace mlir;
@@ -88,9 +88,13 @@ static std::optional<SmallVector<int64_t>> getTargetShape(VectorType vecType) {
template <typename LabelT>
static LogicalResult checkExtensionRequirements(
LabelT label, const spirv::TargetEnv &targetEnv,
@@ -249,7 +228,7 @@ index 4072608dc8f8..3fc675632970 100644
continue;

LLVM_DEBUG({
@@ -71,9 +75,13 @@ static LogicalResult checkExtensionRequirements(
@@ -116,9 +120,13 @@ static LogicalResult checkExtensionRequirements(
template <typename LabelT>
static LogicalResult checkCapabilityRequirements(
LabelT label, const spirv::TargetEnv &targetEnv,
@@ -265,7 +244,7 @@ index 4072608dc8f8..3fc675632970 100644
continue;

LLVM_DEBUG({
@@ -90,6 +98,55 @@ static LogicalResult checkCapabilityRequirements(
@@ -135,6 +143,55 @@ static LogicalResult checkCapabilityRequirements(
return success();
}

@@ -321,27 +300,24 @@ index 4072608dc8f8..3fc675632970 100644
/// Returns true if the given `storageClass` needs explicit layout when used in
/// Shader environments.
static bool needsExplicitLayout(spirv::StorageClass storageClass) {
@@ -247,12 +304,17 @@ convertScalarType(const spirv::TargetEnv &targetEnv,
@@ -280,11 +337,16 @@ convertScalarType(const spirv::TargetEnv &targetEnv,
return nullptr;
}

-  if (auto floatType = dyn_cast<FloatType>(type)) {
+  //if (auto floatType = dyn_cast<FloatType>(type)) {
+  // Convert to 32-bit float and remove floatType related capability
+  // restriction
+  if (auto floatType = dyn_cast<FloatType>(type)) {
if (auto floatType = dyn_cast<FloatType>(type)) {
LLVM_DEBUG(llvm::dbgs() << type << " converted to 32-bit for SPIR-V\n");
return Builder(targetEnv.getContext()).getF32Type();
}

-  auto intType = cast<IntegerType>(type);
+  //auto intType = cast<IntegerType>(type);
+  // Convert to 32-bit int and remove intType related capability restriction
+  auto intType = cast<IntegerType>(type);
auto intType = cast<IntegerType>(type);
LLVM_DEBUG(llvm::dbgs() << type << " converted to 32-bit for SPIR-V\n");
return IntegerType::get(targetEnv.getContext(), /*width=*/32,
intType.getSignedness());
@@ -342,16 +404,40 @@ convertVectorType(const spirv::TargetEnv &targetEnv,
@@ -375,16 +437,40 @@ convertVectorType(const spirv::TargetEnv &targetEnv,
cast<spirv::CompositeType>(type).getExtensions(extensions, storageClass);
cast<spirv::CompositeType>(type).getCapabilities(capabilities, storageClass);
@@ -389,7 +365,7 @@ index 4072608dc8f8..3fc675632970 100644
}

static Type
@@ -1163,16 +1249,18 @@ bool SPIRVConversionTarget::isLegalOp(Operation *op) {
@@ -1553,16 +1639,18 @@ bool SPIRVConversionTarget::isLegalOp(Operation *op) {
SmallVector<ArrayRef<spirv::Extension>, 4> typeExtensions;
SmallVector<ArrayRef<spirv::Capability>, 8> typeCapabilities;
for (Type valueType : valueTypes) {
@@ -400,10 +376,9 @@ index 4072608dc8f8..3fc675632970 100644
-      return false;
-
typeCapabilities.clear();
-    cast<spirv::SPIRVType>(valueType).getCapabilities(typeCapabilities);
cast<spirv::SPIRVType>(valueType).getCapabilities(typeCapabilities);
-    if (failed(checkCapabilityRequirements(op->getName(), this->targetEnv,
-                                           typeCapabilities)))
+    cast<spirv::SPIRVType>(valueType).getCapabilities(typeCapabilities);
+    typeExtensions.clear();
+    cast<spirv::SPIRVType>(valueType).getExtensions(typeExtensions);
+    // Checking for capability and extension requirements along with capability
@@ -418,10 +393,10 @@ index 4072608dc8f8..3fc675632970 100644
}

diff --git a/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv-unsupported.mlir b/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv-unsupported.mlir
index 0d92a8e676d8..d61ace8d6876 100644
index 24a0bab352c3..96b8ea6e7975 100644
--- a/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv-unsupported.mlir
+++ b/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv-unsupported.mlir
@@ -11,9 +11,9 @@ module attributes {
@@ -28,9 +28,9 @@ module attributes {
#spirv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64, Shader], []>, #spirv.resource_limits<>>
} {

@@ -434,10 +409,10 @@ index 0d92a8e676d8..d61ace8d6876 100644
}

diff --git a/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir b/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir
index ae47ae36ca51..644996fe0fa7 100644
index 1abe0fd2ec46..e485296ad026 100644
--- a/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir
+++ b/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir
@@ -1447,6 +1447,40 @@ func.func @ops_flags(%arg0: i64, %arg1: i64) {
@@ -1462,6 +1462,40 @@ func.func @ops_flags(%arg0: i64, %arg1: i64) {
%2 = arith.muli %arg0, %arg1 overflow<nsw, nuw> : i64
// CHECK: %{{.*}} = spirv.IMul %{{.*}}, %{{.*}} : i64
%3 = arith.muli %arg0, %arg1 overflow<nsw, nuw> : i64
@@ -586,7 +561,7 @@ index 53a1015de75b..6970b8ec0628 100644
spirv.Return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir b/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
index 7dc0bd99f54b..5dd9901828cd 100644
index 5c24f0e6a7d3..3ca61ab48096 100644
--- a/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
@@ -166,7 +166,7 @@ func.func @logicalUnary(%arg0 : i1)
@@ -599,10 +574,10 @@ index 7dc0bd99f54b..5dd9901828cd 100644
return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
index f7fd05b36bae..5228bb719d94 100644
index d8a26c71d12f..d22378817dbb 100644
--- a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
@@ -439,7 +439,7 @@ func.func @group_non_uniform_bitwise_and(%val: i32) -> i32 {
@@ -495,7 +495,7 @@ func.func @group_non_uniform_bitwise_and(%val: i32) -> i32 {
// -----

func.func @group_non_uniform_bitwise_and(%val: i1) -> i1 {
@@ -611,7 +586,7 @@ index f7fd05b36bae..5228bb719d94 100644
%0 = spirv.GroupNonUniformBitwiseAnd "Workgroup" "Reduce" %val : i1
return %0: i1
}
@@ -460,7 +460,7 @@ func.func @group_non_uniform_bitwise_or(%val: i32) -> i32 {
@@ -516,7 +516,7 @@ func.func @group_non_uniform_bitwise_or(%val: i32) -> i32 {
// -----

func.func @group_non_uniform_bitwise_or(%val: i1) -> i1 {
@@ -620,7 +595,7 @@ index f7fd05b36bae..5228bb719d94 100644
%0 = spirv.GroupNonUniformBitwiseOr "Workgroup" "Reduce" %val : i1
return %0: i1
}
@@ -481,7 +481,7 @@ func.func @group_non_uniform_bitwise_xor(%val: i32) -> i32 {
@@ -537,7 +537,7 @@ func.func @group_non_uniform_bitwise_xor(%val: i32) -> i32 {
// -----

func.func @group_non_uniform_bitwise_xor(%val: i1) -> i1 {
@@ -629,7 +604,7 @@ index f7fd05b36bae..5228bb719d94 100644
%0 = spirv.GroupNonUniformBitwiseXor "Workgroup" "Reduce" %val : i1
return %0: i1
}
@@ -502,7 +502,7 @@ func.func @group_non_uniform_logical_and(%val: i1) -> i1 {
@@ -558,7 +558,7 @@ func.func @group_non_uniform_logical_and(%val: i1) -> i1 {
// -----

func.func @group_non_uniform_logical_and(%val: i32) -> i32 {
@@ -638,7 +613,7 @@ index f7fd05b36bae..5228bb719d94 100644
%0 = spirv.GroupNonUniformLogicalAnd "Workgroup" "Reduce" %val : i32
return %0: i32
}
@@ -523,7 +523,7 @@ func.func @group_non_uniform_logical_or(%val: i1) -> i1 {
@@ -579,7 +579,7 @@ func.func @group_non_uniform_logical_or(%val: i1) -> i1 {
// -----

func.func @group_non_uniform_logical_or(%val: i32) -> i32 {
@@ -647,7 +622,7 @@ index f7fd05b36bae..5228bb719d94 100644
%0 = spirv.GroupNonUniformLogicalOr "Workgroup" "Reduce" %val : i32
return %0: i32
}
@@ -544,7 +544,7 @@ func.func @group_non_uniform_logical_xor(%val: i1) -> i1 {
@@ -600,7 +600,7 @@ func.func @group_non_uniform_logical_xor(%val: i1) -> i1 {
// -----

func.func @group_non_uniform_logical_xor(%val: i32) -> i32 {
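For context on what the regenerated patch enables: with the VectorAnyINTEL capability (tied to the SPV_INTEL_vector_compute extension), SPIR-V vector lengths are no longer limited to the core sizes 2, 3, and 4 (or 8/16 via Vector16). Below is a minimal MLIR sketch, not taken from the patch itself, of an input module the arith-to-spirv path is then expected to accept; the target environment, function name, and the 6-element length are illustrative assumptions modeled on the tests touched above.

// Sketch only: VectorAnyINTEL and SPV_INTEL_vector_compute come from this patch;
// everything else here is hypothetical.
module attributes {
  spirv.target_env = #spirv.target_env<
    #spirv.vce<v1.0, [Kernel, VectorAnyINTEL], [SPV_INTEL_vector_compute]>,
    #spirv.resource_limits<>>
} {
  func.func @add_vec6(%a: vector<6xf32>, %b: vector<6xf32>) -> vector<6xf32> {
    // A 6-element vector falls outside the 2/3/4 (and Vector16's 8/16) sizes,
    // so it only legalizes when VectorAnyINTEL is available in the target env.
    %0 = arith.addf %a, %b : vector<6xf32>
    return %0 : vector<6xf32>
  }
}

Note the dual-capability case described in the original commit message: a vector<16xf32> is covered by either Vector16 (no extension needed) or VectorAnyINTEL (which requires SPV_INTEL_vector_compute), which is why the patch adds the capability-inferred extension check.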
49 changes: 0 additions & 49 deletions
build_tools/patches/0002-change-spirv.CL.printf-op-assembly-format.patch
This file was deleted.
36 changes: 0 additions & 36 deletions
build_tools/patches/0003-Add-Constant-attribute-in-ParseDecoration.patch
This file was deleted.