Skip to content

Commit

Permalink
Integrate LLVM at llvm/llvm-project@8b448842c476
Browse files Browse the repository at this point in the history
Updates LLVM usage to match
[8b448842c476](https://github.com/llvm/llvm-project/commit/8b448842c476)

PiperOrigin-RevId: 724088205
  • Loading branch information
HEIR Team authored and copybara-github committed Feb 6, 2025
1 parent 3d47e13 commit b3fcf97
Show file tree
Hide file tree
Showing 16 changed files with 28 additions and 156 deletions.
2 changes: 1 addition & 1 deletion bazel/import_llvm.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ load(

def import_llvm(name):
"""Imports LLVM."""
LLVM_COMMIT = "f8287f6c373fcf993643dd6f0e30dde304c1be73"
LLVM_COMMIT = "8b448842c4766306b74f9dba2ee9ad3af12fea6c"

new_git_repository(
name = name,
Expand Down
2 changes: 1 addition & 1 deletion docs/content/en/docs/Design/secret.md
Original file line number Diff line number Diff line change
Expand Up @@ -239,7 +239,7 @@ func.func @main(%arg0: tensor<1x1xi8> {secret.secret}) -> tensor<1x16xi32> {
secret.separator
%4 = "tosa.const"() {value = dense<[0, 0, -5438, -5515, -1352, -1500, -4152, -84, 3396, 0, 1981, -5581, 0, -6964, 3407, -7217]> : tensor<16xi32>} : () -> tensor<16xi32>
%5 = "tosa.const"() {value = dense<[[-9], [-54], [57], [71], [104], [115], [98], [99], [64], [-26], [127], [25], [-82], [68], [95], [86]]> : tensor<16x1xi8>} : () -> tensor<16x1xi8>
%6 = "tosa.fully_connected"(%arg0, %5, %4) {quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>} : (tensor<1x1xi8>, tensor<16x1xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%6 = "tosa.fully_connected"(%arg0, %5, %4) {input_zp = -128 : i32, weight_zp = 0 : i32} : (tensor<1x1xi8>, tensor<16x1xi8>, tensor<16xi32>) -> tensor<1x16xi32>
secret.separator
return %6 : tensor<1x16xi32>
}
Expand Down
131 changes: 0 additions & 131 deletions patches/llvm.patch
Original file line number Diff line number Diff line change
@@ -1,132 +1 @@
Auto generated patch. Do not edit or delete it, even if empty.
diff -ruN --strip-trailing-cr a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -130,10 +130,6 @@
}
}

-let Features = "sse", Header = "xmmintrin.h", Attributes = [NoThrow, Const] in {
- def _mm_prefetch : X86LibBuiltin<"void(void const *, int)">;
-}
-
// AVX
let Attributes = [Const, NoThrow, RequiredVectorWidth<256>], Features = "avx" in {
foreach Op = ["addsub", "hadd", "hsub", "max", "min"] in {
@@ -142,12 +138,6 @@
}
}

-// PRFCHW
-let Features = "prfchw", Header = "intrin.h", Attributes = [NoThrow, Const] in {
- def _m_prefetch : X86LibBuiltin<"void(void *)">;
- def _m_prefetchw : X86LibBuiltin<"void(void volatile const *)">;
-}
-

// Mechanically ported builtins from the original `.def` file.
//
@@ -156,6 +146,10 @@
// current formulation is based on what was easiest to recognize from the
// pre-TableGen version.

+let Features = "mmx", Attributes = [NoThrow, Const] in {
+ def _mm_prefetch : X86NoPrefixBuiltin<"void(char const *, int)">;
+}
+
let Features = "sse", Attributes = [NoThrow] in {
def ldmxcsr : X86Builtin<"void(unsigned int)">;
}
diff -ruN --strip-trailing-cr a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -15374,17 +15374,6 @@
Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
return Builder.CreateCall(F, {Address, RW, Locality, Data});
}
- case X86::BI_m_prefetch:
- case X86::BI_m_prefetchw: {
- Value *Address = Ops[0];
- // The 'w' suffix implies write.
- Value *RW =
- ConstantInt::get(Int32Ty, BuiltinID == X86::BI_m_prefetchw ? 1 : 0);
- Value *Locality = ConstantInt::get(Int32Ty, 0x3);
- Value *Data = ConstantInt::get(Int32Ty, 1);
- Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
- return Builder.CreateCall(F, {Address, RW, Locality, Data});
- }
case X86::BI_mm_clflush: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
Ops[0]);
diff -ruN --strip-trailing-cr a/clang/lib/Headers/prfchwintrin.h b/clang/lib/Headers/prfchwintrin.h
--- a/clang/lib/Headers/prfchwintrin.h
+++ b/clang/lib/Headers/prfchwintrin.h
@@ -14,10 +14,6 @@
#ifndef __PRFCHWINTRIN_H
#define __PRFCHWINTRIN_H

-#if defined(__cplusplus)
-extern "C" {
-#endif
-
/// Loads a memory sequence containing the specified memory address into
/// all data cache levels.
///
@@ -30,7 +26,11 @@
///
/// \param __P
/// A pointer specifying the memory address to be prefetched.
-void _m_prefetch(void *__P);
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_m_prefetch(void *__P)
+{
+ __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
+}

/// Loads a memory sequence containing the specified memory address into
/// the L1 data cache and sets the cache-coherency state to modified.
@@ -48,10 +48,13 @@
///
/// \param __P
/// A pointer specifying the memory address to be prefetched.
-void _m_prefetchw(volatile const void *__P);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_m_prefetchw(volatile const void *__P)
+{
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+ __builtin_prefetch ((const void*)__P, 1, 3 /* _MM_HINT_T0 */);
+#pragma clang diagnostic pop
+}

#endif /* __PRFCHWINTRIN_H */
diff -ruN --strip-trailing-cr a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h
--- a/clang/lib/Headers/xmmintrin.h
+++ b/clang/lib/Headers/xmmintrin.h
@@ -2197,7 +2197,10 @@
#define _MM_HINT_T2 1
#define _MM_HINT_NTA 0

-#if 0
+#ifndef _MSC_VER
+/* FIXME: We have to #define this because "sel" must be a constant integer, and
+ Sema doesn't do any form of constant propagation yet. */
+
/// Loads one cache line of data from the specified address to a location
/// closer to the processor.
///
@@ -2222,10 +2225,6 @@
/// be generated. \n
/// _MM_HINT_T2: Move data using the T2 hint. The PREFETCHT2 instruction will
/// be generated.
-///
-/// _mm_prefetch is implemented as a "library builtin" directly in Clang,
-/// similar to how it is done in MSVC. Clang will warn if the user doesn't
-/// include xmmintrin.h or immintrin.h.
#define _mm_prefetch(a, sel) (__builtin_prefetch((const void *)(a), \
((sel) >> 2) & 1, (sel) & 0x3))
#endif
6 changes: 3 additions & 3 deletions tests/Emitter/verilog/hello_world.tosa.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -20,13 +20,13 @@ module attributes {tf_saved_model.semantics} {
%3 = "tosa.const"() {value = dense<"0xF41AED091921F424E021EFBCF7F5FA1903DCD20206F9F402FFFAEFF1EFD327E1FB27DDEBDBE4051A17FC241215EF1EE410FE14DA1CF8F3F1EFE2F309E3E9EDE3E415070B041B1AFEEB01DE21E60BEC03230A22241E2703E60324FFC011F8FCF1110CF5E0F30717E5E8EDFADCE823FB07DDFBFD0014261117E7F111EA0226040425211D0ADB1DDC2001FAE3370BF11A16EF1CE703E01602032118092ED9E5140BEA1AFCD81300C4D8ECD9FE0D1920D8D6E21FE9D7CAE2DDC613E7043E000114C7DBE71515F506D61ADC0922FE080213EF191EE209FDF314DDDA20D90FE3F9F7EEE924E629000716E21E0D23D3DDF714FA0822262109080F0BE012F47FDC58E526"> : tensor<16x16xi8>} : () -> tensor<16x16xi8>
%4 = "tosa.const"() {value = dense<[0, 0, -5438, -5515, -1352, -1500, -4152, -84, 3396, 0, 1981, -5581, 0, -6964, 3407, -7217]> : tensor<16xi32>} : () -> tensor<16xi32>
%5 = "tosa.const"() {value = dense<[[-9], [-54], [57], [71], [104], [115], [98], [99], [64], [-26], [127], [25], [-82], [68], [95], [86]]> : tensor<16x1xi8>} : () -> tensor<16x1xi8>
%6 = "tosa.fully_connected"(%arg0, %5, %4) {quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>} : (tensor<1x1xi8>, tensor<16x1xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%6 = "tosa.fully_connected"(%arg0, %5, %4) {input_zp = -128 : i32, weight_zp = 0 : i32} : (tensor<1x1xi8>, tensor<16x1xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%7 = "tosa.rescale"(%6) {double_round = true, input_zp = 0 : i32, multiplier = array<i32: 2039655736>, output_zp = -128 : i32, per_channel = false, scale32 = true, shift = array<i8: 38>} : (tensor<1x16xi32>) -> tensor<1x16xi8>
%8 = "tosa.clamp"(%7) {max_fp = 0.000000e+00 : f32, max_int = 127 : i64, min_fp = 0.000000e+00 : f32, min_int = -128 : i64} : (tensor<1x16xi8>) -> tensor<1x16xi8>
%9 = "tosa.fully_connected"(%8, %3, %2) {quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>} : (tensor<1x16xi8>, tensor<16x16xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%9 = "tosa.fully_connected"(%8, %3, %2) {input_zp = -128 : i32, weight_zp = 0 : i32} : (tensor<1x16xi8>, tensor<16x16xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%10 = "tosa.rescale"(%9) {double_round = true, input_zp = 0 : i32, multiplier = array<i32: 1561796795>, output_zp = -128 : i32, per_channel = false, scale32 = true, shift = array<i8: 37>} : (tensor<1x16xi32>) -> tensor<1x16xi8>
%11 = "tosa.clamp"(%10) {max_fp = 0.000000e+00 : f32, max_int = 127 : i64, min_fp = 0.000000e+00 : f32, min_int = -128 : i64} : (tensor<1x16xi8>) -> tensor<1x16xi8>
%12 = "tosa.fully_connected"(%11, %1, %0) {quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>} : (tensor<1x16xi8>, tensor<1x16xi8>, tensor<1xi32>) -> tensor<1x1xi32>
%12 = "tosa.fully_connected"(%11, %1, %0) {input_zp = -128 : i32, weight_zp = 0 : i32} : (tensor<1x16xi8>, tensor<1x16xi8>, tensor<1xi32>) -> tensor<1x1xi32>
%13 = "tosa.rescale"(%12) {double_round = true, input_zp = 0 : i32, multiplier = array<i32: 1630361836>, output_zp = 5 : i32, per_channel = false, scale32 = true, shift = array<i8: 36>} : (tensor<1x1xi32>) -> tensor<1x1xi8>
// CHECK: return
return %13 : tensor<1x1xi8>
Expand Down
4 changes: 2 additions & 2 deletions tests/Examples/micro_speech/micro_speech.tosa.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -31,11 +31,11 @@ module attributes {tfl.description = "TOCO Converted.", tfl.schema_version = 3 :
%shift = "tosa.const"() {value = dense<31> : tensor<1xi8>} : () -> tensor<1xi8>
%shift30 = "tosa.const"() {value = dense<30> : tensor<1xi8>} : () -> tensor<1xi8>
%18 = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 49, 40, 1>}> : (tensor<1x1960xi8>) -> tensor<1x49x40x1xi8>
%19 = "tosa.depthwise_conv2d"(%18, %0, %17) <{acc_type = i32, dilation = array<i64: 1, 1>, pad = array<i64: 4, 5, 3, 3>, quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>, stride = array<i64: 2, 2>}> : (tensor<1x49x40x1xi8>, tensor<10x8x1x8xi8>, tensor<8xi32>) -> tensor<1x25x20x8xi32>
%19 = "tosa.depthwise_conv2d"(%18, %0, %17) <{acc_type = i32, dilation = array<i64: 1, 1>, pad = array<i64: 4, 5, 3, 3>, input_zp = -128 : i32, weight_zp = 0 : i32, stride = array<i64: 2, 2>}> : (tensor<1x49x40x1xi8>, tensor<10x8x1x8xi8>, tensor<8xi32>) -> tensor<1x25x20x8xi32>
%20 = "tosa.rescale"(%19) <{double_round = true, input_zp = 0 : i32, multiplier = array<i32: 1653229999, 1516545207, 2000799311, 1159928266, 1498403863, 1285645282, 2146175029, 1756589032>, output_zp = -128 : i32, per_channel = true, scale32 = true, shift = array<i8: 41, 43, 41, 41, 41, 41, 41, 41>}> : (tensor<1x25x20x8xi32>) -> tensor<1x25x20x8xi8>
%21 = "tosa.clamp"(%20) <{max_fp = 0.000000e+00 : f32, max_int = 127 : i64, min_fp = 0.000000e+00 : f32, min_int = -128 : i64}> : (tensor<1x25x20x8xi8>) -> tensor<1x25x20x8xi8>
%22 = "tosa.reshape"(%21) <{new_shape = array<i64: 1, 4000>}> : (tensor<1x25x20x8xi8>) -> tensor<1x4000xi8>
%23 = "tosa.fully_connected"(%22, %16, %15) <{quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>}> : (tensor<1x4000xi8>, tensor<4x4000xi8>, tensor<4xi32>) -> tensor<1x4xi32>
%23 = "tosa.fully_connected"(%22, %16, %15) <{input_zp = -128 : i32, weight_zp = 0 : i32}> : (tensor<1x4000xi8>, tensor<4x4000xi8>, tensor<4xi32>) -> tensor<1x4xi32>
%24 = "tosa.rescale"(%23) <{double_round = true, input_zp = 0 : i32, multiplier = array<i32: 1932201080>, output_zp = 14 : i32, per_channel = false, scale32 = true, shift = array<i8: 42>}> : (tensor<1x4xi32>) -> tensor<1x4xi8>
%25 = "tosa.rescale"(%24) <{double_round = false, input_zp = 14 : i32, multiplier = array<i32: 1073741824>, output_zp = 0 : i32, per_channel = false, scale32 = true, shift = array<i8: 30>}> : (tensor<1x4xi8>) -> tensor<1x4xi32>
%26 = "tosa.reduce_max"(%25) <{axis = 1 : i32}> : (tensor<1x4xi32>) -> tensor<1x1xi32>
Expand Down
2 changes: 1 addition & 1 deletion tests/Examples/tfhe_rust/test_fully_connected.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ module attributes {tf_saved_model.semantics} {
func.func @fn_under_test(%11: tensor<1x1xi8>) -> tensor<1x1xi32> {
%0 = "tosa.const"() {value = dense<1> : tensor<1xi32>} : () -> tensor<1xi32>
%1 = "tosa.const"() {value = dense<[[2]]> : tensor<1x1xi8>} : () -> tensor<1x1xi8>
%2 = "tosa.fully_connected"(%11, %1, %0) {quantization_info = #tosa.conv_quant<input_zp = 0, weight_zp = 0>} : (tensor<1x1xi8>, tensor<1x1xi8>, tensor<1xi32>) -> tensor<1x1xi32>
%2 = "tosa.fully_connected"(%11, %1, %0) {input_zp = 0 : i32, weight_zp = 0 : i32} : (tensor<1x1xi8>, tensor<1x1xi8>, tensor<1xi32>) -> tensor<1x1xi32>
return %2 : tensor<1x1xi32>
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ module attributes {tf_saved_model.semantics} {
func.func @fn_under_test(%11: tensor<1x3xi8>) -> tensor<1x3xi32> {
%0 = "tosa.const"() {value = dense<[3, 1, 4]> : tensor<3xi32>} : () -> tensor<3xi32>
%1 = "tosa.const"() {value = dense<[[2, 7, 1], [8,2,8], [1,8,2]]> : tensor<3x3xi8>} : () -> tensor<3x3xi8>
%2 = "tosa.fully_connected"(%11, %1, %0) {quantization_info = #tosa.conv_quant<input_zp = 0, weight_zp = 0>} : (tensor<1x3xi8>, tensor<3x3xi8>, tensor<3xi32>) -> tensor<1x3xi32>
%2 = "tosa.fully_connected"(%11, %1, %0) {input_zp = 0 : i32, weight_zp = 0 : i32} : (tensor<1x3xi8>, tensor<3x3xi8>, tensor<3xi32>) -> tensor<1x3xi32>
return %2 : tensor<1x3xi32>
}
}
6 changes: 3 additions & 3 deletions tests/Transforms/secretize/main.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,13 @@ module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted."
%3 = "tosa.const"() <{value = dense<"0xF41AED091921F424E021EFBCF7F5FA1903DCD20206F9F402FFFAEFF1EFD327E1FB27DDEBDBE4051A17FC241215EF1EE410FE14DA1CF8F3F1EFE2F309E3E9EDE3E415070B041B1AFEEB01DE21E60BEC03230A22241E2703E60324FFC011F8FCF1110CF5E0F30717E5E8EDFADCE823FB07DDFBFD0014261117E7F111EA0226040425211D0ADB1DDC2001FAE3370BF11A16EF1CE703E01602032118092ED9E5140BEA1AFCD81300C4D8ECD9FE0D1920D8D6E21FE9D7CAE2DDC613E7043E000114C7DBE71515F506D61ADC0922FE080213EF191EE209FDF314DDDA20D90FE3F9F7EEE924E629000716E21E0D23D3DDF714FA0822262109080F0BE012F47FDC58E526"> : tensor<16x16xi8>}> : () -> tensor<16x16xi8>
%4 = "tosa.const"() <{value = dense<[0, 0, -5438, -5515, -1352, -1500, -4152, -84, 3396, 0, 1981, -5581, 0, -6964, 3407, -7217]> : tensor<16xi32>}> : () -> tensor<16xi32>
%5 = "tosa.const"() <{value = dense<[[-9], [-54], [57], [71], [104], [115], [98], [99], [64], [-26], [127], [25], [-82], [68], [95], [86]]> : tensor<16x1xi8>}> : () -> tensor<16x1xi8>
%6 = "tosa.fully_connected"(%arg0, %5, %4) <{quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>}> : (tensor<1x1xi8>, tensor<16x1xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%6 = "tosa.fully_connected"(%arg0, %5, %4) <{input_zp = -128 : i32, weight_zp = 0 : i32}> : (tensor<1x1xi8>, tensor<16x1xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%7 = "tosa.rescale"(%6) <{double_round = true, input_zp = 0 : i32, multiplier = array<i32: 2039655736>, output_zp = -128 : i32, per_channel = false, scale32 = true, shift = array<i8: 38>}> : (tensor<1x16xi32>) -> tensor<1x16xi8>
%8 = "tosa.clamp"(%7) <{max_fp = 0.000000e+00 : f32, max_int = 127 : i64, min_fp = 0.000000e+00 : f32, min_int = -128 : i64}> : (tensor<1x16xi8>) -> tensor<1x16xi8>
%9 = "tosa.fully_connected"(%8, %3, %2) <{quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>}> : (tensor<1x16xi8>, tensor<16x16xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%9 = "tosa.fully_connected"(%8, %3, %2) <{input_zp = -128 : i32, weight_zp = 0 : i32}> : (tensor<1x16xi8>, tensor<16x16xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%10 = "tosa.rescale"(%9) <{double_round = true, input_zp = 0 : i32, multiplier = array<i32: 1561796795>, output_zp = -128 : i32, per_channel = false, scale32 = true, shift = array<i8: 37>}> : (tensor<1x16xi32>) -> tensor<1x16xi8>
%11 = "tosa.clamp"(%10) <{max_fp = 0.000000e+00 : f32, max_int = 127 : i64, min_fp = 0.000000e+00 : f32, min_int = -128 : i64}> : (tensor<1x16xi8>) -> tensor<1x16xi8>
%12 = "tosa.fully_connected"(%11, %1, %0) <{quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>}> : (tensor<1x16xi8>, tensor<1x16xi8>, tensor<1xi32>) -> tensor<1x1xi32>
%12 = "tosa.fully_connected"(%11, %1, %0) <{input_zp = -128 : i32, weight_zp = 0 : i32}> : (tensor<1x16xi8>, tensor<1x16xi8>, tensor<1xi32>) -> tensor<1x1xi32>
%13 = "tosa.rescale"(%12) <{double_round = true, input_zp = 0 : i32, multiplier = array<i32: 1630361836>, output_zp = 5 : i32, per_channel = false, scale32 = true, shift = array<i8: 36>}> : (tensor<1x1xi32>) -> tensor<1x1xi8>
return %13 : tensor<1x1xi8>
}
Expand Down
2 changes: 1 addition & 1 deletion tests/Transforms/tosa_to_boolean_tfhe/fully_connected.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ module attributes {tf_saved_model.semantics} {
func.func @main(%11: tensor<1x1xi8>) -> tensor<1x1xi32> {
%0 = "tosa.const"() {value = dense<1> : tensor<1xi32>} : () -> tensor<1xi32>
%1 = "tosa.const"() {value = dense<[[1]]> : tensor<1x1xi8>} : () -> tensor<1x1xi8>
%12 = "tosa.fully_connected"(%11, %1, %0) {quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>} : (tensor<1x1xi8>, tensor<1x1xi8>, tensor<1xi32>) -> tensor<1x1xi32>
%12 = "tosa.fully_connected"(%11, %1, %0) {input_zp = -128 : i32, weight_zp = 0 : i32} : (tensor<1x1xi8>, tensor<1x1xi8>, tensor<1xi32>) -> tensor<1x1xi32>
// CHECK: [[ALLOC:%.*]] = memref.alloc()
// CHECK-SAME: memref<1x1x32x!tfhe_rust.eui3>
// CHECK-NOT: comb
Expand Down
6 changes: 3 additions & 3 deletions tests/Transforms/tosa_to_boolean_tfhe/hello_world.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,13 @@ module attributes {tf_saved_model.semantics} {
%3 = "tosa.const"() {value = dense<"0xF41AED091921F424E021EFBCF7F5FA1903DCD20206F9F402FFFAEFF1EFD327E1FB27DDEBDBE4051A17FC241215EF1EE410FE14DA1CF8F3F1EFE2F309E3E9EDE3E415070B041B1AFEEB01DE21E60BEC03230A22241E2703E60324FFC011F8FCF1110CF5E0F30717E5E8EDFADCE823FB07DDFBFD0014261117E7F111EA0226040425211D0ADB1DDC2001FAE3370BF11A16EF1CE703E01602032118092ED9E5140BEA1AFCD81300C4D8ECD9FE0D1920D8D6E21FE9D7CAE2DDC613E7043E000114C7DBE71515F506D61ADC0922FE080213EF191EE209FDF314DDDA20D90FE3F9F7EEE924E629000716E21E0D23D3DDF714FA0822262109080F0BE012F47FDC58E526"> : tensor<16x16xi8>} : () -> tensor<16x16xi8>
%4 = "tosa.const"() {value = dense<[0, 0, -5438, -5515, -1352, -1500, -4152, -84, 3396, 0, 1981, -5581, 0, -6964, 3407, -7217]> : tensor<16xi32>} : () -> tensor<16xi32>
%5 = "tosa.const"() {value = dense<[[-9], [-54], [57], [71], [104], [115], [98], [99], [64], [-26], [127], [25], [-82], [68], [95], [86]]> : tensor<16x1xi8>} : () -> tensor<16x1xi8>
%6 = "tosa.fully_connected"(%arg0, %5, %4) {quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>} : (tensor<1x1xi8>, tensor<16x1xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%6 = "tosa.fully_connected"(%arg0, %5, %4) {input_zp = -128 : i32, weight_zp = 0 : i32} : (tensor<1x1xi8>, tensor<16x1xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%7 = "tosa.rescale"(%6) {double_round = true, input_zp = 0 : i32, multiplier = array<i32: 2039655736>, output_zp = -128 : i32, per_channel = false, scale32 = true, shift = array<i8: 38>} : (tensor<1x16xi32>) -> tensor<1x16xi8>
%8 = "tosa.clamp"(%7) {max_fp = 0.000000e+00 : f32, max_int = 127 : i64, min_fp = 0.000000e+00 : f32, min_int = -128 : i64} : (tensor<1x16xi8>) -> tensor<1x16xi8>
%9 = "tosa.fully_connected"(%8, %3, %2) {quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>} : (tensor<1x16xi8>, tensor<16x16xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%9 = "tosa.fully_connected"(%8, %3, %2) {input_zp = -128 : i32, weight_zp = 0 : i32} : (tensor<1x16xi8>, tensor<16x16xi8>, tensor<16xi32>) -> tensor<1x16xi32>
%10 = "tosa.rescale"(%9) {double_round = true, input_zp = 0 : i32, multiplier = array<i32: 1561796795>, output_zp = -128 : i32, per_channel = false, scale32 = true, shift = array<i8: 37>} : (tensor<1x16xi32>) -> tensor<1x16xi8>
%11 = "tosa.clamp"(%10) {max_fp = 0.000000e+00 : f32, max_int = 127 : i64, min_fp = 0.000000e+00 : f32, min_int = -128 : i64} : (tensor<1x16xi8>) -> tensor<1x16xi8>
%12 = "tosa.fully_connected"(%11, %1, %0) {quantization_info = #tosa.conv_quant<input_zp = -128, weight_zp = 0>} : (tensor<1x16xi8>, tensor<1x16xi8>, tensor<1xi32>) -> tensor<1x1xi32>
%12 = "tosa.fully_connected"(%11, %1, %0) {input_zp = -128 : i32, weight_zp = 0 : i32} : (tensor<1x16xi8>, tensor<1x16xi8>, tensor<1xi32>) -> tensor<1x1xi32>
%13 = "tosa.rescale"(%12) {double_round = true, input_zp = 0 : i32, multiplier = array<i32: 1630361836>, output_zp = 5 : i32, per_channel = false, scale32 = true, shift = array<i8: 36>} : (tensor<1x1xi32>) -> tensor<1x1xi8>
// CHECK: return
return %13 : tensor<1x1xi8>
Expand Down
Loading

0 comments on commit b3fcf97

Please sign in to comment.