diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 110bedf5e456..7102b2da91d0 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -205,6 +205,19 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createSub(lhs, rhs, false, true); } + struct BinOpOverflowResults { + mlir::Value result; + mlir::Value overflow; + }; + + BinOpOverflowResults createBinOpOverflowOp(mlir::Location loc, + mlir::cir::IntType resultTy, + mlir::cir::BinOpOverflowKind kind, + mlir::Value lhs, mlir::Value rhs) { + auto op = create(loc, resultTy, kind, lhs, rhs); + return {op.getResult(), op.getOverflow()}; + } + //===--------------------------------------------------------------------===// // Cast/Conversion Operators //===--------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1d29cf7863fa..508b8d7f330c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1077,6 +1077,64 @@ def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// BinOpOverflowOp +//===----------------------------------------------------------------------===// + +def BinOpOverflowKind : I32EnumAttr< + "BinOpOverflowKind", + "checked binary arithmetic operation kind", + [BinOpKind_Add, BinOpKind_Sub, BinOpKind_Mul]> { + let cppNamespace = "::mlir::cir"; +} + +def BinOpOverflowOp : CIR_Op<"binop.overflow", [Pure, SameTypeOperands]> { + let summary = "Perform binary integral arithmetic with overflow checking"; + let description = [{ + `cir.binop.overflow` performs binary arithmetic operations with overflow + checking on integral operands. 
+ + The `kind` argument specifies the kind of arithmetic operation to perform. + It can be either `add`, `sub`, or `mul`. The `lhs` and `rhs` arguments + specify the input operands of the arithmetic operation. The types of `lhs` + and `rhs` must be the same. + + `cir.binop.overflow` produces two SSA values. `result` is the result of the + arithmetic operation truncated to its specified type. `overflow` is a + boolean value indicating whether overflow happens during the operation. + + The exact semantic of this operation is as follows: + + - `lhs` and `rhs` are promoted to an imaginary integral type that has + infinite precision. + - The arithmetic operation is performed on the promoted operands. + - The infinite-precision result is truncated to the type of `result`. The + truncated result is assigned to `result`. + - If the truncated result is equal to the un-truncated result, `overflow` + is assigned to false. Otherwise, `overflow` is assigned to true. + }]; + + let arguments = (ins Arg:$kind, + CIR_IntType:$lhs, CIR_IntType:$rhs); + let results = (outs CIR_IntType:$result, CIR_BoolType:$overflow); + + let assemblyFormat = [{ + `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) `,` + `(` type($result) `,` type($overflow) `)` + attr-dict + }]; + + let builders = [ + OpBuilder<(ins "mlir::cir::IntType":$resultTy, + "mlir::cir::BinOpOverflowKind":$kind, + "mlir::Value":$lhs, + "mlir::Value":$rhs), [{ + auto overflowTy = mlir::cir::BoolType::get($_builder.getContext()); + build($_builder, $_state, resultTy, overflowTy, kind, lhs, rhs); + }]> + ]; +} + //===----------------------------------------------------------------------===// // BitsOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 700f6b527f13..107ec77075cb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -91,6 +91,52 @@ static 
void initializeAlloca(CIRGenFunction &CGF, } } +namespace { +struct WidthAndSignedness { + unsigned Width; + bool Signed; +}; +} // namespace + +static WidthAndSignedness +getIntegerWidthAndSignedness(const clang::ASTContext &context, + const clang::QualType Type) { + assert(Type->isIntegerType() && "Given type is not an integer."); + unsigned Width = Type->isBooleanType() ? 1 + : Type->isBitIntType() ? context.getIntWidth(Type) + : context.getTypeInfo(Type).Width; + bool Signed = Type->isSignedIntegerType(); + return {Width, Signed}; +} + +// Given one or more integer types, this function produces an integer type that +// encompasses them: any value in one of the given types could be expressed in +// the encompassing type. +static struct WidthAndSignedness +EncompassingIntegerType(ArrayRef Types) { + assert(Types.size() > 0 && "Empty list of types."); + + // If any of the given types is signed, we must return a signed type. + bool Signed = false; + for (const auto &Type : Types) { + Signed |= Type.Signed; + } + + // The encompassing type must have a width greater than or equal to the width + // of the specified types. Additionally, if the encompassing type is signed, + // its width must be strictly greater than the width of any unsigned types + // given. 
+ unsigned Width = 0; + for (const auto &Type : Types) { + unsigned MinWidth = Type.Width + (Signed && !Type.Signed); + if (Width < MinWidth) { + Width = MinWidth; + } + } + + return {Width, Signed}; +} + RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue) { @@ -705,6 +751,157 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get( builder.createBitcast(AllocaAddr, builder.getVoidPtrTy())); } + + case Builtin::BI__builtin_add_overflow: + case Builtin::BI__builtin_sub_overflow: + case Builtin::BI__builtin_mul_overflow: { + const clang::Expr *LeftArg = E->getArg(0); + const clang::Expr *RightArg = E->getArg(1); + const clang::Expr *ResultArg = E->getArg(2); + + clang::QualType ResultQTy = + ResultArg->getType()->castAs()->getPointeeType(); + + WidthAndSignedness LeftInfo = + getIntegerWidthAndSignedness(CGM.getASTContext(), LeftArg->getType()); + WidthAndSignedness RightInfo = + getIntegerWidthAndSignedness(CGM.getASTContext(), RightArg->getType()); + WidthAndSignedness ResultInfo = + getIntegerWidthAndSignedness(CGM.getASTContext(), ResultQTy); + + // Note we compute the encompassing type with the consideration to the + // result type, so later in LLVM lowering we don't get redundant integral + // extension casts. + WidthAndSignedness EncompassingInfo = + EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo}); + + auto EncompassingCIRTy = mlir::cir::IntType::get( + builder.getContext(), EncompassingInfo.Width, EncompassingInfo.Signed); + auto ResultCIRTy = + CGM.getTypes().ConvertType(ResultQTy).cast(); + + mlir::Value Left = buildScalarExpr(LeftArg); + mlir::Value Right = buildScalarExpr(RightArg); + Address ResultPtr = buildPointerWithAlignment(ResultArg); + + // Extend each operand to the encompassing type, if necessary. 
+ if (Left.getType() != EncompassingCIRTy) + Left = builder.createCast(mlir::cir::CastKind::integral, Left, + EncompassingCIRTy); + if (Right.getType() != EncompassingCIRTy) + Right = builder.createCast(mlir::cir::CastKind::integral, Right, + EncompassingCIRTy); + + // Perform the operation on the extended values. + mlir::cir::BinOpOverflowKind OpKind; + switch (BuiltinID) { + default: + llvm_unreachable("Unknown overflow builtin id."); + case Builtin::BI__builtin_add_overflow: + OpKind = mlir::cir::BinOpOverflowKind::Add; + break; + case Builtin::BI__builtin_sub_overflow: + OpKind = mlir::cir::BinOpOverflowKind::Sub; + break; + case Builtin::BI__builtin_mul_overflow: + OpKind = mlir::cir::BinOpOverflowKind::Mul; + break; + } + + auto Loc = getLoc(E->getSourceRange()); + auto ArithResult = + builder.createBinOpOverflowOp(Loc, ResultCIRTy, OpKind, Left, Right); + + // Here is a slight difference from the original clang CodeGen: + // - In the original clang CodeGen, the checked arithmetic result is + // first computed as a value of the encompassing type, and then it is + // truncated to the actual result type with a second overflow checking. + // - In CIRGen, the checked arithmetic operation directly produce the + // checked arithmetic result in its expected type. + // + // So we don't need a truncation and a second overflow checking here. + + // Finally, store the result using the pointer. 
+ bool isVolatile = + ResultArg->getType()->getPointeeType().isVolatileQualified(); + builder.createStore(Loc, buildToMemory(ArithResult.result, ResultQTy), + ResultPtr, isVolatile); + + return RValue::get(ArithResult.overflow); + } + + case Builtin::BI__builtin_uadd_overflow: + case Builtin::BI__builtin_uaddl_overflow: + case Builtin::BI__builtin_uaddll_overflow: + case Builtin::BI__builtin_usub_overflow: + case Builtin::BI__builtin_usubl_overflow: + case Builtin::BI__builtin_usubll_overflow: + case Builtin::BI__builtin_umul_overflow: + case Builtin::BI__builtin_umull_overflow: + case Builtin::BI__builtin_umulll_overflow: + case Builtin::BI__builtin_sadd_overflow: + case Builtin::BI__builtin_saddl_overflow: + case Builtin::BI__builtin_saddll_overflow: + case Builtin::BI__builtin_ssub_overflow: + case Builtin::BI__builtin_ssubl_overflow: + case Builtin::BI__builtin_ssubll_overflow: + case Builtin::BI__builtin_smul_overflow: + case Builtin::BI__builtin_smull_overflow: + case Builtin::BI__builtin_smulll_overflow: { + // Scalarize our inputs. 
+ mlir::Value X = buildScalarExpr(E->getArg(0)); + mlir::Value Y = buildScalarExpr(E->getArg(1)); + + const clang::Expr *ResultArg = E->getArg(2); + Address ResultPtr = buildPointerWithAlignment(ResultArg); + + // Decide which of the arithmetic operation we are lowering to: + mlir::cir::BinOpOverflowKind ArithKind; + switch (BuiltinID) { + default: + llvm_unreachable("Unknown overflow builtin id."); + case Builtin::BI__builtin_uadd_overflow: + case Builtin::BI__builtin_uaddl_overflow: + case Builtin::BI__builtin_uaddll_overflow: + case Builtin::BI__builtin_sadd_overflow: + case Builtin::BI__builtin_saddl_overflow: + case Builtin::BI__builtin_saddll_overflow: + ArithKind = mlir::cir::BinOpOverflowKind::Add; + break; + case Builtin::BI__builtin_usub_overflow: + case Builtin::BI__builtin_usubl_overflow: + case Builtin::BI__builtin_usubll_overflow: + case Builtin::BI__builtin_ssub_overflow: + case Builtin::BI__builtin_ssubl_overflow: + case Builtin::BI__builtin_ssubll_overflow: + ArithKind = mlir::cir::BinOpOverflowKind::Sub; + break; + case Builtin::BI__builtin_umul_overflow: + case Builtin::BI__builtin_umull_overflow: + case Builtin::BI__builtin_umulll_overflow: + case Builtin::BI__builtin_smul_overflow: + case Builtin::BI__builtin_smull_overflow: + case Builtin::BI__builtin_smulll_overflow: + ArithKind = mlir::cir::BinOpOverflowKind::Mul; + break; + } + + clang::QualType ResultQTy = + ResultArg->getType()->castAs()->getPointeeType(); + auto ResultCIRTy = + CGM.getTypes().ConvertType(ResultQTy).cast(); + + auto Loc = getLoc(E->getSourceRange()); + auto ArithResult = + builder.createBinOpOverflowOp(Loc, ResultCIRTy, ArithKind, X, Y); + + bool isVolatile = + ResultArg->getType()->getPointeeType().isVolatileQualified(); + builder.createStore(Loc, buildToMemory(ArithResult.result, ResultQTy), + ResultPtr, isVolatile); + + return RValue::get(ArithResult.overflow); + } } // If this is an alias for a lib function (e.g. 
__builtin_sin), emit diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6cc7fc75001c..5fd87badbbee 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1950,6 +1950,135 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { } }; +class CIRBinOpOverflowOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BinOpOverflowOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto loc = op.getLoc(); + auto arithKind = op.getKind(); + auto operandTy = op.getLhs().getType(); + auto resultTy = op.getResult().getType(); + + auto encompassedTyInfo = computeEncompassedTypeWidth(operandTy, resultTy); + auto encompassedLLVMTy = rewriter.getIntegerType(encompassedTyInfo.width); + + auto lhs = adaptor.getLhs(); + auto rhs = adaptor.getRhs(); + if (operandTy.getWidth() < encompassedTyInfo.width) { + if (operandTy.isSigned()) { + lhs = rewriter.create(loc, encompassedLLVMTy, lhs); + rhs = rewriter.create(loc, encompassedLLVMTy, rhs); + } else { + lhs = rewriter.create(loc, encompassedLLVMTy, lhs); + rhs = rewriter.create(loc, encompassedLLVMTy, rhs); + } + } + + auto intrinName = getLLVMIntrinName(arithKind, encompassedTyInfo.sign, + encompassedTyInfo.width); + auto intrinNameAttr = mlir::StringAttr::get(op.getContext(), intrinName); + + auto overflowLLVMTy = rewriter.getI1Type(); + auto intrinRetTy = mlir::LLVM::LLVMStructType::getLiteral( + rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy}); + + auto callLLVMIntrinOp = rewriter.create( + loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs}); + auto intrinRet = callLLVMIntrinOp.getResult(0); + + auto result = rewriter + .create(loc, intrinRet, + ArrayRef{0}) + .getResult(); + auto overflow = rewriter + .create( + loc, 
intrinRet, ArrayRef{1}) + .getResult(); + + if (resultTy.getWidth() < encompassedTyInfo.width) { + auto resultLLVMTy = getTypeConverter()->convertType(resultTy); + auto truncResult = + rewriter.create(loc, resultLLVMTy, result); + + // Extend the truncated result back to the encompassing type to check for + // any overflows during the truncation. + mlir::Value truncResultExt; + if (resultTy.isSigned()) + truncResultExt = rewriter.create( + loc, encompassedLLVMTy, truncResult); + else + truncResultExt = rewriter.create( + loc, encompassedLLVMTy, truncResult); + auto truncOverflow = rewriter.create( + loc, mlir::LLVM::ICmpPredicate::ne, truncResultExt, result); + + result = truncResult; + overflow = + rewriter.create(loc, overflow, truncOverflow); + } + + auto boolLLVMTy = + getTypeConverter()->convertType(op.getOverflow().getType()); + if (boolLLVMTy != rewriter.getI1Type()) + overflow = rewriter.create(loc, boolLLVMTy, overflow); + + rewriter.replaceOp(op, mlir::ValueRange{result, overflow}); + + return mlir::success(); + } + +private: + static std::string getLLVMIntrinName(mlir::cir::BinOpOverflowKind opKind, + bool isSigned, unsigned width) { + // The intrinsic name is `@llvm.{s|u}{opKind}.with.overflow.i{width}` + + std::string name = "llvm."; + + if (isSigned) + name.push_back('s'); + else + name.push_back('u'); + + switch (opKind) { + case mlir::cir::BinOpOverflowKind::Add: + name.append("add."); + break; + case mlir::cir::BinOpOverflowKind::Sub: + name.append("sub."); + break; + case mlir::cir::BinOpOverflowKind::Mul: + name.append("mul."); + break; + default: + llvm_unreachable("unknown checked arith kind"); + } + + name.append("with.overflow.i"); + name.append(std::to_string(width)); + + return name; + } + + struct EncompassedTypeInfo { + bool sign; + unsigned width; + }; + + static EncompassedTypeInfo + computeEncompassedTypeWidth(mlir::cir::IntType operandTy, + mlir::cir::IntType resultTy) { + auto sign = operandTy.getIsSigned() || resultTy.getIsSigned(); 
+ auto width = + std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()), + resultTy.getWidth() + (sign && resultTy.isUnsigned())); + return {sign, width}; + } +}; + class CIRShiftOpLowering : public mlir::OpConversionPattern { public: @@ -3003,13 +3132,14 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRBitPopcountOpLowering, CIRAtomicCmpXchgLowering, CIRAtomicXchgLowering, CIRAtomicFetchLowering, CIRByteswapOpLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, - CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, - CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, - CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, - CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, - CIRBrOpLowering, CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, - CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, - CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, + CIRBinOpLowering, CIRBinOpOverflowOpLowering, CIRShiftOpLowering, + CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, + CIRFuncLowering, CIRCastOpLowering, CIRGlobalOpLowering, + CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, + CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, + CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, + CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, + CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, diff --git a/clang/test/CIR/CodeGen/builtins-overflow.cpp b/clang/test/CIR/CodeGen/builtins-overflow.cpp new file mode 100644 index 000000000000..d4652527cb56 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtins-overflow.cpp @@ -0,0 +1,364 @@ +// RUN: %clang_cc1 -triple 
x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +bool test_add_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) { + return __builtin_add_overflow(x, y, res); +} + +// CHECK: cir.func @_Z32test_add_overflow_uint_uint_uintjjPj +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_add_overflow_int_int_int(int x, int y, int *res) { + return __builtin_add_overflow(x, y, res); +} + +// CHECK: cir.func @_Z29test_add_overflow_int_int_intiiPi +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_add_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) { + return __builtin_add_overflow(x, y, res); +} + +// CHECK: cir.func @_Z38test_add_overflow_xint31_xint31_xint31DB31_S_PS_ +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> +// CHECK: } + +bool test_sub_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) { + return __builtin_sub_overflow(x, y, res); +} + +// CHECK: 
cir.func @_Z32test_sub_overflow_uint_uint_uintjjPj +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_sub_overflow_int_int_int(int x, int y, int *res) { + return __builtin_sub_overflow(x, y, res); +} + +// CHECK: cir.func @_Z29test_sub_overflow_int_int_intiiPi +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_sub_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) { + return __builtin_sub_overflow(x, y, res); +} + +// CHECK: cir.func @_Z38test_sub_overflow_xint31_xint31_xint31DB31_S_PS_ +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> +// CHECK: } + +bool test_mul_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) { + return __builtin_mul_overflow(x, y, res); +} + +// CHECK: cir.func @_Z32test_mul_overflow_uint_uint_uintjjPj +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, 
!cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_mul_overflow_int_int_int(int x, int y, int *res) { + return __builtin_mul_overflow(x, y, res); +} + +// CHECK: cir.func @_Z29test_mul_overflow_int_int_intiiPi +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_mul_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) { + return __builtin_mul_overflow(x, y, res); +} + +// CHECK: cir.func @_Z38test_mul_overflow_xint31_xint31_xint31DB31_S_PS_ +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> +// CHECK: } + +bool test_mul_overflow_ulong_ulong_long(unsigned long x, unsigned long y, unsigned long *res) { + return __builtin_mul_overflow(x, y, res); +} + +// CHECK: cir.func @_Z34test_mul_overflow_ulong_ulong_longmmPm +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool 
test_add_overflow_uint_int_int(unsigned x, int y, int *res) { + return __builtin_add_overflow(x, y, res); +} + +// CHECK: cir.func @_Z30test_add_overflow_uint_int_intjiPi +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[#PROM_X:]] = cir.cast(integral, %[[#X]] : !u32i), !cir.int +// CHECK-NEXT: %[[#PROM_Y:]] = cir.cast(integral, %[[#Y]] : !s32i), !cir.int +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#PROM_X]], %[[#PROM_Y]]) : , (!s32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_add_overflow_volatile(int x, int y, volatile int *res) { + return __builtin_add_overflow(x, y, res); +} + +// CHECK: cir.func @_Z26test_add_overflow_volatileiiPVi +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) +// CHECK-NEXT: cir.store volatile %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_uadd_overflow(unsigned x, unsigned y, unsigned *res) { + return __builtin_uadd_overflow(x, y, res); +} + +// CHECK: cir.func @_Z18test_uadd_overflowjjPj +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_uaddl_overflow(unsigned long x, unsigned long y, unsigned long *res) { + return __builtin_uaddl_overflow(x, y, res); +} + +// CHECK: cir.func 
@_Z19test_uaddl_overflowmmPm +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_uaddll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) { + return __builtin_uaddll_overflow(x, y, res); +} + +// CHECK: cir.func @_Z20test_uaddll_overflowyyPy +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_usub_overflow(unsigned x, unsigned y, unsigned *res) { + return __builtin_usub_overflow(x, y, res); +} + +// CHECK: cir.func @_Z18test_usub_overflowjjPj +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_usubl_overflow(unsigned long x, unsigned long y, unsigned long *res) { + return __builtin_usubl_overflow(x, y, res); +} + +// CHECK: cir.func @_Z19test_usubl_overflowmmPm +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : 
!u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_usubll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) { + return __builtin_usubll_overflow(x, y, res); +} + +// CHECK: cir.func @_Z20test_usubll_overflowyyPy +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_umul_overflow(unsigned x, unsigned y, unsigned *res) { + return __builtin_umul_overflow(x, y, res); +} + +// CHECK: cir.func @_Z18test_umul_overflowjjPj +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_umull_overflow(unsigned long x, unsigned long y, unsigned long *res) { + return __builtin_umull_overflow(x, y, res); +} + +// CHECK: cir.func @_Z19test_umull_overflowmmPm +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_umulll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) { + return __builtin_umulll_overflow(x, y, res); +} + +// CHECK: cir.func 
@_Z20test_umulll_overflowyyPy
+// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+// CHECK: }
+
+bool test_sadd_overflow(int x, int y, int *res) {
+  return __builtin_sadd_overflow(x, y, res);
+}
+
+// CHECK: cir.func @_Z18test_sadd_overflowiiPi
+// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool)
+// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+// CHECK: }
+
+bool test_saddl_overflow(long x, long y, long *res) {
+  return __builtin_saddl_overflow(x, y, res);
+}
+
+// CHECK: cir.func @_Z19test_saddl_overflowllPl
+// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CHECK: }
+
+bool test_saddll_overflow(long long x, long long y, long long *res) {
+  return __builtin_saddll_overflow(x, y, res);
+}
+
+// CHECK: cir.func @_Z20test_saddll_overflowxxPx
+// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CHECK-NEXT: cir.store %[[RES]], 
%[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CHECK: }
+
+bool test_ssub_overflow(int x, int y, int *res) {
+  return __builtin_ssub_overflow(x, y, res);
+}
+
+// CHECK: cir.func @_Z18test_ssub_overflowiiPi
+// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool)
+// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+// CHECK: }
+
+bool test_ssubl_overflow(long x, long y, long *res) {
+  return __builtin_ssubl_overflow(x, y, res);
+}
+
+// CHECK: cir.func @_Z19test_ssubl_overflowllPl
+// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CHECK: }
+
+bool test_ssubll_overflow(long long x, long long y, long long *res) {
+  return __builtin_ssubll_overflow(x, y, res);
+}
+
+// CHECK: cir.func @_Z20test_ssubll_overflowxxPx
+// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CHECK: }
+
+bool test_smul_overflow(int x, int y, int *res) {
+  return __builtin_smul_overflow(x, y, res);
+}
+
+// CHECK: cir.func @_Z18test_smul_overflowiiPi
+// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : 
!cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool)
+// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+// CHECK: }
+
+bool test_smull_overflow(long x, long y, long *res) {
+  return __builtin_smull_overflow(x, y, res);
+}
+
+// CHECK: cir.func @_Z19test_smull_overflowllPl
+// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CHECK: }
+
+bool test_smulll_overflow(long long x, long long y, long long *res) {
+  return __builtin_smulll_overflow(x, y, res);
+}
+
+// CHECK: cir.func @_Z20test_smulll_overflowxxPx
+// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CHECK: }
diff --git a/clang/test/CIR/Lowering/binop-overflow.cir b/clang/test/CIR/Lowering/binop-overflow.cir
new file mode 100644
index 000000000000..c73e708e5320
--- /dev/null
+++ b/clang/test/CIR/Lowering/binop-overflow.cir
@@ -0,0 +1,67 @@
+// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR
+// RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM
+
+!u32i = !cir.int<u, 32>
+!s32i = !cir.int<s, 32>
+
+module {
+  cir.func @test_add_u32_u32_u32(%lhs: !u32i, %rhs: !u32i, %res: !cir.ptr<!u32i>) -> !cir.bool {
+    %result, %overflow = cir.binop.overflow(add, %lhs, %rhs) : !u32i, (!u32i, !cir.bool)
+    cir.store %result, %res : !u32i, !cir.ptr<!u32i>
+    cir.return %overflow : !cir.bool
+  
}
+
+  // MLIR: llvm.func @test_add_u32_u32_u32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i8
+  // MLIR-NEXT: %[[#INTRIN_RET:]] = llvm.call_intrinsic "llvm.uadd.with.overflow.i32"(%[[LHS]], %[[RHS]]) : (i32, i32) -> !llvm.struct<(i32, i1)>
+  // MLIR-NEXT: %[[#RES:]] = llvm.extractvalue %[[#INTRIN_RET]][0] : !llvm.struct<(i32, i1)>
+  // MLIR-NEXT: %[[#OVFL:]] = llvm.extractvalue %[[#INTRIN_RET]][1] : !llvm.struct<(i32, i1)>
+  // MLIR-NEXT: %[[#OVFL_EXT:]] = llvm.zext %[[#OVFL]] : i1 to i8
+  // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] : i32, !llvm.ptr
+  // MLIR-NEXT: llvm.return %[[#OVFL_EXT]] : i8
+  // MLIR-NEXT: }
+
+  // LLVM: define i8 @test_add_u32_u32_u32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]])
+  // LLVM-NEXT: %[[#INTRIN_RET:]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %[[#LHS]], i32 %[[#RHS]])
+  // LLVM-NEXT: %[[#RES:]] = extractvalue { i32, i1 } %[[#INTRIN_RET]], 0
+  // LLVM-NEXT: %[[#OVFL:]] = extractvalue { i32, i1 } %[[#INTRIN_RET]], 1
+  // LLVM-NEXT: %[[#OVFL_EXT:]] = zext i1 %[[#OVFL]] to i8
+  // LLVM-NEXT: store i32 %[[#RES]], ptr %[[#RES_PTR]], align 4
+  // LLVM-NEXT: ret i8 %[[#OVFL_EXT]]
+  // LLVM-NEXT: }
+
+  cir.func @test_add_u32_u32_i32(%lhs: !u32i, %rhs: !u32i, %res: !cir.ptr<!s32i>) -> !cir.bool {
+    %result, %overflow = cir.binop.overflow(add, %lhs, %rhs) : !u32i, (!s32i, !cir.bool)
+    cir.store %result, %res : !s32i, !cir.ptr<!s32i>
+    cir.return %overflow : !cir.bool
+  }
+
+  // MLIR: llvm.func @test_add_u32_u32_i32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i8
+  // MLIR-NEXT: %[[#LHS_EXT:]] = llvm.zext %[[LHS]] : i32 to i33
+  // MLIR-NEXT: %[[#RHS_EXT:]] = llvm.zext %[[RHS]] : i32 to i33
+  // MLIR-NEXT: %[[#INTRIN_RET:]] = llvm.call_intrinsic "llvm.sadd.with.overflow.i33"(%[[#LHS_EXT]], %[[#RHS_EXT]]) : (i33, i33) -> !llvm.struct<(i33, i1)>
+  // MLIR-NEXT: %[[#RES_EXT:]] = llvm.extractvalue %[[#INTRIN_RET]][0] : !llvm.struct<(i33, i1)>
+  // MLIR-NEXT: %[[#ARITH_OVFL:]] = 
llvm.extractvalue %[[#INTRIN_RET]][1] : !llvm.struct<(i33, i1)>
+  // MLIR-NEXT: %[[#RES:]] = llvm.trunc %[[#RES_EXT]] : i33 to i32
+  // MLIR-NEXT: %[[#RES_EXT_2:]] = llvm.sext %[[#RES]] : i32 to i33
+  // MLIR-NEXT: %[[#TRUNC_OVFL:]] = llvm.icmp "ne" %[[#RES_EXT_2]], %[[#RES_EXT]] : i33
+  // MLIR-NEXT: %[[#OVFL:]] = llvm.or %[[#ARITH_OVFL]], %[[#TRUNC_OVFL]] : i1
+  // MLIR-NEXT: %[[#OVFL_EXT:]] = llvm.zext %[[#OVFL]] : i1 to i8
+  // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] : i32, !llvm.ptr
+  // MLIR-NEXT: llvm.return %[[#OVFL_EXT]] : i8
+  // MLIR-NEXT: }
+
+  // LLVM: define i8 @test_add_u32_u32_i32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]])
+  // LLVM-NEXT: %[[#LHS_EXT:]] = zext i32 %[[#LHS]] to i33
+  // LLVM-NEXT: %[[#RHS_EXT:]] = zext i32 %[[#RHS]] to i33
+  // LLVM-NEXT: %[[#INTRIN_RET:]] = call { i33, i1 } @llvm.sadd.with.overflow.i33(i33 %[[#LHS_EXT]], i33 %[[#RHS_EXT]])
+  // LLVM-NEXT: %[[#RES_EXT:]] = extractvalue { i33, i1 } %[[#INTRIN_RET]], 0
+  // LLVM-NEXT: %[[#ARITH_OVFL:]] = extractvalue { i33, i1 } %[[#INTRIN_RET]], 1
+  // LLVM-NEXT: %[[#RES:]] = trunc i33 %[[#RES_EXT]] to i32
+  // LLVM-NEXT: %[[#RES_EXT_2:]] = sext i32 %[[#RES]] to i33
+  // LLVM-NEXT: %[[#TRUNC_OVFL:]] = icmp ne i33 %[[#RES_EXT_2]], %[[#RES_EXT]]
+  // LLVM-NEXT: %[[#OVFL:]] = or i1 %[[#ARITH_OVFL]], %[[#TRUNC_OVFL]]
+  // LLVM-NEXT: %[[#OVFL_EXT:]] = zext i1 %[[#OVFL]] to i8
+  // LLVM-NEXT: store i32 %[[#RES]], ptr %[[#RES_PTR]], align 4
+  // LLVM-NEXT: ret i8 %[[#OVFL_EXT]]
+  // LLVM-NEXT: }
+}