diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 2577af98e5e5..757efe4648a2 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -136,6 +136,9 @@ struct MissingFeatures {
   // AArch64 Neon builtin related.
   static bool buildNeonShiftVector() { return false; }
 
+  // ABIInfo queries.
+  static bool allowBFloatArgsAndRet() { return false; }
+
   // Misc
   static bool cacheRecordLayouts() { return false; }
   static bool capturedByInit() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 5b74321d36f0..3380d8d3bbcf 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -1603,7 +1603,7 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID,
 mlir::Value buildNeonCall(unsigned int builtinID, CIRGenFunction &cgf,
                           llvm::SmallVector<mlir::Type> argTypes,
-                          llvm::SmallVector<mlir::Value> args,
+                          llvm::SmallVectorImpl<mlir::Value> &args,
                           llvm::StringRef intrinsicName, mlir::Type funcResTy,
                           mlir::Location loc,
                           bool isConstrainedFPIntrinsic = false,
@@ -1640,6 +1640,55 @@ mlir::Value buildNeonCall(unsigned int builtinID, CIRGenFunction &cgf,
   }
 }
 
+mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr(
+    unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic,
+    const char *nameHint, unsigned modifier, const CallExpr *e,
+    llvm::SmallVectorImpl<mlir::Value> &ops, cir::Address ptrOp0,
+    cir::Address ptrOp1, llvm::Triple::ArchType arch) {
+  // Get the last argument, which specifies the vector type.
+  const clang::Expr *arg = e->getArg(e->getNumArgs() - 1);
+  std::optional<llvm::APSInt> neonTypeConst =
+      arg->getIntegerConstantExpr(getContext());
+  if (!neonTypeConst)
+    return nullptr;
+
+  // Determine the type of this overloaded NEON intrinsic.
+  NeonTypeFlags neonType(neonTypeConst->getZExtValue());
+  bool isUnsigned = neonType.isUnsigned();
+  bool isQuad = neonType.isQuad();
+  const bool hasLegalHalfType = getTarget().hasLegalHalfType();
+  assert(!MissingFeatures::allowBFloatArgsAndRet());
+  // The value of allowBFloatArgsAndRet is true for AArch64, but it should
+  // come from ABI info.
+  const bool allowBFloatArgsAndRet = true;
+
+  mlir::Type vTy = GetNeonType(this, neonType, hasLegalHalfType, false,
+                               allowBFloatArgsAndRet);
+  if (!vTy)
+    return nullptr;
+
+  unsigned intrinsicId = llvmIntrinsic;
+  if ((modifier & UnsignedAlts) && !isUnsigned)
+    intrinsicId = altLLVMIntrinsic;
+
+  switch (builtinID) {
+  default:
+    llvm_unreachable("NYI");
+  case NEON::BI__builtin_neon_vqadd_v: {
+    mlir::Value res = buildNeonCall(builtinID, *this, {vTy, vTy}, ops,
+                                    (intrinsicId != altLLVMIntrinsic)
+                                        ? "llvm.aarch64.neon.uqadd"
+                                        : "llvm.aarch64.neon.sqadd",
+                                    vTy, getLoc(e->getExprLoc()));
+    mlir::Type resultType = ConvertType(e->getType());
+    // The AArch64 intrinsic returns a one-element vector; bitcast it to the
+    // scalar type the builtin expects.
+    return builder.createBitcast(res, resultType);
+  }
+  }
+  return nullptr;
+}
+
 mlir::Value
 CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                         ReturnValueSlot ReturnValue,
@@ -2352,9 +2401,11 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
   // defer to common code if it's been added to our special map.
   Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
                                         AArch64SIMDIntrinsicsProvenSorted);
-  if (Builtin) {
-    llvm_unreachable("NYI");
-  }
+  if (Builtin)
+    return buildCommonNeonBuiltinExpr(
+        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
+        Builtin->NameHint, Builtin->TypeModifier, E, Ops,
+        /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
 
   if (mlir::Value V =
           buildAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 8156d8fad059..048a0c17e24a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -980,6 +980,11 @@ class CIRGenFunction : public CIRGenTypeCache {
   mlir::Value buildARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                      ReturnValueSlot ReturnValue,
                                      llvm::Triple::ArchType Arch);
+  mlir::Value buildCommonNeonBuiltinExpr(
+      unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic,
+      const char *nameHint, unsigned modifier, const CallExpr *e,
+      llvm::SmallVectorImpl<mlir::Value> &ops, cir::Address ptrOp0,
+      cir::Address ptrOp1, llvm::Triple::ArchType arch);
   mlir::Value buildAlignmentAssumption(mlir::Value ptrValue, QualType ty,
                                        SourceLocation loc,
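
Illustrative usage, not part of the patch: a minimal sketch (assuming an AArch64 target with clang's arm_neon.h; the helper names below are made up for the example) of source that now reaches the NEON::BI__builtin_neon_vqadd_v path added above. The non-q 64-bit saturating adds in arm_neon.h expand to __builtin_neon_vqadd_v, which buildCommonNeonBuiltinExpr lowers to llvm.aarch64.neon.sqadd for signed and llvm.aarch64.neon.uqadd for unsigned element types.

#include <arm_neon.h>

// Hypothetical helpers, for illustration only.
int64x1_t saturating_add_s64(int64x1_t a, int64x1_t b) {
  return vqadd_s64(a, b); // signed: lowered via llvm.aarch64.neon.sqadd
}

uint64x1_t saturating_add_u64(uint64x1_t a, uint64x1_t b) {
  return vqadd_u64(a, b); // unsigned: lowered via llvm.aarch64.neon.uqadd
}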