diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 8af4bc658409d4..45e19cdea300b1 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -34,15 +34,14 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
   // Whether this is assigning args for a return.
   bool IsRet;
 
-  RVVArgDispatcher &RVVDispatcher;
+  // true if assignArg has been called for a mask argument, false otherwise.
+  bool AssignedFirstMaskArg = false;
 
 public:
   RISCVOutgoingValueAssigner(
-      RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
-      RVVArgDispatcher &RVVDispatcher)
+      RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
       : CallLowering::OutgoingValueAssigner(nullptr),
-        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
-        RVVDispatcher(RVVDispatcher) {}
+        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}
 
   bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                  CCValAssign::LocInfo LocInfo,
@@ -52,9 +51,16 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
     const DataLayout &DL = MF.getDataLayout();
     const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
 
+    std::optional<unsigned> FirstMaskArgument;
+    if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
+        ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
+      FirstMaskArgument = ValNo;
+      AssignedFirstMaskArg = true;
+    }
+
     if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                       LocInfo, Flags, State, Info.IsFixed, IsRet, Info.Ty,
-                      *Subtarget.getTargetLowering(), RVVDispatcher))
+                      *Subtarget.getTargetLowering(), FirstMaskArgument))
       return true;
 
     StackSize = State.getStackSize();
@@ -175,15 +181,14 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
   // Whether this is assigning args from a return.
   bool IsRet;
 
-  RVVArgDispatcher &RVVDispatcher;
+  // true if assignArg has been called for a mask argument, false otherwise.
+  bool AssignedFirstMaskArg = false;
 
 public:
   RISCVIncomingValueAssigner(
-      RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
-      RVVArgDispatcher &RVVDispatcher)
+      RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
       : CallLowering::IncomingValueAssigner(nullptr),
-        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
-        RVVDispatcher(RVVDispatcher) {}
+        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}
 
   bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                  CCValAssign::LocInfo LocInfo,
@@ -196,9 +201,16 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
     if (LocVT.isScalableVector())
       MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
 
+    std::optional<unsigned> FirstMaskArgument;
+    if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
+        ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
+      FirstMaskArgument = ValNo;
+      AssignedFirstMaskArg = true;
+    }
+
     if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                       LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
-                      *Subtarget.getTargetLowering(), RVVDispatcher))
+                      *Subtarget.getTargetLowering(), FirstMaskArgument))
       return true;
 
     StackSize = State.getStackSize();
@@ -408,11 +420,9 @@ bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
   SmallVector<ArgInfo, 4> SplitRetInfos;
   splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, CC);
 
-  RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(),
-                              F.getReturnType()};
   RISCVOutgoingValueAssigner Assigner(
       CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
-      /*IsRet=*/true, Dispatcher);
+      /*IsRet=*/true);
   RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
   return determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
                                        MIRBuilder, CC, F.isVarArg());
@@ -521,7 +531,6 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
   CallingConv::ID CC = F.getCallingConv();
 
   SmallVector<ArgInfo, 32> SplitArgInfos;
-  SmallVector<Type *, 4> TypeList;
   unsigned Index = 0;
   for (auto &Arg : F.args()) {
     // Construct the ArgInfo object from destination register and argument type.
@@ -533,15 +542,12 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
     // correspondingly and appended to SplitArgInfos.
     splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
 
-    TypeList.push_back(Arg.getType());
-
     ++Index;
   }
 
-  RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(), TypeList};
   RISCVIncomingValueAssigner Assigner(
       CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
-      /*IsRet=*/false, Dispatcher);
+      /*IsRet=*/false);
   RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());
 
   SmallVector<CCValAssign, 16> ArgLocs;
@@ -579,13 +585,11 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   SmallVector<ArgInfo, 32> SplitArgInfos;
   SmallVector<ISD::OutputArg, 8> Outs;
-  SmallVector<Type *, 4> TypeList;
   for (auto &AInfo : Info.OrigArgs) {
     // Handle any required unmerging of split value types from a given VReg into
     // physical registers. ArgInfo objects are constructed correspondingly and
     // appended to SplitArgInfos.
     splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
 
-    TypeList.push_back(AInfo.Ty);
   }
 
   // TODO: Support tail calls.
@@ -603,10 +607,9 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
   Call.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));
 
-  RVVArgDispatcher ArgDispatcher{&MF, getTLI<RISCVTargetLowering>(), TypeList};
   RISCVOutgoingValueAssigner ArgAssigner(
       CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
-      /*IsRet=*/false, ArgDispatcher);
+      /*IsRet=*/false);
   RISCVOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), Call);
   if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
                                      MIRBuilder, CC, Info.IsVarArg))
@@ -634,11 +637,9 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   SmallVector<ArgInfo, 4> SplitRetInfos;
   splitToValueTypes(Info.OrigRet, SplitRetInfos, DL, CC);
 
-  RVVArgDispatcher RetDispatcher{&MF, getTLI<RISCVTargetLowering>(),
-                                 F.getReturnType()};
   RISCVIncomingValueAssigner RetAssigner(
       CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
-      /*IsRet=*/true, RetDispatcher);
+      /*IsRet=*/true);
   RISCVCallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), Call);
   if (!determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
                                      MIRBuilder, CC, Info.IsVarArg))
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 3e7bc8c2367de6..5a572002091ff3 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -22,7 +22,6 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Analysis/VectorUtils.h"
-#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -18079,12 +18078,33 @@ static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
   return false;
 }
 
+static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
+                               std::optional<unsigned> FirstMaskArgument,
+                               CCState &State, const RISCVTargetLowering &TLI) {
+  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
+  if (RC == &RISCV::VRRegClass) {
+    // Assign the first mask argument to V0.
+    // This is an interim calling convention and it may be changed in the
+    // future.
+    if (FirstMaskArgument && ValNo == *FirstMaskArgument)
+      return State.AllocateReg(RISCV::V0);
+    return State.AllocateReg(ArgVRs);
+  }
+  if (RC == &RISCV::VRM2RegClass)
+    return State.AllocateReg(ArgVRM2s);
+  if (RC == &RISCV::VRM4RegClass)
+    return State.AllocateReg(ArgVRM4s);
+  if (RC == &RISCV::VRM8RegClass)
+    return State.AllocateReg(ArgVRM8s);
+  llvm_unreachable("Unhandled register class for ValueType");
+}
+
 // Implements the RISC-V calling convention. Returns true upon failure.
 bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
-                     RVVArgDispatcher &RVVDispatcher) {
+                     std::optional<unsigned> FirstMaskArgument) {
   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
   assert(XLen == 32 || XLen == 64);
   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
@@ -18253,7 +18273,7 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
   else if (ValVT == MVT::f64 && !UseGPRForF64)
     Reg = State.AllocateReg(ArgFPR64s);
   else if (ValVT.isVector()) {
-    Reg = RVVDispatcher.getNextPhysReg();
+    Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
    if (!Reg) {
       // For return values, the vector must be passed fully via registers or
       // via the stack.
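
For illustration only (this sketch is not part of the patch and uses hypothetical standalone types rather than the LLVM classes): the policy that allocateRVVReg restores can be modeled as below. Every vector argument draws from the v8-v23 pool of its register class, except that the single argument whose index matches FirstMaskArgument is routed to v0.

// Illustration only: hypothetical standalone sketch, not LLVM code.
#include <cstdio>
#include <optional>

// Stand-in for the LMUL=1 argument pool ArgVRs (v8..v23 in the real table,
// truncated here). Aliasing with the LMUL>1 pools is ignored; in the real
// code CCState tracks that.
static const char *ArgVRsSketch[] = {"v8", "v9", "v10", "v11"};
static unsigned NextVR = 0;

static const char *allocateFromVRs() {
  return NextVR < 4 ? ArgVRsSketch[NextVR++] : nullptr;
}

// Mirrors the shape of allocateRVVReg for the VR register class: only the
// argument whose index equals FirstMaskArgument may take v0.
static const char *allocateVRSketch(unsigned ValNo,
                                    std::optional<unsigned> FirstMaskArgument) {
  if (FirstMaskArgument && ValNo == *FirstMaskArgument)
    return "v0";
  return allocateFromVRs();
}

int main() {
  // Argument list (data, mask, mask): index 1 is the first mask argument.
  std::optional<unsigned> FirstMaskArgument = 1;
  for (unsigned ValNo = 0; ValNo < 3; ++ValNo)
    std::printf("arg%u -> %s\n", ValNo,
                allocateVRSketch(ValNo, FirstMaskArgument));
}

Running the sketch prints arg0 -> v8, arg1 -> v0, arg2 -> v9, which matches how CC_RISCV assigns a (data, mask, mask) argument list under this interim convention: later masks compete with data arguments for the ordinary pools.
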
@@ -18339,13 +18359,9 @@ void RISCVTargetLowering::analyzeInputArgs(
   unsigned NumArgs = Ins.size();
   FunctionType *FType = MF.getFunction().getFunctionType();
 
-  SmallVector<Type *, 4> TypeList;
-  if (IsRet)
-    TypeList.push_back(MF.getFunction().getReturnType());
-  else
-    for (const Argument &Arg : MF.getFunction().args())
-      TypeList.push_back(Arg.getType());
-  RVVArgDispatcher Dispatcher{&MF, this, TypeList};
+  std::optional<unsigned> FirstMaskArgument;
+  if (Subtarget.hasVInstructions())
+    FirstMaskArgument = preAssignMask(Ins);
 
   for (unsigned i = 0; i != NumArgs; ++i) {
     MVT ArgVT = Ins[i].VT;
@@ -18360,7 +18376,7 @@ void RISCVTargetLowering::analyzeInputArgs(
     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
-           Dispatcher)) {
+           FirstMaskArgument)) {
       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
                         << ArgVT << '\n');
       llvm_unreachable(nullptr);
@@ -18374,13 +18390,9 @@ void RISCVTargetLowering::analyzeOutputArgs(
     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
   unsigned NumArgs = Outs.size();
 
-  SmallVector<Type *, 4> TypeList;
-  if (IsRet)
-    TypeList.push_back(MF.getFunction().getReturnType());
-  else if (CLI)
-    for (const TargetLowering::ArgListEntry &Arg : CLI->getArgs())
-      TypeList.push_back(Arg.Ty);
-  RVVArgDispatcher Dispatcher{&MF, this, TypeList};
+  std::optional<unsigned> FirstMaskArgument;
+  if (Subtarget.hasVInstructions())
+    FirstMaskArgument = preAssignMask(Outs);
 
   for (unsigned i = 0; i != NumArgs; i++) {
     MVT ArgVT = Outs[i].VT;
@@ -18390,7 +18402,7 @@ void RISCVTargetLowering::analyzeOutputArgs(
     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
-           Dispatcher)) {
+           FirstMaskArgument)) {
       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
                         << ArgVT << "\n");
       llvm_unreachable(nullptr);
@@ -18571,7 +18583,7 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
                             ISD::ArgFlagsTy ArgFlags, CCState &State,
                             bool IsFixed, bool IsRet, Type *OrigTy,
                             const RISCVTargetLowering &TLI,
-                            RVVArgDispatcher &RVVDispatcher) {
+                            std::optional<unsigned> FirstMaskArgument) {
   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
     if (unsigned Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -18649,14 +18661,13 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
   }
 
   if (LocVT.isVector()) {
-    MCPhysReg AllocatedVReg = RVVDispatcher.getNextPhysReg();
-    if (AllocatedVReg) {
+    if (unsigned Reg =
+            allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
       // Fixed-length vectors are located in the corresponding scalable-vector
       // container types.
       if (ValVT.isFixedLengthVector())
         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
-      State.addLoc(
-          CCValAssign::getReg(ValNo, ValVT, AllocatedVReg, LocVT, LocInfo));
+      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
     } else {
       // Try and pass the address via a "fast" GPR.
       if (unsigned GPRReg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
@@ -19284,15 +19295,17 @@ bool RISCVTargetLowering::CanLowerReturn(
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
 
-  RVVArgDispatcher Dispatcher{&MF, this, MF.getFunction().getReturnType()};
+  std::optional<unsigned> FirstMaskArgument;
+  if (Subtarget.hasVInstructions())
+    FirstMaskArgument = preAssignMask(Outs);
 
   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
     MVT VT = Outs[i].VT;
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
     if (RISCV::CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
-                        ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true,
-                        nullptr, *this, Dispatcher))
+                        ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
+                        *this, FirstMaskArgument))
       return false;
   }
   return true;
@@ -21089,119 +21102,6 @@ unsigned RISCVTargetLowering::getMinimumJumpTableEntries() const {
   return Subtarget.getMinimumJumpTableEntries();
 }
 
-void RVVArgDispatcher::constructArgInfos(ArrayRef<Type *> TypeList) {
-  const DataLayout &DL = MF->getDataLayout();
-  const Function &F = MF->getFunction();
-  LLVMContext &Context = F.getContext();
-
-  bool FirstVMaskAssigned = false;
-  for (Type *Ty : TypeList) {
-    StructType *STy = dyn_cast<StructType>(Ty);
-    if (STy && STy->containsHomogeneousScalableVectorTypes()) {
-      Type *ElemTy = STy->getTypeAtIndex(0U);
-      EVT VT = TLI->getValueType(DL, ElemTy);
-      MVT RegisterVT =
-          TLI->getRegisterTypeForCallingConv(Context, F.getCallingConv(), VT);
-
-      RVVArgInfos.push_back({STy->getNumElements(), RegisterVT, false});
-    } else {
-      SmallVector<EVT, 4> ValueVTs;
-      ComputeValueVTs(*TLI, DL, Ty, ValueVTs);
-
-      for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
-           ++Value) {
-        EVT VT = ValueVTs[Value];
-        MVT RegisterVT =
-            TLI->getRegisterTypeForCallingConv(Context, F.getCallingConv(), VT);
-        unsigned NumRegs =
-            TLI->getNumRegistersForCallingConv(Context, F.getCallingConv(), VT);
-
-        // Skip non-RVV register type
-        if (!RegisterVT.isVector())
-          continue;
-
-        if (RegisterVT.isFixedLengthVector())
-          RegisterVT = TLI->getContainerForFixedLengthVector(RegisterVT);
-
-        if (!FirstVMaskAssigned &&
-            RegisterVT.getVectorElementType() == MVT::i1) {
-          RVVArgInfos.push_back({1, RegisterVT, true});
-          FirstVMaskAssigned = true;
-          --NumRegs;
-        }
-
-        RVVArgInfos.insert(RVVArgInfos.end(), NumRegs, {1, RegisterVT, false});
-      }
-    }
-  }
-}
-
-void RVVArgDispatcher::allocatePhysReg(unsigned NF, unsigned LMul,
-                                       unsigned StartReg) {
-  assert((StartReg % LMul) == 0 &&
-         "Start register number should be multiple of lmul");
-  const MCPhysReg *VRArrays;
-  switch (LMul) {
-  default:
-    report_fatal_error("Invalid lmul");
-  case 1:
-    VRArrays = ArgVRs;
-    break;
-  case 2:
-    VRArrays = ArgVRM2s;
-    break;
-  case 4:
-    VRArrays = ArgVRM4s;
-    break;
-  case 8:
-    VRArrays = ArgVRM8s;
-    break;
-  }
-
-  for (unsigned i = 0; i < NF; ++i)
-    if (StartReg)
-      AllocatedPhysRegs.push_back(VRArrays[(StartReg - 8) / LMul + i]);
-    else
-      AllocatedPhysRegs.push_back(MCPhysReg());
-}
-
-/// This function determines if each RVV argument is passed by register, if the
-/// argument can be assigned to a VR, then give it a specific register.
-/// Otherwise, assign the argument to 0 which is a invalid MCPhysReg.
-void RVVArgDispatcher::compute() {
-  uint32_t AssignedMap = 0;
-  auto allocate = [&](const RVVArgInfo &ArgInfo) {
-    // Allocate first vector mask argument to V0.
-    if (ArgInfo.FirstVMask) {
-      AllocatedPhysRegs.push_back(RISCV::V0);
-      return;
-    }
-
-    unsigned RegsNeeded = divideCeil(
-        ArgInfo.VT.getSizeInBits().getKnownMinValue(), RISCV::RVVBitsPerBlock);
-    unsigned TotalRegsNeeded = ArgInfo.NF * RegsNeeded;
-    for (unsigned StartReg = 0; StartReg + TotalRegsNeeded <= NumArgVRs;
-         StartReg += RegsNeeded) {
-      uint32_t Map = ((1 << TotalRegsNeeded) - 1) << StartReg;
-      if ((AssignedMap & Map) == 0) {
-        allocatePhysReg(ArgInfo.NF, RegsNeeded, StartReg + 8);
-        AssignedMap |= Map;
-        return;
-      }
-    }
-
-    allocatePhysReg(ArgInfo.NF, RegsNeeded, 0);
-  };
-
-  for (unsigned i = 0; i < RVVArgInfos.size(); ++i)
-    allocate(RVVArgInfos[i]);
-}
-
-MCPhysReg RVVArgDispatcher::getNextPhysReg() {
-  assert(CurIdx < AllocatedPhysRegs.size() && "Index out of range");
-  return AllocatedPhysRegs[CurIdx++];
-}
-
 namespace llvm::RISCVVIntrinsicsTable {
 
 #define GET_RISCVVIntrinsicsTable_IMPL
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index c28552354bf422..ace5b3fd2b95b4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -24,7 +24,6 @@ namespace llvm {
 class InstructionCost;
 class RISCVSubtarget;
 struct RISCVRegisterInfo;
-class RVVArgDispatcher;
 
 namespace RISCVISD {
 // clang-format off
@@ -876,7 +875,7 @@ class RISCVTargetLowering : public TargetLowering {
                                ISD::ArgFlagsTy ArgFlags, CCState &State,
                                bool IsFixed, bool IsRet, Type *OrigTy,
                                const RISCVTargetLowering &TLI,
-                               RVVArgDispatcher &RVVDispatcher);
+                               std::optional<unsigned> FirstMaskArgument);
 
 private:
   void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
@@ -1016,68 +1015,19 @@ class RISCVTargetLowering : public TargetLowering {
   unsigned getMinimumJumpTableEntries() const override;
 };
 
-/// As per the spec, the rules for passing vector arguments are as follows:
-///
-/// 1. For the first vector mask argument, use v0 to pass it.
-/// 2. For vector data arguments or rest vector mask arguments, starting from
-/// the v8 register, if a vector register group between v8-v23 that has not been
-/// allocated can be found and the first register number is a multiple of LMUL,
-/// then allocate this vector register group to the argument and mark these
-/// registers as allocated. Otherwise, pass it by reference and are replaced in
-/// the argument list with the address.
-/// 3. For tuple vector data arguments, starting from the v8 register, if
-/// NFIELDS consecutive vector register groups between v8-v23 that have not been
-/// allocated can be found and the first register number is a multiple of LMUL,
-/// then allocate these vector register groups to the argument and mark these
-/// registers as allocated. Otherwise, pass it by reference and are replaced in
-/// the argument list with the address.
-class RVVArgDispatcher {
-public:
-  static constexpr unsigned NumArgVRs = 16;
-
-  struct RVVArgInfo {
-    unsigned NF;
-    MVT VT;
-    bool FirstVMask = false;
-  };
-
-  RVVArgDispatcher(const MachineFunction *MF, const RISCVTargetLowering *TLI,
-                   ArrayRef<Type *> TypeList)
-      : MF(MF), TLI(TLI) {
-    constructArgInfos(TypeList);
-    compute();
-  }
-
-  MCPhysReg getNextPhysReg();
-
-private:
-  SmallVector<RVVArgInfo, 4> RVVArgInfos;
-  SmallVector<MCPhysReg, 4> AllocatedPhysRegs;
-
-  const MachineFunction *MF = nullptr;
-  const RISCVTargetLowering *TLI = nullptr;
-
-  unsigned CurIdx = 0;
-
-  void constructArgInfos(ArrayRef<Type *> TypeList);
-  void compute();
-  void allocatePhysReg(unsigned NF = 1, unsigned LMul = 1,
-                       unsigned StartReg = 0);
-};
-
 namespace RISCV {
 
 bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
               MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
               ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
               bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
-              RVVArgDispatcher &RVVDispatcher);
+              std::optional<unsigned> FirstMaskArgument);
 
 bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
-                     RVVArgDispatcher &RVVDispatcher);
+                     std::optional<unsigned> FirstMaskArgument);
 
 bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index 90edb994ce8222..78e8700a9feff8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -162,90 +162,3 @@ define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x)
 }
 
 declare void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>})
-
-; %0 -> v8
-; %1 -> v9
-define <vscale x 1 x i64> @case1(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1) {
-; CHECK-LABEL: case1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-  %a = add <vscale x 1 x i64> %0, %1
-  ret <vscale x 1 x i64> %a
-}
-
-; %0 -> v8
-; %1 -> v10-v11
-; %2 -> v9
-define <vscale x 1 x i64> @case2_1(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
-; CHECK-LABEL: case2_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-  %a = add <vscale x 1 x i64> %0, %2
-  ret <vscale x 1 x i64> %a
-}
-define <vscale x 2 x i64> @case2_2(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
-; CHECK-LABEL: case2_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vadd.vv v8, v10, v10
-; CHECK-NEXT:    ret
-  %a = add <vscale x 2 x i64> %1, %1
-  ret <vscale x 2 x i64> %a
-}
-
-; %0 -> v8
-; %1 -> {v10-v11, v12-v13}
-; %2 -> v9
-define <vscale x 1 x i64> @case3_1(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
-; CHECK-LABEL: case3_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-  %add = add <vscale x 1 x i64> %0, %2
-  ret <vscale x 1 x i64> %add
-}
-define <vscale x 2 x i64> @case3_2(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
-; CHECK-LABEL: case3_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
-  %a = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 0
-  %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 1
-  %add = add <vscale x 2 x i64> %a, %b
-  ret <vscale x 2 x i64> %add
-}
-
-; %0 -> v8
-; %1 -> {by-ref, by-ref}
-; %2 -> v9
-define <vscale x 8 x i64> @case4_1(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
-; CHECK-LABEL: case4_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    add a1, a0, a1
-; CHECK-NEXT:    vl8re64.v v8, (a1)
-; CHECK-NEXT:    vl8re64.v v16, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vadd.vv v8, v16, v8
-; CHECK-NEXT:    ret
-  %a = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 0
-  %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 1
-  %add = add <vscale x 8 x i64> %a, %b
-  ret <vscale x 8 x i64> %add
-}
-define <vscale x 1 x i64> @case4_2(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
-; CHECK-LABEL: case4_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
-  %add = add <vscale x 1 x i64> %0, %2
-  ret <vscale x 1 x i64> %add
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index 6a712080fda74a..a320aecc6fce49 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -18,10 +18,10 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_load_nxv16i
 ; CHECK-NEXT:    vmerge.vim v14, v10, 1, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vmerge.vim v12, v10, 1, v0
-; CHECK-NEXT:    vnsrl.wi v10, v12, 0
-; CHECK-NEXT:    vmsne.vi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    vnsrl.wi v10, v12, 8
-; CHECK-NEXT:    vmsne.vi v9, v10, 0
+; CHECK-NEXT:    vmsne.vi v8, v10, 0
 ; CHECK-NEXT:    ret
   %vec = load <vscale x 32 x i1>, ptr %p
   %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index d98597fabcd953..ef4baf34d23f03 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -8,18 +8,18 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv
 ; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv32i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT:    vmv.v.i v10, 0
+; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v0, v0, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v14, v8, 1, v0
-; CHECK-NEXT:    vnsrl.wi v10, v12, 0
-; CHECK-NEXT:    vmsne.vi v8, v10, 0
-; CHECK-NEXT:    vnsrl.wi v10, v12, 8
-; CHECK-NEXT:    vmsne.vi v9, v10, 0
+; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vmsne.vi v0, v12, 0
+; CHECK-NEXT:    vnsrl.wi v12, v8, 8
+; CHECK-NEXT:    vmsne.vi v8, v12, 0
 ; CHECK-NEXT:    ret
   %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
   ret {<vscale x 16 x i1>, <vscale x 16 x i1>} %retval
@@ -102,13 +102,12 @@ define {<vscale x 64 x i1>, <vscale x 64 x i1>} @vector_deinterleave_nxv64i1_nxv
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vmsne.vi v7, v24, 0
+; CHECK-NEXT:    vmsne.vi v0, v24, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v24, v16, 8
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 8
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vmsne.vi v9, v24, 0
-; CHECK-NEXT:    vmv1r.v v8, v7
+; CHECK-NEXT:    vmsne.vi v8, v24, 0
 ; CHECK-NEXT:    ret
   %retval = call {<vscale x 64 x i1>, <vscale x 64 x i1>} @llvm.experimental.vector.deinterleave2.nxv128i1(<vscale x 128 x i1> %vec)
   ret {<vscale x 64 x i1>, <vscale x 64 x i1>} %retval
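
A closing illustrative note (again a hypothetical standalone sketch, not the LLVM classes): on the GlobalISel side the assigners cannot pre-scan the whole argument list the way preAssignMask does for SelectionDAG, so the patch gives them a per-call AssignedFirstMaskArg latch. assignArg runs once per split part, and only the first i1-element vector part may claim v0; the sketch below models that bookkeeping.

// Illustration only: hypothetical standalone sketch, not LLVM code.
#include <cstdio>
#include <optional>
#include <vector>

struct Part {
  bool IsVector;
  bool ElemIsI1; // stands in for ValVT.getVectorElementType() == MVT::i1
};

int main() {
  // Split parts of one call: a data vector followed by two mask vectors.
  // (The Subtarget.hasVInstructions() guard is omitted in this sketch.)
  std::vector<Part> Parts = {{true, false}, {true, true}, {true, true}};

  bool AssignedFirstMaskArg = false; // the latch added by the patch
  for (unsigned ValNo = 0; ValNo < Parts.size(); ++ValNo) {
    // Recomputed per part, exactly as in the assigners' assignArg.
    std::optional<unsigned> FirstMaskArgument;
    if (!AssignedFirstMaskArg && Parts[ValNo].IsVector &&
        Parts[ValNo].ElemIsI1) {
      FirstMaskArgument = ValNo; // only this part is eligible for v0
      AssignedFirstMaskArg = true;
    }
    std::printf("part %u: FirstMaskArgument=%s\n", ValNo,
                FirstMaskArgument ? "set" : "nullopt");
  }
}

Here part 1 sets FirstMaskArgument and trips the latch, so part 2 (the second mask) falls through to the ordinary v8-v23 allocation, matching the behavior of the assigner changes in RISCVCallLowering.cpp.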