diff --git a/src/arch/generic/mmu.hh b/src/arch/generic/mmu.hh
index e4b241d02c..d6f55d3a8d 100644
--- a/src/arch/generic/mmu.hh
+++ b/src/arch/generic/mmu.hh
@@ -123,6 +123,14 @@ class BaseMMU : public SimObject
     translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode);
 
+    virtual void setOldPriv(ThreadContext *tc) {
+        panic("BaseMMU::setOldPriv() not implemented");
+    }
+
+    virtual void useNewPriv(ThreadContext *tc) {
+        panic("BaseMMU::useNewPriv() not implemented");
+    }
+
     class MMUTranslationGen : public TranslationGen
     {
       private:
diff --git a/src/arch/riscv/isa/decoder.isa b/src/arch/riscv/isa/decoder.isa
index e88ebd9d26..858ecece1e 100644
--- a/src/arch/riscv/isa/decoder.isa
+++ b/src/arch/riscv/isa/decoder.isa
@@ -953,7 +953,7 @@ decode QUADRANT default Unknown::unknown() {
             0x2: decode AMOFUNCT {
                 0x2: LoadReserved::lr_w({{
                     Rd_sd = Mem_sw;
-                }}, mem_flags=LLSC);
+                }}, inst_flags=IsLoadReserved, mem_flags=LLSC);
                 0x3: StoreCond::sc_w({{
                     Mem_uw = Rs2_uw;
                 }}, {{
@@ -1026,7 +1026,7 @@ decode QUADRANT default Unknown::unknown() {
             0x3: decode AMOFUNCT {
                 0x2: LoadReserved::lr_d({{
                     Rd_sd = Mem_sd;
-                }}, mem_flags=LLSC);
+                }}, mem_flags=LLSC, inst_flags=IsLoadReserved);
                 0x3: StoreCond::sc_d({{
                     Mem = Rs2;
                 }}, {{
diff --git a/src/arch/riscv/mmu.hh b/src/arch/riscv/mmu.hh
index f8afaa7380..595a5f60d2 100644
--- a/src/arch/riscv/mmu.hh
+++ b/src/arch/riscv/mmu.hh
@@ -94,6 +94,18 @@ class MMU : public BaseMMU
     {
         return static_cast<TLB *>(dtb)->pmp;
     }
+
+    void
+    setOldPriv(ThreadContext *tc) override {
+        static_cast<TLB *>(dtb)->setOldPriv(tc);
+        static_cast<TLB *>(itb)->setOldPriv(tc);
+    }
+
+    void
+    useNewPriv(ThreadContext *tc) override {
+        static_cast<TLB *>(dtb)->useNewPriv(tc);
+        static_cast<TLB *>(itb)->useNewPriv(tc);
+    }
 };
 
 } // namespace RiscvISA
diff --git a/src/arch/riscv/tlb.cc b/src/arch/riscv/tlb.cc
index 73d6897931..f1e66d514e 100644
--- a/src/arch/riscv/tlb.cc
+++ b/src/arch/riscv/tlb.cc
@@ -1961,6 +1961,13 @@ TLB::doTranslate(const RequestPtr &req, ThreadContext *tc,
 PrivilegeMode
 TLB::getMemPriv(ThreadContext *tc, BaseMMU::Mode mode)
 {
+    if (use_old_priv) {
+        if (mode == BaseMMU::Execute) {
+            return old_priv_ex;
+        } else {
+            return old_priv_ldst;
+        }
+    }
     STATUS status = (STATUS)tc->readMiscReg(MISCREG_STATUS);
     PrivilegeMode pmode = (PrivilegeMode)tc->readMiscReg(MISCREG_PRV);
     if (mode != BaseMMU::Execute && status.mprv == 1)
diff --git a/src/arch/riscv/tlb.hh b/src/arch/riscv/tlb.hh
index 6386d859d8..b89be21adc 100644
--- a/src/arch/riscv/tlb.hh
+++ b/src/arch/riscv/tlb.hh
@@ -98,6 +98,9 @@ class TLB : public BaseTLB
     uint64_t lastPc;
     uint64_t traceFlag;
+    bool use_old_priv;
+    PrivilegeMode old_priv_ldst;
+    PrivilegeMode old_priv_ex;
 
     Walker *walker;
 
@@ -251,6 +254,14 @@ class TLB : public BaseTLB
     TlbEntry *lookupL2TLB(Addr vpn, uint16_t asid, BaseMMU::Mode mode,
                           bool hidden, int f_level, bool sign_used,
                           uint8_t translateMode);
+    void setOldPriv(ThreadContext *tc) {
+        use_old_priv = true;
+        old_priv_ex = getMemPriv(tc, BaseMMU::Execute);
+        old_priv_ldst = getMemPriv(tc, BaseMMU::Read);
+    }
+    void useNewPriv(ThreadContext *tc) {
+        use_old_priv = false;
+    }
 
     std::vector tlbL2L1;  // our TLB
diff --git a/src/cpu/StaticInstFlags.py b/src/cpu/StaticInstFlags.py
index 72d2c94009..308b2e8267 100644
--- a/src/cpu/StaticInstFlags.py
+++ b/src/cpu/StaticInstFlags.py
@@ -61,6 +61,7 @@ class StaticInstFlags(Enum):
         'IsLoad',               # Reads from memory (load or prefetch).
         'IsStore',              # Writes to memory.
         'IsAtomic',             # Does atomic RMW to memory.
+        'IsLoadReserved',       # Load reserved.
         'IsStoreConditional',   # Store conditional instruction.
         'IsInstPrefetch',       # Instruction-cache prefetch.
         'IsDataPrefetch',       # Data-cache prefetch.
diff --git a/src/cpu/base.cc b/src/cpu/base.cc
index 2e3aaf1d15..7646f17434 100644
--- a/src/cpu/base.cc
+++ b/src/cpu/base.cc
@@ -1064,7 +1064,38 @@ BaseCPU::diffWithNEMU(ThreadID tid, InstSeqNum seq)
                     "[0m, GEM5 value: \033[31m%#lx\033[0m\n", "stval",
                     ref_val, gem5_val);
         }
+
+        // mtval
+        gem5_val = readMiscRegNoEffect(
+            RiscvISA::MiscRegIndex::MISCREG_MTVAL, tid);
+        ref_val = diffAllStates->referenceRegFile.mtval;
+        DPRINTF(Diff, "mtval:\tGEM5: %#lx,\tREF: %#lx\n", gem5_val, ref_val);
+        if (gem5_val != ref_val) {
+            diffMsg += csprintf("Diff at \033[31m%s\033[0m Ref value: \033[31m%#lx\033"
+                "[0m, GEM5 value: \033[31m%#lx\033[0m\n", "mtval",
+                ref_val, gem5_val);
+            diffInfo.errorCsrsValue[CsrRegIndex::mtval] = 1;
+            diffAllStates->gem5RegFile.mtval = gem5_val;
+            if (!diff_at)
+                diff_at = ValueDiff;
+        }
+
+        // mode
+        gem5_val = readMiscRegNoEffect(
+            RiscvISA::MiscRegIndex::MISCREG_PRV, tid);
+        ref_val = diffAllStates->referenceRegFile.mode;
+        DPRINTF(Diff, "priv:\tGEM5: %#lx,\tREF: %#lx\n", gem5_val, ref_val);
+        if (gem5_val != ref_val) {
+            diffMsg += csprintf("Diff at \033[31m%s\033[0m Ref value: \033[31m%#lx\033"
+                "[0m, GEM5 value: \033[31m%#lx\033[0m\n", "priv",
+                ref_val, gem5_val);
+            // diffInfo.errorCsrsValue[CsrRegIndex::stval] = 1;
+            diffAllStates->gem5RegFile.mode = gem5_val;
+            if (!diff_at)
+                diff_at = ValueDiff;
+        }
+
         // mcause
         gem5_val = readMiscRegNoEffect(
             RiscvISA::MiscRegIndex::MISCREG_MCAUSE, tid);
@@ -1076,6 +1107,21 @@ BaseCPU::diffWithNEMU(ThreadID tid, InstSeqNum seq)
             csprintf("Diff at \033[31m%s\033[0m Ref value: \033[31m%#lx\033[0m, GEM5 value: \033[31m%#lx\033[0m\n",
                      "mcause", ref_val, gem5_val);
     }
+
+    // scause
+    gem5_val = readMiscRegNoEffect(
+        RiscvISA::MiscRegIndex::MISCREG_SCAUSE, tid);
+    ref_val = diffAllStates->referenceRegFile.scause;
+    DPRINTF(Diff, "scause:\tGEM5: %#lx,\tREF: %#lx\n", gem5_val, ref_val);
+    if (gem5_val != ref_val) {
+        diffMsg +=
+            csprintf("Diff at \033[31m%s\033[0m Ref value: \033[31m%#lx\033[0m, GEM5 value: \033[31m%#lx\033[0m\n",
+                     "scause", ref_val, gem5_val);
+        diffInfo.errorCsrsValue[CsrRegIndex::scause] = 1;
+        diffAllStates->gem5RegFile.scause = gem5_val;
+        if (!diff_at)
+            diff_at = ValueDiff;
+    }
 
     // satp
     gem5_val = readMiscRegNoEffect(RiscvISA::MiscRegIndex::MISCREG_SATP, tid);
@@ -1389,12 +1435,13 @@ BaseCPU::difftestStep(ThreadID tid, InstSeqNum seq)
     bool is_fence = diffInfo.inst->isReadBarrier() || diffInfo.inst->isWriteBarrier();
     bool fence_should_diff = is_fence && !diffInfo.inst->isMicroop();
+    bool lr_should_diff = diffInfo.inst->isLoadReserved();
     bool amo_should_diff = diffInfo.inst->isAtomic() &&
                            diffInfo.inst->numDestRegs() > 0;
     bool is_sc = diffInfo.inst->isStoreConditional() &&
                  diffInfo.inst->isDelayedCommit();
     bool other_should_diff = !diffInfo.inst->isAtomic() && !is_fence && !is_sc &&
                              (!diffInfo.inst->isMicroop() || diffInfo.inst->isLastMicroop());
-    if (fence_should_diff || amo_should_diff || is_sc || other_should_diff) {
+    if (fence_should_diff || amo_should_diff || is_sc || other_should_diff || lr_should_diff) {
         should_diff = true;
         if (!diffAllStates->hasCommit && diffInfo.pc->instAddr() == 0x80000000u) {
             diffAllStates->hasCommit = true;
diff --git a/src/cpu/o3/commit.cc b/src/cpu/o3/commit.cc
index e4c30a2d52..e90c24e175 100644
--- a/src/cpu/o3/commit.cc
+++ b/src/cpu/o3/commit.cc
@@ -169,6 +169,7 @@ Commit::Commit(CPU *_cpu, branch_prediction::BPredUnit *_bp, const BaseO3CPUParams
     faultNum.insert(RiscvISA::ExceptionCode::LOAD_PAGE);
     faultNum.insert(RiscvISA::ExceptionCode::STORE_PAGE);
     faultNum.insert(RiscvISA::ExceptionCode::INST_PAGE);
+    faultNum.insert(RiscvISA::ExceptionCode::AMO_PAGE);
     faultNum.insert(RiscvISA::ExceptionCode::INST_G_PAGE);
     faultNum.insert(RiscvISA::ExceptionCode::LOAD_G_PAGE);
     faultNum.insert(RiscvISA::ExceptionCode::STORE_G_PAGE);
@@ -641,6 +642,8 @@ Commit::squashAll(ThreadID tid)
     toIEW->commitInfo[tid].squashedTargetId = committedTargetId;
     toIEW->commitInfo[tid].squashedLoopIter = committedLoopIter;
 
+    cpu->mmu->useNewPriv(cpu->getContext(tid));
+
     squashInflightAndUpdateVersion(tid);
 }
 
@@ -840,6 +843,8 @@ Commit::handleInterrupt()
     DPRINTF(CommitTrace, "Handle interrupt No.%lx\n",
             cpu->getInterruptsNO() | (1ULL << 63));
     cpu->processInterrupts(cpu->getInterrupts());
+    cpu->mmu->setOldPriv(cpu->getContext(0));
+
     thread[0]->noSquashFromTC = false;
 
     commitStatus[0] = TrapPending;
@@ -1527,6 +1532,8 @@ Commit::commitHead(const DynInstPtr &head_inst, unsigned inst_num)
             head_inst->notAnInst() ? nullStaticInstPtr :
                 head_inst->staticInst);
 
+        cpu->mmu->setOldPriv(cpu->getContext(tid));
+
         // Exit state update mode to avoid accidental updating.
         thread[tid]->noSquashFromTC = false;
diff --git a/src/cpu/o3/fetch.cc b/src/cpu/o3/fetch.cc
index 414cd09905..03fb22b51e 100644
--- a/src/cpu/o3/fetch.cc
+++ b/src/cpu/o3/fetch.cc
@@ -835,7 +835,7 @@ Fetch::finishTranslation(const Fault &fault, const RequestPtr &mem_req)
         // If we have, just wait around for commit to squash something and put
         // us on the right track
         if (!cpu->system->isMemAddr(mem_req->getPaddr())) {
-            warn("Address %#x is outside of physical memory, stopping fetch, %lu\n",
+            DPRINTF(Fetch, "Address %#x is outside of physical memory, stopping fetch, %lu\n",
                  mem_req->getPaddr(), curTick());
             fetchStatus[tid] = NoGoodAddr;
             setAllFetchStalls(StallReason::OtherFetchStall);
diff --git a/src/cpu/o3/issue_queue.cc b/src/cpu/o3/issue_queue.cc
index 3ea644c68d..b42b8abdc5 100644
--- a/src/cpu/o3/issue_queue.cc
+++ b/src/cpu/o3/issue_queue.cc
@@ -171,7 +171,7 @@ IssueQue::resetDepGraph(int numPhysRegs)
     subDepGraph.resize(numPhysRegs);
 }
 
-void
+bool
 IssueQue::checkScoreboard(const DynInstPtr& inst)
 {
     for (int i = 0; i < inst->numSrcRegs(); i++) {
@@ -182,11 +182,17 @@ IssueQue::checkScoreboard(const DynInstPtr& inst)
         // check bypass data ready or not
         if (!scheduler->bypassScoreboard[src->flatIndex()]) [[unlikely]] {
             auto dst_inst = scheduler->getInstByDstReg(src->flatIndex());
-            panic("[sn %lu] %s can't get data from bypassNetwork, dst inst: %s\n", inst->seqNum, inst->srcRegIdx(i),
-                  dst_inst->genDisassembly());
+            if (!dst_inst || !dst_inst->isLoad()) {
+                panic("dst is not load");
+            }
+            scheduler->loadCancel(dst_inst);
+            DPRINTF(Schedule, "[sn %lu] %s can't get data from bypassNetwork, dst inst: %s\n",
+                    inst->seqNum, inst->srcRegIdx(i), dst_inst->genDisassembly());
+            return false;
         }
     }
     inst->checkOldVdElim();
+    return true;
 }
 
 void
@@ -217,7 +223,9 @@ IssueQue::issueToFu()
             readyQclassify[inst->opClass()]->push(inst); // retry
             continue;
         }
-        checkScoreboard(inst);
+        if (!checkScoreboard(inst)) {
+            continue;
+        }
         addToFu(inst);
         if (!opPipelined[inst->opClass()]) [[unlikely]] {
             // set fu busy
diff --git a/src/cpu/o3/issue_queue.hh b/src/cpu/o3/issue_queue.hh
index efd2f65ff1..09bef8fb4c 100644
--- a/src/cpu/o3/issue_queue.hh
+++ b/src/cpu/o3/issue_queue.hh
@@ -128,7 +128,7 @@ class IssueQue : public SimObject
     void replay(const DynInstPtr& inst);
     void addToFu(const DynInstPtr& inst);
-    void checkScoreboard(const DynInstPtr& inst);
+    bool checkScoreboard(const DynInstPtr& inst);
     void issueToFu();
     void wakeUpDependents(const DynInstPtr& inst, bool speculative);
     void selectInst();
diff --git a/src/cpu/o3/lsq.cc b/src/cpu/o3/lsq.cc
index 65a9eb89d0..de95a04c18 100644
--- a/src/cpu/o3/lsq.cc
+++ b/src/cpu/o3/lsq.cc
@@ -909,6 +909,14 @@ LSQ::pushRequest(const DynInstPtr& inst, bool isLoad, uint8_t *data,
         request->initiateTranslation();
     }
 
+    if (!isLoad && !inst->isVector() && size > 1 && addr % size != 0) {
+        warn("Store misaligned: size: %u, Addr: %#lx, code: %d\n", size,
+             addr, RiscvISA::ExceptionCode::STORE_ADDR_MISALIGNED);
+        return std::make_shared(request->mainReq()->getVaddr(),
+                                request->mainReq()->getgPaddr(),
+                                RiscvISA::ExceptionCode::STORE_ADDR_MISALIGNED);
+    }
+
     /* This is the place were instructions get the effAddr. */
     if (request->isTranslationComplete()) {
         if (request->isMemAccessRequired()) {
diff --git a/src/cpu/static_inst.hh b/src/cpu/static_inst.hh
index 50d74d212b..2b49920437 100644
--- a/src/cpu/static_inst.hh
+++ b/src/cpu/static_inst.hh
@@ -156,6 +156,7 @@ class StaticInst : public RefCounted, public StaticInstFlags
     bool isLoad() const { return flags[IsLoad]; }
     bool isStore() const { return flags[IsStore]; }
     bool isAtomic() const { return flags[IsAtomic]; }
+    bool isLoadReserved() const { return flags[IsLoadReserved]; }
     bool isStoreConditional() const { return flags[IsStoreConditional]; }
    bool isInstPrefetch() const { return flags[IsInstPrefetch]; }
     bool isDataPrefetch() const { return flags[IsDataPrefetch]; }
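
The setOldPriv()/useNewPriv() pair introduced above latches the effective translation privilege when commit takes a trap, so that translations still in flight keep using the pre-trap privilege until squashAll() switches the MMU back to the live CSR state. Below is a minimal standalone sketch of that latch-then-restore behaviour, not gem5 code; the names PrivLatch and Priv are invented for the illustration only.

// Standalone illustration of the privilege-latching idea in this patch.
// PrivLatch/Priv are hypothetical names, not gem5 APIs.
#include <cassert>

enum class Priv { U, S, M };

class PrivLatch
{
  public:
    // Snapshot the privilege that in-flight translations should keep using
    // (mirrors TLB::setOldPriv()).
    void setOldPriv(Priv fetchPriv, Priv ldstPriv)
    {
        useOld = true;
        oldPrivEx = fetchPriv;
        oldPrivLdst = ldstPriv;
    }

    // Drop the snapshot once the pipeline has squashed (mirrors TLB::useNewPriv()).
    void useNewPriv() { useOld = false; }

    // Mirrors the early-out added to TLB::getMemPriv(): while the latch is armed,
    // return the snapshot instead of the privilege derived from the live CSRs.
    Priv getMemPriv(bool isExecute, Priv livePriv) const
    {
        if (useOld)
            return isExecute ? oldPrivEx : oldPrivLdst;
        return livePriv;
    }

  private:
    bool useOld = false;
    Priv oldPrivEx = Priv::U;
    Priv oldPrivLdst = Priv::U;
};

int main()
{
    PrivLatch latch;
    Priv live = Priv::S;            // privilege before the trap

    latch.setOldPriv(live, live);   // commit is about to take a trap
    live = Priv::M;                 // CSRs now reflect the trap handler

    // Translations still in flight see the pre-trap privilege.
    assert(latch.getMemPriv(false, live) == Priv::S);

    latch.useNewPriv();             // squash finished; switch to the new privilege
    assert(latch.getMemPriv(false, live) == Priv::M);
    return 0;
}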