diff --git a/LICENCE b/LICENCE index 432b78d79..e95d6d263 100644 --- a/LICENCE +++ b/LICENCE @@ -24,6 +24,7 @@ Copyright (c) 2017-2024 Jan Henrik Weinstock Jessica Clarke Jon French + KULeuven, for contributions by Lowie Deferme Martin Berger Michael Sammler Microsoft, for contributions by Robert Norton-Wright and Nathaniel Wesley Filardo diff --git a/Makefile b/Makefile index 6e6483320..f76acab83 100644 --- a/Makefile +++ b/Makefile @@ -47,6 +47,8 @@ SAIL_DEFAULT_INST += riscv_insts_zbkx.sail SAIL_DEFAULT_INST += riscv_insts_zicond.sail +SAIL_DEFAULT_INST += riscv_insts_hext.sail + SAIL_DEFAULT_INST += riscv_insts_vext_utils.sail SAIL_DEFAULT_INST += riscv_insts_vext_fp_utils.sail SAIL_DEFAULT_INST += riscv_insts_vext_vset.sail @@ -74,6 +76,9 @@ SAIL_SYS_SRCS = riscv_csr_begin.sail # Start of CSR scattered definitions SAIL_SYS_SRCS += riscv_vext_control.sail # helpers for the 'V' extension SAIL_SYS_SRCS += riscv_sys_exceptions.sail # default basic helpers for exception handling SAIL_SYS_SRCS += riscv_sync_exception.sail # define the exception structure used in the model +SAIL_SYS_SRCS += riscv_zicsr_control.sail # helpers for the Zicsr extension +SAIL_SYS_SRCS += riscv_hext_control.sail # helpers for the hypervisor extension +SAIL_SYS_SRCS += riscv_csr_hext.sail # scattered definition for hypervisor extension CSRs SAIL_SYS_SRCS += riscv_softfloat_interface.sail riscv_fdext_regs.sail riscv_fdext_control.sail SAIL_SYS_SRCS += riscv_sys_control.sail # general exception handling @@ -97,15 +102,16 @@ SAIL_VM_SRCS += riscv_vmem.sail PRELUDE = prelude.sail $(SAIL_XLEN) $(SAIL_FLEN) $(SAIL_VLEN) prelude_mem_metadata.sail prelude_mem.sail SAIL_REGS_SRCS = riscv_reg_type.sail riscv_freg_type.sail riscv_regs.sail riscv_pc_access.sail riscv_sys_regs.sail +SAIL_REGS_SRCS += riscv_hext_regs.sail SAIL_REGS_SRCS += riscv_pmp_regs.sail riscv_pmp_control.sail SAIL_REGS_SRCS += riscv_ext_regs.sail $(SAIL_CHECK_SRCS) SAIL_REGS_SRCS += riscv_vreg_type.sail riscv_vext_regs.sail 
SAIL_ARCH_SRCS = $(PRELUDE) SAIL_ARCH_SRCS += riscv_types_common.sail riscv_types_ext.sail riscv_types.sail -SAIL_ARCH_SRCS += riscv_vmem_types.sail $(SAIL_REGS_SRCS) $(SAIL_SYS_SRCS) riscv_platform.sail +SAIL_ARCH_SRCS += riscv_types_hext.sail riscv_vmem_types.sail $(SAIL_REGS_SRCS) $(SAIL_SYS_SRCS) riscv_platform.sail SAIL_ARCH_SRCS += riscv_mem.sail $(SAIL_VM_SRCS) -SAIL_ARCH_RVFI_SRCS = $(PRELUDE) rvfi_dii.sail riscv_types_common.sail riscv_types_ext.sail riscv_types.sail riscv_vmem_types.sail $(SAIL_REGS_SRCS) $(SAIL_SYS_SRCS) riscv_platform.sail riscv_mem.sail $(SAIL_VM_SRCS) riscv_types_kext.sail +SAIL_ARCH_RVFI_SRCS = $(PRELUDE) rvfi_dii.sail riscv_types_common.sail riscv_types_ext.sail riscv_types.sail riscv_vmem_types.sail riscv_types_hext.sail $(SAIL_REGS_SRCS) $(SAIL_SYS_SRCS) riscv_platform.sail riscv_mem.sail $(SAIL_VM_SRCS) riscv_types_kext.sail SAIL_ARCH_SRCS += riscv_types_kext.sail # Shared/common code for the cryptography extension. SAIL_STEP_SRCS = riscv_step_common.sail riscv_step_ext.sail riscv_decode_ext.sail riscv_fetch.sail riscv_step.sail diff --git a/c_emulator/riscv_platform.c b/c_emulator/riscv_platform.c index b82a5fbb7..3aaa3dfab 100644 --- a/c_emulator/riscv_platform.c +++ b/c_emulator/riscv_platform.c @@ -72,6 +72,11 @@ bool sys_enable_zicboz(unit u) return rv_enable_zicboz; } +bool sys_enable_hext(unit u) +{ + return rv_enable_hext; +} + uint64_t sys_pmp_count(unit u) { return rv_pmp_count; diff --git a/c_emulator/riscv_platform.h b/c_emulator/riscv_platform.h index 8e2dc265a..3091aebc7 100644 --- a/c_emulator/riscv_platform.h +++ b/c_emulator/riscv_platform.h @@ -13,6 +13,7 @@ bool sys_enable_vext(unit); bool sys_enable_bext(unit); bool sys_enable_zicbom(unit); bool sys_enable_zicboz(unit); +bool sys_enable_hext(unit); uint64_t sys_pmp_count(unit); uint64_t sys_pmp_grain(unit); diff --git a/c_emulator/riscv_platform_impl.c b/c_emulator/riscv_platform_impl.c index ec5b452bf..2e884fdf0 100644 --- a/c_emulator/riscv_platform_impl.c 
+++ b/c_emulator/riscv_platform_impl.c @@ -20,6 +20,7 @@ bool rv_enable_vext = true; bool rv_enable_bext = false; bool rv_enable_zicbom = false; bool rv_enable_zicboz = false; +bool rv_enable_hext = false; bool rv_enable_dirty_update = false; bool rv_enable_misaligned = false; diff --git a/c_emulator/riscv_platform_impl.h b/c_emulator/riscv_platform_impl.h index a7cf134f8..50aa4d1bd 100644 --- a/c_emulator/riscv_platform_impl.h +++ b/c_emulator/riscv_platform_impl.h @@ -24,6 +24,7 @@ extern bool rv_enable_vext; extern bool rv_enable_bext; extern bool rv_enable_zicbom; extern bool rv_enable_zicboz; +extern bool rv_enable_hext; extern bool rv_enable_writable_misa; extern bool rv_enable_dirty_update; extern bool rv_enable_misaligned; diff --git a/c_emulator/riscv_sim.c b/c_emulator/riscv_sim.c index 3f812c5ec..7872dbac8 100644 --- a/c_emulator/riscv_sim.c +++ b/c_emulator/riscv_sim.c @@ -134,6 +134,7 @@ static struct option options[] = { {"pmp-count", required_argument, 0, OPT_PMP_COUNT }, {"pmp-grain", required_argument, 0, OPT_PMP_GRAIN }, {"enable-next", no_argument, 0, 'N' }, + {"enable-hext", no_argument, 0, 'H' }, {"ram-size", required_argument, 0, 'z' }, {"disable-compressed", no_argument, 0, 'C' }, {"disable-writable-misa", no_argument, 0, 'I' }, @@ -272,6 +273,7 @@ static int process_args(int argc, char **argv) c = getopt_long(argc, argv, "a" "B" + "H" "d" "m" "P" @@ -310,6 +312,10 @@ static int process_args(int argc, char **argv) fprintf(stderr, "enabling B extension.\n"); rv_enable_bext = true; break; + case 'H': + fprintf(stderr, "enabling hypervisor extension.\n"); + rv_enable_hext = true; + break; case 'd': fprintf(stderr, "enabling dirty update.\n"); rv_enable_dirty_update = true; diff --git a/model/prelude.sail b/model/prelude.sail index 8c03c58da..ee3184092 100644 --- a/model/prelude.sail +++ b/model/prelude.sail @@ -201,6 +201,17 @@ overload reverse = {reverse_bits_in_byte} overload operator / = {quot_positive_round_zero, quot_round_zero} overload 
operator * = {mult_atom, mult_int} +/* helpers for bitvector option type */ + +val some_or : forall 'n, 'n >= 0 . (option(bits('n)), bits('n)) -> bits('n) +function some_or(o, d) = match o { + Some(v) => v, + None() => d, +} + +val some_or_zero : forall 'n, 'n >= 0. (implicit('n), option(bits('n))) -> bits('n) +function some_or_zero(n, o) = some_or(o, zeros(n)) + /* helper for vector extension * 1. EEW between 8 and 64 * 2. EMUL in vmvr.v instructions between 1 and 8 diff --git a/model/riscv_csr_hext.sail b/model/riscv_csr_hext.sail new file mode 100644 index 000000000..1a9e422d0 --- /dev/null +++ b/model/riscv_csr_hext.sail @@ -0,0 +1,49 @@ +/*=======================================================================================*/ +/* This Sail RISC-V architecture model, comprising all files and */ +/* directories except where otherwise noted is subject the BSD */ +/* two-clause license in the LICENSE file. */ +/* */ +/* SPDX-License-Identifier: BSD-2-Clause */ +/*=======================================================================================*/ +/* Mapping of hypervisor extension CSR addresses to their names. 
*/ + +/* extra machine registers */ +mapping clause csr_name_map = 0x34B <-> "mtval2" +mapping clause csr_name_map = 0x34A <-> "mtinst" + +/* hypervisor trap setup */ +mapping clause csr_name_map = 0x600 <-> "hstatus" +mapping clause csr_name_map = 0x602 <-> "hedeleg" +mapping clause csr_name_map = 0x603 <-> "hideleg" +mapping clause csr_name_map = 0x604 <-> "hie" +mapping clause csr_name_map = 0x606 <-> "hcounteren" +mapping clause csr_name_map = 0x607 <-> "hgeie" + +/* hypervisor counter/timer virtualization */ +mapping clause csr_name_map = 0x605 <-> "htimedelta" +mapping clause csr_name_map = 0x615 <-> "htimedeltah" + +/* hypervisor config */ +mapping clause csr_name_map = 0x60A <-> "henvcfg" +mapping clause csr_name_map = 0x61A <-> "henvcfgh" + +/* hypervisor trap handling */ +mapping clause csr_name_map = 0x643 <-> "htval" +mapping clause csr_name_map = 0x644 <-> "hip" +mapping clause csr_name_map = 0x645 <-> "hvip" +mapping clause csr_name_map = 0x64A <-> "htinst" +mapping clause csr_name_map = 0xE12 <-> "hgeip" + +/* hypervisor protection and translation */ +mapping clause csr_name_map = 0x680 <-> "hgatp" + +/* virtual supervisor registers */ +mapping clause csr_name_map = 0x200 <-> "vsstatus" +mapping clause csr_name_map = 0x204 <-> "vsie" +mapping clause csr_name_map = 0x205 <-> "vstvec" +mapping clause csr_name_map = 0x240 <-> "vsscratch" +mapping clause csr_name_map = 0x241 <-> "vsepc" +mapping clause csr_name_map = 0x242 <-> "vscause" +mapping clause csr_name_map = 0x243 <-> "vstval" +mapping clause csr_name_map = 0x244 <-> "vsip" +mapping clause csr_name_map = 0x280 <-> "vsatp" diff --git a/model/riscv_fetch.sail b/model/riscv_fetch.sail index 170c38712..c9492bb88 100644 --- a/model/riscv_fetch.sail +++ b/model/riscv_fetch.sail @@ -20,16 +20,16 @@ function fetch() -> FetchResult = Ext_FetchAddr_Error(e) => F_Ext_Error(e), Ext_FetchAddr_OK(use_pc) => { if (use_pc[0] != bitzero | (use_pc[1] != bitzero & not(extensionEnabled(Ext_Zca)))) - then 
F_Error(E_Fetch_Addr_Align(), PC) + then F_Error(E_Fetch_Addr_Align(), mem_exception_context(PC, cur_virtualization == V1)) else match translateAddr(use_pc, Execute()) { - TR_Failure(e, _) => F_Error(e, PC), + TR_Failure(e, c, _) => F_Error(e, c), TR_Address(ppclo, _) => { /* split instruction fetch into 16-bit granules to handle RVC, as * well as to generate precise fault addresses in any fetch * exceptions. */ match mem_read(Execute(), ppclo, 2, false, false, false) { - MemException(e) => F_Error(e, PC), + MemException(e) => F_Error(e, mem_exception_context(PC, cur_virtualization == V1)), MemValue(ilo) => { if isRVC(ilo) then F_RVC(ilo) @@ -40,10 +40,10 @@ function fetch() -> FetchResult = Ext_FetchAddr_Error(e) => F_Ext_Error(e), Ext_FetchAddr_OK(use_pc_hi) => { match translateAddr(use_pc_hi, Execute()) { - TR_Failure(e, _) => F_Error(e, PC_hi), + TR_Failure(e, c, _) => F_Error(e, c), TR_Address(ppchi, _) => { match mem_read(Execute(), ppchi, 2, false, false, false) { - MemException(e) => F_Error(e, PC_hi), + MemException(e) => F_Error(e, mem_exception_context(PC_hi, cur_virtualization == V1)), MemValue(ihi) => F_Base(append(ihi, ilo)) } } diff --git a/model/riscv_fetch_rvfi.sail b/model/riscv_fetch_rvfi.sail index b81d43915..cb77b9c3f 100644 --- a/model/riscv_fetch_rvfi.sail +++ b/model/riscv_fetch_rvfi.sail @@ -18,9 +18,9 @@ function fetch() -> FetchResult = { Ext_FetchAddr_OK(use_pc) => { /* then check PC alignment */ if (use_pc[0] != bitzero | (use_pc[1] != bitzero & not(extensionEnabled(Ext_Zca)))) - then F_Error(E_Fetch_Addr_Align(), PC) + then F_Error(E_Fetch_Addr_Align(), mem_exception_context(PC, cur_virtualization == V1)) else match translateAddr(use_pc, Execute()) { - TR_Failure(e, _) => F_Error(e, PC), + TR_Failure(e, c, _) => F_Error(e, c), TR_Address(_, _) => { let i = rvfi_instruction[rvfi_insn]; rvfi_inst_data[rvfi_insn] = zero_extend(i); @@ -33,7 +33,7 @@ function fetch() -> FetchResult = { Ext_FetchAddr_Error(e) => F_Ext_Error(e), 
Ext_FetchAddr_OK(use_pc_hi) => match translateAddr(use_pc_hi, Execute()) { - TR_Failure(e, _) => F_Error(e, PC), + TR_Failure(e, c, _) => F_Error(e, c), TR_Address(_, _) => F_Base(i) } } diff --git a/model/riscv_hext_control.sail b/model/riscv_hext_control.sail new file mode 100644 index 000000000..fcde9631d --- /dev/null +++ b/model/riscv_hext_control.sail @@ -0,0 +1,190 @@ +/*=======================================================================================*/ +/* This Sail RISC-V architecture model, comprising all files and */ +/* directories except where otherwise noted is subject the BSD */ +/* two-clause license in the LICENSE file. */ +/* */ +/* SPDX-License-Identifier: BSD-2-Clause */ +/*=======================================================================================*/ + +function clause is_CSR_defined(0x34A) = extensionEnabled(Ext_H) // mtinst +function clause is_CSR_defined(0x34B) = extensionEnabled(Ext_H) // mtval2 +function clause is_CSR_defined(0x600) = extensionEnabled(Ext_H) // hstatus +function clause is_CSR_defined(0x602) = extensionEnabled(Ext_H) // hedeleg +function clause is_CSR_defined(0x603) = extensionEnabled(Ext_H) // hideleg +function clause is_CSR_defined(0x604) = extensionEnabled(Ext_H) // hie +function clause is_CSR_defined(0x606) = extensionEnabled(Ext_H) // hcounteren +function clause is_CSR_defined(0x607) = extensionEnabled(Ext_H) // hgeie +function clause is_CSR_defined(0x643) = extensionEnabled(Ext_H) // htval +function clause is_CSR_defined(0x644) = extensionEnabled(Ext_H) // hip +function clause is_CSR_defined(0x645) = extensionEnabled(Ext_H) // hvip +function clause is_CSR_defined(0x64A) = extensionEnabled(Ext_H) // htinst +function clause is_CSR_defined(0xE12) = extensionEnabled(Ext_H) // hgeip +function clause is_CSR_defined(0x60A) = extensionEnabled(Ext_H) // henvcfg +function clause is_CSR_defined(0x61A) = extensionEnabled(Ext_H) // henvcfgh +function clause is_CSR_defined(0x680) = extensionEnabled(Ext_H) // hgatp 
+function clause is_CSR_defined(0x605) = extensionEnabled(Ext_H) // htimedelta +function clause is_CSR_defined(0x615) = extensionEnabled(Ext_H) // htimedeltah +function clause is_CSR_defined(0x200) = extensionEnabled(Ext_H) // vsstatus +function clause is_CSR_defined(0x204) = extensionEnabled(Ext_H) // vsie +function clause is_CSR_defined(0x205) = extensionEnabled(Ext_H) // vstvec +function clause is_CSR_defined(0x240) = extensionEnabled(Ext_H) // vsscratch +function clause is_CSR_defined(0x241) = extensionEnabled(Ext_H) // vsepc +function clause is_CSR_defined(0x242) = extensionEnabled(Ext_H) // vscause +function clause is_CSR_defined(0x243) = extensionEnabled(Ext_H) // vstval +function clause is_CSR_defined(0x244) = extensionEnabled(Ext_H) // vsip +function clause is_CSR_defined(0x280) = extensionEnabled(Ext_H) // vsatp + +function clause read_CSR(0x34A) = mtinst +function clause read_CSR(0x34B) = mtval2 +function clause read_CSR(0x600) = hstatus.bits +function clause read_CSR(0x602) = hedeleg.bits +function clause read_CSR(0x603) = hideleg.bits +function clause read_CSR(0x604) = lower_mie_to_hie(mie).bits +function clause read_CSR(0x606) = zero_extend(hcounteren.bits) +function clause read_CSR(0x607) = hgeie +function clause read_CSR(0x643) = htval +function clause read_CSR(0x644) = lower_mip_to_hip(mip).bits +function clause read_CSR(0x645) = lower_mip_to_hvip(mip).bits +function clause read_CSR(0x64A) = htinst +function clause read_CSR(0xE12) = hgeip +function clause read_CSR(0x60A) = henvcfg.bits[sizeof(xlen) - 1 .. 0] +function clause read_CSR(0x61A if xlen == 32) = henvcfg.bits[63 .. 32] +function clause read_CSR(0x605) = htimedelta[sizeof(xlen) - 1 .. 0] +function clause read_CSR(0x615 if xlen == 32) = htimedelta[63 .. 32] +function clause read_CSR(0x200) = vsstatus.bits[sizeof(xlen) - 1 .. 
0] +function clause read_CSR(0x204) = lower_mie_to_vsie(mie, hideleg).bits +function clause read_CSR(0x205) = vstvec.bits +function clause read_CSR(0x240) = vsscratch +function clause read_CSR(0x241) = vsepc +function clause read_CSR(0x242) = vscause.bits +function clause read_CSR(0x243) = vstval +function clause read_CSR(0x244) = lower_mip_to_vsip(mip, hideleg).bits +function clause read_CSR(0x280) = vsatp + +function clause write_CSR(0x34A, value) = { mtinst = value; mtinst } +function clause write_CSR(0x34B, value) = { mtval2 = value; mtval2 } +function clause write_CSR(0x600, value) = { hstatus = legalize_hstatus(hstatus, value); hstatus.bits } +function clause write_CSR(0x602, value) = { hedeleg = legalize_hedeleg(hedeleg, value); hedeleg.bits } +function clause write_CSR(0x603, value) = { hideleg = legalize_hideleg(hideleg, value); hideleg.bits } +function clause write_CSR(0x604, value) = { mie = legalize_hie(mie, value); lower_mie_to_hie(mie).bits } +function clause write_CSR(0x606, value) = { hcounteren = legalize_hcounteren(hcounteren, value); zero_extend(hcounteren.bits) } +function clause write_CSR(0x607, value) = { hgeie = legalize_hgeie(value); hgeie } +function clause write_CSR((0x60A, value) if xlen == 32) = { henvcfg = legalize_henvcfg(henvcfg, henvcfg.bits[63 .. 32] @ value); henvcfg.bits[(sizeof(xlen) - 1) .. 0] } +function clause write_CSR((0x60A, value) if xlen == 64) = { henvcfg = legalize_henvcfg(henvcfg, value); henvcfg.bits[(sizeof(xlen) - 1) .. 0] } +function clause write_CSR((0x61A, value) if xlen == 32) = { henvcfg = legalize_henvcfg(henvcfg, value @ henvcfg.bits[31 .. 0]); henvcfg.bits[63 .. 
32]} +function clause write_CSR(0x643, value) = { htval = value; htval } +function clause write_CSR(0x644, value) = { mip = legalize_hip(mip, value); lower_mip_to_hip(mip).bits } +function clause write_CSR(0x645, value) = { mip = legalize_hvip(mip, value); lower_mip_to_hvip(mip).bits } +function clause write_CSR(0x64A, value) = { htinst = value; htinst } +// function clause write_CSR(0xE12, _) = { hgeip } // hgeip is read-only +function clause write_CSR(0x605, value) = { htimedelta[(sizeof(xlen) - 1) .. 0] = value; htimedelta[(sizeof(xlen) - 1) .. 0] } +function clause write_CSR((0x615, value) if xlen == 32) = {htimedelta[63 .. 32] = value; htimedelta[63 .. 32] } +function clause write_CSR(0x200, value) = { vsstatus = legalize_vsstatus(vsstatus, value); vsstatus.bits[(sizeof(xlen) - 1) .. 0] } +function clause write_CSR(0x204, value) = { mie = legalize_vsie(mie, hideleg, value); lower_mie_to_vsie(mie, hideleg).bits } +function clause write_CSR(0x205, value) = { vstvec = Mk_Mtvec(value); vstvec.bits } +function clause write_CSR(0x240, value) = { vsscratch = value; vsscratch } +function clause write_CSR(0x241, value) = { vsepc = value; vsepc } +function clause write_CSR(0x242, value) = { vscause = Mk_Mcause(value); vscause.bits } +function clause write_CSR(0x243, value) = { vstval = value; vstval } +function clause write_CSR(0x244, value) = { mip = legalize_vsip(mip, hideleg, value); lower_mip_to_vsip(mip, hideleg).bits } +function clause write_CSR(0x280, value) = { vsatp = value; vsatp } + +/*! 
Initialize hypervisor extension */ +function init_hext() -> unit = { + /* Start with V=0 */ + + cur_virtualization = V0; + + /* Init H-extension specific fields of M-mode CSRs */ + + mstatus[MPV] = virtMode_to_bits(cur_virtualization); + mstatus[GVA] = 0b0; + + mtinst = zeros(); + mtval2 = zeros(); + + mideleg = [mideleg with VSEI = 0b1, VSTI = 0b1, VSSI = 0b1]; + mideleg = [mideleg with SGEI = 0b1]; /* Note: SGEI is "read-only one" only if GEILEN > 0 */ + + /* Init H-extension specific HS-mode CSRs */ + + hstatus = Mk_Hstatus(zeros()); + hstatus = set_hstatus_VSXL(hstatus, misa[MXL]); /* Dynamic XLEN changes are not (yet) supported */ + + hedeleg = Mk_Medeleg(zeros()); + hideleg = Mk_Minterrupts(zeros()); + hcounteren = Mk_Counteren(zeros()); + htval = zeros(); + htinst = zeros(); + hgeie = legalize_hgeie(zeros()); + hgeip = zeros(); + henvcfg = Mk_HEnvcfg(zeros()); + htimedelta = zeros(); + + /* Init VS-mode CSRs */ + + vsstatus = Mk_Sstatus(zeros()); + vsstatus = set_sstatus_UXL(vsstatus, misa[MXL]); /* Dynamic XLEN changes are not (yet) supported */ + + vstvec = Mk_Mtvec(zeros()); + vsscratch = zeros(); + vsepc = zeros(); + vscause = Mk_Mcause(zeros()); + vstval = zeros(); + vsatp = zeros(); +} + +/* FIXME: The cases below should probably be checked in unittests instead of listed here + * + * According to the spec.
(V20211203 - Section 9.6), this should happen in the following cases: + * [X] in VS-mode, attempts to access a non-high-half counter CSR when the corresponding bit + * in hcounteren is 0 and the same bit in mcounteren is 1; + * [ ] in VS-mode, if XLEN=32, attempts to access a high-half counter CSR when the + * corresponding bit in hcounteren is 0 and the same bit in mcounteren is 1; + * [X] in VU-mode, attempts to access a non-high-half counter CSR when the corresponding bit + * in either hcounteren or scounteren is 0 and the same bit in mcounteren is 1; + * [ ] in VU-mode, if XLEN=32, attempts to access a high-half counter CSR when the + * corresponding bit in either hcounteren or scounteren is 0 and the same bit in + * mcounteren is 1; + * [x] in VS-mode or VU-mode, attempts to access an implemented non-high-half hypervisor CSR + * or VS CSR when the same access (read/write) would be allowed in HS-mode, assuming + * mstatus.TVM=0; + * [ ] in VS-mode or VU-mode, if XLEN=32, attempts to access an implemented high-half + * hypervisor CSR or high-half VS CSR when the same access (read/write) to the CSR’s + * low-half partner would be allowed in HS-mode, assuming mstatus.TVM=0; + * [x] in VU-mode, attempts to access an implemented non-high-half supervisor CSR when the + * same access (read/write) would be allowed in HS-mode, assuming mstatus.TVM=0; + * [ ] in VU-mode, if XLEN=32, attempts to access an implemented high-half supervisor CSR + * when the same access to the CSR’s low-half + * partner would be allowed in HS-mode, assuming mstatus.TVM=0; + * [ ] in VS-mode, attempts to access satp, when hstatus.VTVM=1 + */ +/*! Determine if an illegal CSR access should raise a "virtual instruction exception" instead of an "illegal instruction exception" */ +function csr_access_raises_virtual_instr(csr : csreg, p : Privilege, v: Virtualization, isWrite : bool) -> bool = { + if v == V0 + then false + // Would HS-access be allowed? 
 + // Note: Can't reuse check_CSR since it also checks mstatus.TVM + else is_CSR_defined(csr) & + check_CSR_access(csrAccess(csr), csrPriv(csr), Supervisor, V0, isWrite) & + check_Counteren(csr, Supervisor, V0) & + check_seed_CSR(csr, Supervisor, isWrite) +} + +/*! Is it legal for mtinst or htinst to hold a transformed instruction on a given fault */ +function exc_causes_transformed_inst_in_xtinst(e : ExceptionType) -> bool = + match e { + E_Load_Addr_Align() => true, + E_Load_Access_Fault() => true, + E_SAMO_Addr_Align() => true, + E_SAMO_Access_Fault() => true, + E_Load_Page_Fault() => true, + E_SAMO_Page_Fault() => true, + E_Load_GPage_Fault() => true, + E_SAMO_GPage_Fault() => true, + _ => false, + } + +// FIXME: Define proper platform parameter for this +val plat_xtinst_has_transformed_inst : unit -> bool +function plat_xtinst_has_transformed_inst() = extensionEnabled(Ext_H) diff --git a/model/riscv_hext_regs.sail b/model/riscv_hext_regs.sail new file mode 100644 index 000000000..63d3cbb7c --- /dev/null +++ b/model/riscv_hext_regs.sail @@ -0,0 +1,330 @@ +/*=======================================================================================*/ +/* This Sail RISC-V architecture model, comprising all files and                         */ +/* directories except where otherwise noted is subject the BSD                           */ +/* two-clause license in the LICENSE file.                                               */ +/*                                                                                       */ +/* SPDX-License-Identifier: BSD-2-Clause                                                 */ +/*=======================================================================================*/ + +/* Current Virtualization Mode */ + +register cur_virtualization : Virtualization + +/* Hypervisor Status Register */ + +bitfield Hstatus : xlenbits = { + /* VSXL only exists if HSXLEN == 64 so it's modelled via + * an explicit getter and setter below. */ + /* VSXL : 33 .. 32 */ + VTSR : 22, + VTW : 21, + VTVM : 20, + VGEIN : 17 ..
12, + HU : 9, + SPVP : 8, + SPV : 7, + GVA : 6, + VSBE : 5 +} +register hstatus : Hstatus + +function get_hstatus_VSXL(h : Hstatus) -> arch_xlen = { + if sizeof(xlen) == 32 + then arch_to_bits(RV32) + else h.bits[33 .. 32] +} + +function set_hstatus_VSXL(h : Hstatus, a : arch_xlen) -> Hstatus = { + if sizeof(xlen) == 32 + then h + else Mk_Hstatus([h.bits with 33..32 = a]) +} + +function legalize_hstatus(o : Hstatus, v : xlenbits) -> Hstatus = { + let v = Mk_Hstatus(v); + + let h = [Mk_Hstatus(zeros()) with + VTSR = v[VTSR], + VTW = v[VTW], + VTVM = v[VTVM], + VGEIN = v[VGEIN], /* Note: 0 <= hstatus.VGEIN <= GEILEN */ + HU = v[HU], + SPVP = v[SPVP], + SPV = v[SPV], + GVA = v[GVA], + VSBE = v[VSBE] + ]; + + /* Dynamic XLEN changes are not (yet) supported */ + let h = set_hstatus_VSXL(h, get_hstatus_VSXL(o)); + + h +} + +/* Hypervisor Trap Delegation Registers */ + +register hedeleg : Medeleg + +function legalize_hedeleg(o: Medeleg, v: xlenbits) -> Medeleg = { + let h = Mk_Medeleg(v); + /* Mask read-only zero bits */ + let h = [h with SAMO_GPage_Fault = 0b0]; + let h = [h with Virtual_Instr = 0b0]; + let h = [h with Load_GPage_Fault = 0b0]; + let h = [h with Fetch_GPage_Fault = 0b0]; + let h = [h with MEnvCall = 0b0]; + let h = [h with VSEnvCall = 0b0]; + let h = [h with SEnvCall = 0b0]; + h +} + +register hideleg : Minterrupts + +function legalize_hideleg(h : Minterrupts, v : xlenbits) -> Minterrupts = { + let v = Mk_Minterrupts(v); + /* VS-level interrupts can be delegated */ + let h = [h with VSEI = v[VSEI]]; + let h = [h with VSTI = v[VSTI]]; + let h = [h with VSSI = v[VSSI]]; + /* Other interrupts can't be delegated to VS-mode */ + h +} + +/* Hypervisor Interrupt enable/pending registers */ + +/*! 
Returns new mip from the previous mip (m) and the written hvip (v) */ +function legalize_hvip(m: Minterrupts, v : xlenbits) -> Minterrupts = { + let v = Mk_Minterrupts(v); + let m = [m with VSEI = v[VSEI]]; + let m = [m with VSTI = v[VSTI]]; + let m = [m with VSSI = v[VSSI]]; + // TODO: Hook to update non-standard portion + m +} + +/*! Returns hvip view of mip (m) */ +function lower_mip_to_hvip(m : Minterrupts) -> Minterrupts = { + let h = Mk_Minterrupts(zeros()); + let h = [h with VSEI = m[VSEI]]; + let h = [h with VSTI = m[VSTI]]; + let h = [h with VSSI = m[VSSI]]; + // TODO: Hook to read non-standard portion + h +} + +/*! Returns new mip from the previous mip (m) and the written hip (v) */ +function legalize_hip(m : Minterrupts, v : xlenbits) -> Minterrupts = { + let v = Mk_Minterrupts(v); + let m = [m with VSSI = v[VSSI]]; + // TODO: Hook to update non-standard portion + m +} + +/*! Returns hip view of mip (m) */ +function lower_mip_to_hip(m : Minterrupts) -> Minterrupts = { + let h = Mk_Minterrupts(zeros()); + let h = [h with SGEI = m[SGEI]]; + let h = [h with VSEI = m[VSEI]]; + let h = [h with VSTI = m[VSTI]]; + let h = [h with VSSI = m[VSSI]]; + // TODO: Hook to read non-standard portion + h +} + +/*! Returns new mie from the previous mie (m) and the written hie (v) */ +function legalize_hie(m : Minterrupts, v : xlenbits) -> Minterrupts = { + let v = Mk_Minterrupts(v); + let m = [m with SGEI = v[SGEI]]; + let m = [m with VSEI = v[VSEI]]; + let m = [m with VSTI = v[VSTI]]; + let m = [m with VSSI = v[VSSI]]; + /* Note: Writable bits in sie are read-only zero in hie */ + // TODO: Hook to update non-standard portion + m +} + +/*! 
Returns hie view of mie */ +function lower_mie_to_hie(m : Minterrupts) -> Minterrupts = { + let h = Mk_Minterrupts(zeros()); + let h = [h with SGEI = m[SGEI]]; + let h = [h with VSEI = m[VSEI]]; + let h = [h with VSTI = m[VSTI]]; + let h = [h with VSSI = m[VSSI]]; + /* Note: Writable bits in sie are read-only zero in hie */ + // TODO: Hook to read non-standard portion + h +} + +/* Hypervisor Guest External Interrupts */ + +register hgeie : xlenbits +register hgeip : xlenbits + +// FIXME: hgeip should be written by interrupt controller +// TODO: hip.SGEI is 1 if bitwise and of hgeip and hgeie is nonzero in any bit +// TODO: hip.VSEI is 1 if hgeip[hstatus.VGEIN] is 1 + +function legalize_hgeie(v : xlenbits) -> xlenbits = { + [v with 0 = bitzero] +} + +/* Hypervisor environment configuration register */ + +bitfield HEnvcfg : bits(64) = { + // Supervisor TimeCmp Extension + STCE : 63, + // Page Based Memory Types Extension + PBMTE : 62, + // Hardware update of A/D bits Extension + ADUE : 61, + // Reserved WPRI bits. + wpri_1 : 60 .. 8, + // Cache Block Zero instruction Enable + CBZE : 7, + // Cache Block Clean and Flush instruction Enable + CBCFE : 6, + // Cache Block Invalidate instruction Enable + CBIE : 5 .. 4, + // Reserved WPRI bits. + wpri_0 : 3 .. 1, + // Fence of I/O implies Memory + FIOM : 0, +} +register henvcfg : HEnvcfg + +function legalize_henvcfg(o : HEnvcfg, v : bits(64)) -> HEnvcfg = { + let v = Mk_HEnvcfg(v); + let o = [o with FIOM = if sys_enable_writable_fiom() then v[FIOM] else 0b0]; + // Other extensions are not implemented yet so all other fields are read only zero. 
+ o +} + +/* Hypervisor hpm-related registers */ + +register hcounteren : Counteren + +function legalize_hcounteren(c : Counteren, v : xlenbits) -> Counteren = { + /* no HPM counters yet */ + [c with IR = [v[2]], TM = [v[1]], CY = [v[0]]] +} + +register htimedelta : bits(64) + +/* Machine & Hypervisor trap information */ + +register mtinst : xlenbits +register mtval2 : xlenbits + +register htinst : xlenbits +register htval : xlenbits + +/* Hypervisor Guest Address Translation and Protection */ + +bitfield Hgatp64 : bits(64) = { + Mode : 63 .. 60, + Vmid : 57 .. 44, + PPN : 43 .. 0 +} + +function legalize_hgatp64(a : Architecture, o : bits(64), v : bits(64)) -> bits(64) = { + let h = Mk_Hgatp64(v[63 .. 60] @ 0b00 @ v[57 .. 44] @ v[43 .. 2] @ 0b00); + match hgatp64Mode_of_bits(a, h[Mode]) { + Some(Bare) => h[Mode] @ zeros(60), /* remaining fields should be zero when hgatp.Mode is Bare */ + Some(Sv32x4) => o, /* Sv32x4 is currently unsupported for hgatp64 */ + Some(Sv39x4) => h.bits, + Some(Sv48x4) => h.bits, + Some(Sv57x4) => o, /* Sv57x4 is not yet implemented */ + _ => o, /* (V)S-stage modes are illegal for hgatp */ + } +} + +bitfield Hgatp32 : bits(32) = { + Mode : 31, + Vmid : 28 .. 22, + PPN : 21 .. 0 +} + +function legalize_hgatp32(a : Architecture, o : bits(32), v : bits(32)) -> bits(32) = { + let h = Mk_Hgatp32(v[31 .. 31] @ 0b00 @ v[28 .. 22] @ v[21 .. 
2] @ 0b00); + match hgatp64Mode_of_bits(a, zero_extend(h[Mode])) { + None() => o, + Some(Bare) => h[Mode] @ zeros(31), /* remaining fields should be zero when hgatp.Mode is Bare */ + Some(_) => h.bits + } +} + +/* Virtual Supervisor Status Register */ + +register vsstatus : Sstatus + +function legalize_vsstatus(vs: Sstatus, v: xlenbits) -> Sstatus = { + let m = Mk_Mstatus(vs.bits); /* Convert to Mstatus */ + let s = legalize_sstatus(m, v); /* Legalize Mstatus for sstatus */ + Mk_Sstatus(s.bits) /* Convert back to Sstatus */ +} + +/* Virtual Supervisor Trap Vector Base Address Register */ + +register vstvec : Mtvec + +/* Virtual Supervisor Scratch Register */ + +register vsscratch : xlenbits + +/* Virtual Supervisor Exception Program Counter */ + +register vsepc : xlenbits + +/* Virtual Supervisor Cause Register */ + +register vscause : Mcause + +/* Virtual Supervisor Trap Value Register */ + +register vstval : xlenbits + +/* Virtual Supervisor interrupt enable/pending registers */ + +/*! Returns new mip from the previous mip (m) and the written vsip (v) as delegated by hideleg (d) */ +function legalize_vsip(m: Minterrupts, d : Minterrupts, v : xlenbits) -> Minterrupts = { + let v = Mk_Minterrupts(v); + /* Since S-mode bits in vsip are aliases for their corresponding VS-mode bits + in hip when delegated by hideleg, only vsip.SSIP (hip.VSSIP) might be writable */ + let m = [m with VSSI = v[SSI] & d[VSSI]]; + m +} + +/*! Returns vsip view of mip (m) as delegated by hideleg (d) */ +function lower_mip_to_vsip(m : Minterrupts, d : Minterrupts) -> Minterrupts = { + let vs = Mk_Minterrupts(zeros()); + /* Standard S-mode bits in vsip are aliases of corresponding VS-mode bits in + hip if delegated by hideleg */ + let vs = [vs with SEI = m[VSEI] & d[VSEI]]; + let vs = [vs with STI = m[VSTI] & d[VSTI]]; + let vs = [vs with SSI = m[VSSI] & d[VSSI]]; + vs +} + +/*! 
Returns new mie from previous mie (m) and the written vsie (v) as delegated by hideleg (d) */ +function legalize_vsie(m : Minterrupts, d : Minterrupts, v : xlenbits) -> Minterrupts = { + let v = Mk_Minterrupts(v); + let m = [m with VSEI = v[SEI] & d[VSEI]]; + let m = [m with VSTI = v[STI] & d[VSTI]]; + let m = [m with VSSI = v[SSI] & d[VSSI]]; + m +} + +/*! Returns vsie view of mie (m) as delegated by hideleg (d) */ +function lower_mie_to_vsie(m : Minterrupts, d : Minterrupts) -> Minterrupts = { + let vs = Mk_Minterrupts(zeros()); + /* Standard S-mode bits in vsie are aliases of corresponding VS-mode bits in + hie if delegated by hideleg */ + let vs = [vs with SEI = m[VSEI] & d[VSEI]]; + let vs = [vs with STI = m[VSTI] & d[VSTI]]; + let vs = [vs with SSI = m[VSSI] & d[VSSI]]; + vs +} + +/* Virtual Supervisor address translation & Protection */ + +register vsatp : xlenbits diff --git a/model/riscv_insts_aext.sail b/model/riscv_insts_aext.sail index 17863b820..c1167fb7d 100644 --- a/model/riscv_insts_aext.sail +++ b/model/riscv_insts_aext.sail @@ -89,7 +89,7 @@ function clause execute(LOADRES(aq, rl, rs1, width, rd)) = { if not(is_aligned(vaddr, width)) then { handle_mem_exception(vaddr, E_Load_Addr_Align()); RETIRE_FAIL } else match translateAddr(vaddr, Read(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); RETIRE_FAIL }, TR_Address(addr, _) => match mem_read(Read(Data), addr, width_bytes, aq, aq & rl, true) { MemValue(result) => { load_reservation(addr); X(rd) = sign_extend(result); RETIRE_SUCCESS }, @@ -136,7 +136,7 @@ function clause execute (STORECON(aq, rl, rs2, rs1, width, rd)) = { else { match translateAddr(vaddr, Write(Data)) { /* Write and ReadWrite are equivalent here: * both result in a SAMO exception */ - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); RETIRE_FAIL }, TR_Address(addr, _) => { // Check 
reservation with physical address. if not(match_reservation(addr)) then { @@ -205,7 +205,7 @@ function clause execute (AMO(op, aq, rl, rs2, rs1, width, rd)) = { if not(is_aligned(vaddr, width)) then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); RETIRE_FAIL } else match translateAddr(vaddr, ReadWrite(Data, Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); RETIRE_FAIL }, TR_Address(addr, _) => { let eares = mem_write_ea(addr, width_bytes, aq & rl, rl, true); let rs2_val = X(rs2)[width_bytes * 8 - 1 .. 0]; diff --git a/model/riscv_insts_base.sail b/model/riscv_insts_base.sail index d99e74c65..0aed73e24 100644 --- a/model/riscv_insts_base.sail +++ b/model/riscv_insts_base.sail @@ -335,7 +335,7 @@ function clause execute(LOAD(imm, rs1, rd, is_unsigned, width, aq, rl)) = { if check_misaligned(vaddr, width) then { handle_mem_exception(vaddr, E_Load_Addr_Align()); RETIRE_FAIL } else match translateAddr(vaddr, Read(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); RETIRE_FAIL }, TR_Address(paddr, _) => match mem_read(Read(Data), paddr, width_bytes, aq, rl, false) { @@ -391,7 +391,7 @@ function clause execute (STORE(imm, rs2, rs1, width, aq, rl)) = { if check_misaligned(vaddr, width) then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); RETIRE_FAIL } else match translateAddr(vaddr, Write(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); RETIRE_FAIL }, TR_Address(paddr, _) => { let eares = mem_write_ea(paddr, width_bytes, aq, rl, false); match (eares) { @@ -625,15 +625,14 @@ mapping clause encdec = ECALL() <-> 0b000000000000 @ 0b00000 @ 0b000 @ 0b00000 @ 0b1110011 function clause execute ECALL() = { - let t : sync_exception = - struct { trap = match (cur_privilege) { - User => E_U_EnvCall(), - Supervisor => E_S_EnvCall(), - 
Machine => E_M_EnvCall() - }, - excinfo = (None() : option(xlenbits)), - ext = None() }; - set_next_pc(exception_handler(cur_privilege, CTL_TRAP(t), PC)); + let e : ExceptionType = match (cur_privilege, cur_virtualization) { + (User, _) => E_U_EnvCall(), + (Supervisor, V0) => E_S_EnvCall(), + (Supervisor, V1) => E_VS_EnvCall(), + (Machine,_) => E_M_EnvCall() + }; + let c : ExceptionContext = empty_exception_context(); + set_next_pc(exception_handler(cur_privilege, cur_virtualization, CTL_TRAP(e, c), PC)); RETIRE_FAIL } @@ -651,7 +650,7 @@ function clause execute MRET() = { else if not(ext_check_xret_priv (Machine)) then { ext_fail_xret_priv(); RETIRE_FAIL } else { - set_next_pc(exception_handler(cur_privilege, CTL_MRET(), PC)); + set_next_pc(exception_handler(cur_privilege, cur_virtualization, CTL_MRET(), PC)); RETIRE_SUCCESS } } @@ -665,18 +664,26 @@ mapping clause encdec = SRET() <-> 0b0001000 @ 0b00010 @ 0b00000 @ 0b000 @ 0b00000 @ 0b1110011 function clause execute SRET() = { - let sret_illegal : bool = match cur_privilege { - User => true, - Supervisor => not(extensionEnabled(Ext_S)) | mstatus[TSR] == 0b1, - Machine => not(extensionEnabled(Ext_S)) + let sret_exc : option(ExceptionType) = match (cur_privilege, cur_virtualization) { + (User, _) => Some(E_Illegal_Instr()), + (Supervisor, V0) => if not(extensionEnabled(Ext_S)) | mstatus[TSR] == 0b1 + then Some(E_Illegal_Instr()) + else None(), + (Supervisor, V1) => if hstatus[VTSR] == 0b1 + then Some(E_Virtual_Instr()) + else None(), + (Machine, _) => if not(extensionEnabled(Ext_S)) + then Some(E_Illegal_Instr()) + else None(), }; - if sret_illegal - then { handle_illegal(); RETIRE_FAIL } - else if not(ext_check_xret_priv (Supervisor)) - then { ext_fail_xret_priv(); RETIRE_FAIL } - else { - set_next_pc(exception_handler(cur_privilege, CTL_SRET(), PC)); - RETIRE_SUCCESS + match sret_exc { + Some(e) => { handle_instr_fault(e); RETIRE_FAIL }, + None() => if not(ext_check_xret_priv (Supervisor)) + then { 
ext_fail_xret_priv(); RETIRE_FAIL } + else { + set_next_pc(exception_handler(cur_privilege, cur_virtualization, CTL_SRET(), PC)); + RETIRE_SUCCESS + } } } @@ -701,13 +708,51 @@ union clause ast = WFI : unit mapping clause encdec = WFI() <-> 0b000100000101 @ 0b00000 @ 0b000 @ 0b00000 @ 0b1110011 +/*! + * Cause exception if WFI does not complete within an + * implementation-specific bounded time limit. This time limit + * may be zero. + */ +function timeout_wfi(timeout_exc: ExceptionType) -> Retired = { + /* Since the time limit can be zero, always causing an exception + * is an incomplete but valid implementation + * + * Check https://github.com/riscv/riscv-isa-manual/issues/961 for more on this + */ + handle_instr_fault(timeout_exc); + RETIRE_FAIL +} + function clause execute WFI() = - match cur_privilege { - Machine => { platform_wfi(); RETIRE_SUCCESS }, - Supervisor => if mstatus[TW] == 0b1 - then { handle_illegal(); RETIRE_FAIL } - else { platform_wfi(); RETIRE_SUCCESS }, - User => { handle_illegal(); RETIRE_FAIL } + match (cur_privilege, cur_virtualization) { + (Machine, _) => { platform_wfi(); RETIRE_SUCCESS }, + (Supervisor, V0) => if mstatus[TW] == 0b1 + then { timeout_wfi(E_Illegal_Instr()) } + else { platform_wfi(); RETIRE_SUCCESS }, + (Supervisor, V1) => if mstatus[TW] == 0b1 + then { timeout_wfi(E_Illegal_Instr()) } + else if hstatus[VTW] == 0b1 + then { timeout_wfi(E_Virtual_Instr()) } + else { platform_wfi(); RETIRE_SUCCESS }, + /* + * SPEC: "When TW=1, then if WFI is executed in any less-privileged mode, + * and it does not complete within an implementation-specific, + * bounded time limit, the WFI instruction causes an illegal + * instruction exception" + * + * SPEC: "When S-mode is implemented, then executing WFI in U-mode causes + * an illegal instruction exception, unless it completes within an + * implementation-specific, bounded time limit" + */ + (User, V0) => if mstatus[TW] == 0b1 | extensionEnabled(Ext_S) + then { 
timeout_wfi(E_Illegal_Instr()) } + else { handle_illegal(); RETIRE_FAIL }, // FIXME: WFI is "optionally available for U-mode" according to spec + /* + * SPEC: A virtual instruction exception is raised "in VU-mode, attempts to execute WFI when mstatus.TW=0" + */ + (User, V1) => if mstatus[TW] == 0b1 + then { timeout_wfi(E_Illegal_Instr()) } + else { handle_virtual_instr(); RETIRE_FAIL }, } mapping clause assembly = WFI() <-> "wfi" @@ -721,14 +766,20 @@ mapping clause encdec = SFENCE_VMA(rs1, rs2) function clause execute SFENCE_VMA(rs1, rs2) = { let addr : option(xlenbits) = if rs1 == 0b00000 then None() else Some(X(rs1)); let asid : option(xlenbits) = if rs2 == 0b00000 then None() else Some(X(rs2)); - match cur_privilege { - User => { handle_illegal(); RETIRE_FAIL }, - Supervisor => match (architecture(get_mstatus_SXL(mstatus)), mstatus[TVM]) { - (Some(_), 0b1) => { handle_illegal(); RETIRE_FAIL }, - (Some(_), 0b0) => { flush_TLB(asid, addr); RETIRE_SUCCESS }, - (_, _) => internal_error(__FILE__, __LINE__, "unimplemented sfence architecture") - }, - Machine => { flush_TLB(asid, addr); RETIRE_SUCCESS } + match (cur_privilege, cur_virtualization) { + (User, V0) => { handle_illegal(); RETIRE_FAIL }, + (User, V1) => { handle_virtual_instr(); RETIRE_FAIL }, + (Supervisor, V0) => match (architecture(get_mstatus_SXL(mstatus)), mstatus[TVM]) { + (Some(_), 0b1) => { handle_illegal(); RETIRE_FAIL }, + (Some(_), 0b0) => { flush_TLB(asid, Stage_S, addr); RETIRE_SUCCESS }, + (_, _) => internal_error(__FILE__, __LINE__, "unimplemented sfence architecture") + }, + (Supervisor, V1) => match (architecture(get_hstatus_VSXL(hstatus)), hstatus[VTVM]) { + (Some(_), 0b1) => { handle_virtual_instr(); RETIRE_FAIL }, + (Some(_), 0b0) => { flush_TLB(asid, Stage_VS, addr); RETIRE_SUCCESS }, + (_, _) => internal_error(__FILE__, __LINE__, "unimplemented sfence architecture") + }, + (Machine, _) => { flush_TLB(asid, Stage_S, addr); RETIRE_SUCCESS } } } diff --git a/model/riscv_insts_begin.sail 
b/model/riscv_insts_begin.sail index 4287535c0..ba71b7067 100644 --- a/model/riscv_insts_begin.sail +++ b/model/riscv_insts_begin.sail @@ -26,6 +26,10 @@ scattered mapping encdec val encdec_compressed : ast <-> bits(16) scattered mapping encdec_compressed +/* Function for transformed instructions as defined in the hypervisor extension */ +val enc_transformed : (ast, bits(5)) -> bits(32) +scattered function enc_transformed + /* * We declare the ILLEGAL/C_ILLEGAL ast clauses here instead of in * riscv_insts_end, so that model extensions can make use of them. diff --git a/model/riscv_insts_end.sail b/model/riscv_insts_end.sail index 728c21e43..efc8c3e32 100644 --- a/model/riscv_insts_end.sail +++ b/model/riscv_insts_end.sail @@ -26,6 +26,11 @@ mapping clause assembly = C_ILLEGAL(s) <-> "c.illegal" ^ spc() ^ hex_bits_16(s) /* ****************************************************************** */ +/* All other instructions transform to zero */ +function clause enc_transformed(_,_) = zeros() + +/* ****************************************************************** */ + /* End definitions */ end extension end extensionEnabled @@ -34,6 +39,7 @@ end execute end assembly end encdec end encdec_compressed +end enc_transformed val print_insn : ast -> string function print_insn insn = assembly(insn) diff --git a/model/riscv_insts_fext.sail b/model/riscv_insts_fext.sail index d74e8217a..0b30958b6 100644 --- a/model/riscv_insts_fext.sail +++ b/model/riscv_insts_fext.sail @@ -324,7 +324,7 @@ function clause execute(LOAD_FP(imm, rs1, rd, width)) = { if check_misaligned(vaddr, width) then { handle_mem_exception(vaddr, E_Load_Addr_Align()); RETIRE_FAIL } else match translateAddr(vaddr, Read(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); RETIRE_FAIL }, TR_Address(addr, _) => { let (aq, rl, res) = (false, false, false); match (width) { @@ -390,7 +390,7 @@ function clause execute (STORE_FP(imm, rs2, rs1, 
width)) = { if check_misaligned(vaddr, width) then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); RETIRE_FAIL } else match translateAddr(vaddr, Write(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); RETIRE_FAIL }, TR_Address(addr, _) => { let eares : MemoryOpResult(unit) = match width { BYTE => MemValue () /* bogus placeholder for illegal size */, diff --git a/model/riscv_insts_hext.sail b/model/riscv_insts_hext.sail new file mode 100644 index 000000000..0ade5bb62 --- /dev/null +++ b/model/riscv_insts_hext.sail @@ -0,0 +1,243 @@ +/*=======================================================================================*/ +/* This Sail RISC-V architecture model, comprising all files and */ +/* directories except where otherwise noted is subject the BSD */ +/* two-clause license in the LICENSE file. */ +/* */ +/* SPDX-License-Identifier: BSD-2-Clause */ +/*=======================================================================================*/ + +union clause ast = HLV : (word_width, bool, regidx, regidx) + +mapping clause encdec = HLV(width, is_unsigned, rs1, rd) + <-> 0b0110 @ size_enc(width) @ 0b0 @ 0b0000 @ bool_bits(is_unsigned) @ rs1 @ 0b100 @ rd @ 0b1110011 + +function clause execute(HLV(width, is_unsigned, rs1, rd)) = + /* Unsigned loads are only valid for widths strictly less than xlen, signed loads also present for widths equal to xlen */ + if (is_unsigned & size_bytes(width) >= sizeof(xlen_bytes)) | (size_bytes(width) > sizeof(xlen_bytes)) + then { handle_illegal(); RETIRE_FAIL } + else if (cur_virtualization == V1) + then { handle_virtual_instr(); RETIRE_FAIL } + else if (cur_privilege == User) & (hstatus[HU] == 0b0) + then { handle_illegal(); RETIRE_FAIL } + else { + let width_bytes = size_bytes(width); + assert(width_bytes <= xlen_bytes); + /* Extensions can perform additional checks on address validity. 
*/ + match ext_data_get_addr(rs1, zeros(), Read(Data), width_bytes) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + /* Address could be misaligned */ + if check_misaligned(vaddr, width) + then { handle_mem_exception(vaddr, E_Load_Addr_Align()); RETIRE_FAIL } + /* Address translation and protection should be performed as if V=1 */ + else match translateAddr_pv(vaddr, Read(Data), privLevel_of_bits(zero_extend(hstatus[SPVP])), V1) { + TR_Failure(e, c, _) => { handle_exception(e, c); RETIRE_FAIL }, + TR_Address(paddr, _) => match mem_read(Read(Data), paddr, width_bytes, false, false, false) { + MemValue(result) => { X(rd) = extend_value(is_unsigned, result); RETIRE_SUCCESS }, + MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + }, + } + } + } + +mapping clause assembly = HLV(width, is_unsigned, rs1, rd) + <-> "hlv." ^ size_mnemonic(width) ^ maybe_u(is_unsigned) ^ spc() ^ reg_name(rd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" + +/* ****************************************************************** */ + +union clause ast = HLVX : (word_width, regidx, regidx) + +mapping clause encdec = HLVX(width, rs1, rd) + <-> 0b0110 @ size_enc(width) @ 0b0 @ 0b00011 @ rs1 @ 0b100 @ rd @ 0b1110011 + +function clause execute(HLVX(width, rs1, rd)) = + if (cur_virtualization == V1) + then { handle_virtual_instr(); RETIRE_FAIL } + else if (cur_privilege == User) & (hstatus[HU] == 0b0) + then { handle_illegal(); RETIRE_FAIL } + else { + let width_bytes = size_bytes(width); + assert(width_bytes <= xlen_bytes); + /* Extensions can perform additional checks on address validity. 
*/ + match ext_data_get_addr(rs1, zeros(), Read(Data), width_bytes) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + /* Address could be misaligned */ + if check_misaligned(vaddr, width) + then { handle_mem_exception(vaddr, E_Load_Addr_Align()); RETIRE_FAIL } + /* Address translation and protection should be performed as for an instruction fetch while V=1 */ + else match translateAddr_pv(vaddr, Execute(), privLevel_of_bits(zero_extend(hstatus[SPVP])), V1) { + /* Update exception's access type because HLVX should raise load exceptions but translateAddr may return fetch exceptions */ + TR_Failure(e, c, _) => { handle_exception(exceptionType_update_AccessType(e, Read(Data)), c); RETIRE_FAIL }, + TR_Address(paddr, _) => match mem_read(Read(Data), paddr, width_bytes, false, false, false) { + /* AccessType is Read since HLVX should not override pmp */ + MemValue(result) => { X(rd) = extend_value(true, result); RETIRE_SUCCESS }, + MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + }, + } + } + } + +mapping clause assembly = HLVX(width, rs1, rd) +<-> "hlvx." ^ size_mnemonic(width) ^ "u" ^ spc() ^ reg_name(rd) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" + +/* ****************************************************************** */ + +union clause ast = HSV : (word_width, regidx, regidx) + +mapping clause encdec = HSV(width, rs1, rs2) + <-> 0b0110 @ size_enc(width) @ 0b1 @ rs2 @ rs1 @ 0b100 @ 0b00000 @ 0b1110011 + +function clause execute(HSV(width, rs1, rs2)) = + if size_bytes(width) > sizeof(xlen_bytes) + then { handle_illegal(); RETIRE_FAIL } + else if (cur_virtualization == V1) + then { handle_virtual_instr(); RETIRE_FAIL } + else if (cur_privilege == User) & (hstatus[HU] == 0b0) + then { handle_illegal(); RETIRE_FAIL } + /* Extensions can perform additional checks on address validity. 
*/ + else match ext_data_get_addr(rs1, zeros(), Write(Data), size_bytes(width)) { + Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL }, + Ext_DataAddr_OK(vaddr) => + /* Address could be misaligned */ + if check_misaligned(vaddr, width) + then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); RETIRE_FAIL } + /* Address translation and protection should be performed as if V=1 */ + else match translateAddr_pv(vaddr, Write(Data), privLevel_of_bits(zero_extend(hstatus[SPVP])), V1) { + TR_Failure(e, c, _) => { handle_exception(e, c); RETIRE_FAIL }, + TR_Address(paddr, _) => { + let eares : MemoryOpResult(unit) = match width { + BYTE => mem_write_ea(paddr, 1, false, false, false), + HALF => mem_write_ea(paddr, 2, false, false, false), + WORD => mem_write_ea(paddr, 4, false, false, false), + DOUBLE => mem_write_ea(paddr, 8, false, false, false) + }; + match (eares) { + MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + MemValue(_) => { + let rs2_val = X(rs2); + let res : MemoryOpResult(bool) = match (width) { + BYTE => mem_write_value(paddr, 1, rs2_val[7..0], false, false, false), + HALF => mem_write_value(paddr, 2, rs2_val[15..0], false, false, false), + WORD => mem_write_value(paddr, 4, rs2_val[31..0], false, false, false), + DOUBLE if sizeof(xlen) >= 64 => mem_write_value(paddr, 8, rs2_val, false, false, false), + _ => report_invalid_width(__FILE__, __LINE__, width, "hsv"), + }; + match (res) { + MemValue(true) => RETIRE_SUCCESS, + MemValue(false) => internal_error(__FILE__, __LINE__, "hsv got false from mem_write_value"), + MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL } + } + } + } + }, + } + } + +mapping clause assembly = HSV(width, rs1, rs2) + <-> "hsv." 
^ size_mnemonic(width) ^ spc() ^ reg_name(rs2) ^ sep() ^ "(" ^ reg_name(rs1) ^ ")" + +/* ****************************************************************** */ + +union clause ast = HFENCE_VVMA : (regidx, regidx) + +mapping clause encdec = HFENCE_VVMA(rs1, rs2) + <-> 0b0010001 @ rs2 @ rs1 @ 0b000 @ 0b00000 @ 0b1110011 + +function clause execute HFENCE_VVMA(rs1, rs2) = { + let addr : option(xlenbits) = if rs1 == 0b00000 then None() else Some(X(rs1)); + let asid : option(xlenbits) = if rs2 == 0b00000 then None() else Some(X(rs2)); + match (cur_privilege, cur_virtualization) { + (User, V1) => { handle_virtual_instr(); RETIRE_FAIL }, + (User, V0) => { handle_illegal(); RETIRE_FAIL }, + (Supervisor, V1) => { handle_virtual_instr(); RETIRE_FAIL }, + (Supervisor, V0) => { flush_TLB(asid, Stage_VS, addr); RETIRE_SUCCESS }, + (Machine, _) => { flush_TLB(asid, Stage_VS, addr); RETIRE_SUCCESS } + } +} + +mapping clause assembly = HFENCE_VVMA(rs1, rs2) + <-> "hfence.vvma" ^ spc() ^ reg_name(rs1) ^ sep() ^ reg_name(rs2) + +/* ****************************************************************** */ + +union clause ast = HFENCE_GVMA : (regidx, regidx) + +mapping clause encdec = HFENCE_GVMA(rs1, rs2) + <-> 0b0110001 @ rs2 @ rs1 @ 0b000 @ 0b00000 @ 0b1110011 + +function clause execute HFENCE_GVMA(rs1, rs2) = { + let addr : option(xlenbits) = if rs1 == 0b00000 then None() else Some(X(rs1)); + let vmid : option(xlenbits) = if rs2 == 0b00000 then None() else Some(X(rs2)); + match (cur_privilege, cur_virtualization) { + (User, V1) => { handle_virtual_instr(); RETIRE_FAIL }, + (User, V0) => { handle_illegal(); RETIRE_FAIL }, + (Supervisor, V1) => { handle_virtual_instr(); RETIRE_FAIL }, + (Supervisor, V0) => match mstatus[TVM] { + 0b1 => { handle_illegal(); RETIRE_FAIL }, + 0b0 => { flush_TLB(vmid, Stage_G, addr); RETIRE_SUCCESS } + }, + (Machine, _) => { flush_TLB(vmid, Stage_G, addr); RETIRE_SUCCESS } + } +} + +mapping clause assembly = HFENCE_GVMA(rs1, rs2) + <-> "hfence.gvma" ^ spc() ^ 
reg_name(rs1) ^ sep() ^ reg_name(rs2) + +/* ****************************************************************** */ +/* Transformed instruction mappings */ + +// Note: Should enc_transformed be implemented here since it originates from H-ext or next to the relevant instruction definitions? + +function clause enc_transformed(LOAD(_, _, rd, uns, size, _, _), addr_offset) = + 0b0000000 @ 0b00000 @ addr_offset @ bool_bits(uns) @ size_enc(size) @ rd @ 0b0000011 + +function clause enc_transformed(C_LW(_, _, rd), addr_offset) = { + let expanded_inst = LOAD(zeros(), zeros(), 0b01 @ rd, false, WORD, false, false); + let transformed_inst = enc_transformed(expanded_inst, addr_offset); + [transformed_inst with 1 = bitzero] /* Set transformed_inst[1] to 0b0 */ +} + +function clause enc_transformed(C_LD(_, _, rd), addr_offset) = { + let expanded_inst = LOAD(zeros(), zeros(), 0b01 @ rd, false, DOUBLE, false, false); + let transformed_inst = enc_transformed(expanded_inst, addr_offset); + [transformed_inst with 1 = bitzero] /* Set transformed_inst[1] to 0b0 */ +} + +function clause enc_transformed(LOAD_FP(_, _, rd, size), addr_offset) = + 0b0000000 @ 0b00000 @ addr_offset @ 0b0 @ size_enc(size) @ rd @ 0b0000111 + +function clause enc_transformed(STORE(_, rs2, _, size, _, _), addr_offset) = + 0b0000000 @ rs2 @ addr_offset @ 0b0 @ size_enc(size) @ 0b00000 @ 0b0100011 + +function clause enc_transformed(C_SW(_, _, rs2), addr_offset) = { + let expanded_inst = STORE(zeros(), 0b01 @ rs2, zeros(), WORD, false, false); + let transformed_inst = enc_transformed(expanded_inst, addr_offset); + [transformed_inst with 1 = bitzero] /* Set transformed_inst[1] to 0b0 */ +} +function clause enc_transformed(C_SD(_, _, rs2), addr_offset) = { + let expanded_inst = STORE(zeros(), 0b01 @ rs2, zeros(), DOUBLE, false, false); + let transformed_inst = enc_transformed(expanded_inst, addr_offset); + [transformed_inst with 1 = bitzero] /* Set transformed_inst[1] to 0b0 */ +} + +function clause 
enc_transformed(STORE_FP(_, rs2, _, size), addr_offset) = + 0b0000000 @ rs2 @ addr_offset @ 0b0 @ size_enc(size) @ 0b00000 @ 0b0100111 + +function clause enc_transformed(LOADRES(aq, rl, _, size, rd), addr_offset) = + 0b00010 @ bool_bits(aq) @ bool_bits(rl) @ 0b00000 @ addr_offset @ 0b0 @ size_enc(size) @ rd @ 0b0101111 + +function clause enc_transformed(STORECON(aq, rl, rs2, _, size, rd), addr_offset) = + 0b00011 @ bool_bits(aq) @ bool_bits(rl) @ rs2 @ addr_offset @ 0b0 @ size_enc(size) @ rd @ 0b0101111 + +function clause enc_transformed(AMO(op, aq, rl, rs2, _, size, rd), addr_offset) = + encdec_amoop(op) @ bool_bits(aq) @ bool_bits(rl) @ rs2 @ addr_offset @ 0b0 @ size_enc(size) @ rd @ 0b0101111 + +function clause enc_transformed(HLV(width, is_unsigned, _, rd), addr_offset) = + 0b0110 @ size_enc(width) @ 0b0 @ 0b0000 @ bool_bits(is_unsigned) @ addr_offset @ 0b100 @ rd @ 0b1110011 + +function clause enc_transformed(HSV(width, _, rs2), addr_offset) = + 0b0110 @ size_enc(width) @ 0b1 @ rs2 @ addr_offset @ 0b100 @ 0b00000 @ 0b1110011 + +function clause enc_transformed(HLVX(width, _, rd), addr_offset) = + 0b0110 @ size_enc(width) @ 0b0 @ 0b00011 @ addr_offset @ 0b100 @ rd @ 0b1110011 diff --git a/model/riscv_insts_vext_mem.sail b/model/riscv_insts_vext_mem.sail index cc8161304..d06b59f93 100644 --- a/model/riscv_insts_vext_mem.sail +++ b/model/riscv_insts_vext_mem.sail @@ -87,7 +87,7 @@ function process_vlseg (nf, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) = if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_Load_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Read(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return RETIRE_FAIL }, TR_Address(paddr, _) => { match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { MemValue(elem) => write_single_element(load_width_bytes * 8, i, vd + to_bits(5, j * EMUL_reg), 
elem), @@ -167,8 +167,8 @@ function process_vlsegff (nf, vm, vd, load_width_bytes, rs1, EMUL_pow, num_elem) trimmed = true } } else match translateAddr(vaddr, Read(Data)) { - TR_Failure(e, _) => { - if i == 0 then { handle_mem_exception(vaddr, e); return RETIRE_FAIL } + TR_Failure(e, c, _) => { + if i == 0 then { handle_exception(e, c); return RETIRE_FAIL } else { vl = to_bits(xlen, i); print_reg("CSR vl <- " ^ BitStr(vl)); @@ -257,7 +257,7 @@ function process_vsseg (nf, vm, vs3, load_width_bytes, rs1, EMUL_pow, num_elem) if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Write(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return RETIRE_FAIL }, TR_Address(paddr, _) => { let eares : MemoryOpResult(unit) = mem_write_ea(paddr, load_width_bytes, false, false, false); match (eares) { @@ -328,7 +328,7 @@ function process_vlsseg (nf, vm, vd, load_width_bytes, rs1, rs2, EMUL_pow, num_e if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_Load_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Read(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return RETIRE_FAIL }, TR_Address(paddr, _) => { match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { MemValue(elem) => write_single_element(load_width_bytes * 8, i, vd + to_bits(5, j * EMUL_reg), elem), @@ -394,7 +394,7 @@ function process_vssseg (nf, vm, vs3, load_width_bytes, rs1, rs2, EMUL_pow, num_ if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Write(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return 
RETIRE_FAIL }, TR_Address(paddr, _) => { let eares : MemoryOpResult(unit) = mem_write_ea(paddr, load_width_bytes, false, false, false); match (eares) { @@ -466,7 +466,7 @@ function process_vlxseg (nf, vm, vd, EEW_index_bytes, EEW_data_bytes, EMUL_index if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_Load_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Read(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return RETIRE_FAIL }, TR_Address(paddr, _) => { match mem_read(Read(Data), paddr, EEW_data_bytes, false, false, false) { MemValue(elem) => write_single_element(EEW_data_bytes * 8, i, vd + to_bits(5, j * EMUL_data_reg), elem), @@ -557,7 +557,7 @@ function process_vsxseg (nf, vm, vs3, EEW_index_bytes, EEW_data_bytes, EMUL_inde if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Write(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return RETIRE_FAIL }, TR_Address(paddr, _) => { let eares : MemoryOpResult(unit) = mem_write_ea(paddr, EEW_data_bytes, false, false, false); match (eares) { @@ -650,7 +650,7 @@ function process_vlre (nf, vd, load_width_bytes, rs1, elem_per_reg) = { if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_Load_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Read(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return RETIRE_FAIL }, TR_Address(paddr, _) => { match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { MemValue(elem) => write_single_element(load_width_bytes * 8, i, vd + to_bits(5, cur_field), elem), @@ -674,7 +674,7 @@ function process_vlre (nf, vd, load_width_bytes, 
rs1, elem_per_reg) = { if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_Load_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Read(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return RETIRE_FAIL }, TR_Address(paddr, _) => { match mem_read(Read(Data), paddr, load_width_bytes, false, false, false) { MemValue(elem) => write_single_element(load_width_bytes * 8, i, vd + to_bits(5, j), elem), @@ -732,7 +732,7 @@ function process_vsre (nf, load_width_bytes, rs1, vs3, elem_per_reg) = { if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Write(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return RETIRE_FAIL }, TR_Address(paddr, _) => { let eares : MemoryOpResult(unit) = mem_write_ea(paddr, load_width_bytes, false, false, false); match (eares) { @@ -766,7 +766,7 @@ function process_vsre (nf, load_width_bytes, rs1, vs3, elem_per_reg) = { if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Write(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return RETIRE_FAIL }, TR_Address(paddr, _) => { let eares : MemoryOpResult(unit) = mem_write_ea(paddr, load_width_bytes, false, false, false); match (eares) { @@ -834,7 +834,7 @@ function process_vm(vd_or_vs3, rs1, num_elem, evl, op) = { if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_Load_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Read(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return 
RETIRE_FAIL }, TR_Address(paddr, _) => { match mem_read(Read(Data), paddr, 1, false, false, false) { MemValue(elem) => write_single_element(8, i, vd_or_vs3, elem), @@ -850,7 +850,7 @@ function process_vm(vd_or_vs3, rs1, num_elem, evl, op) = { if check_misaligned(vaddr, width_type) then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); return RETIRE_FAIL } else match translateAddr(vaddr, Write(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); return RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); return RETIRE_FAIL }, TR_Address(paddr, _) => { let eares : MemoryOpResult(unit) = mem_write_ea(paddr, 1, false, false, false); match (eares) { diff --git a/model/riscv_insts_zicbom.sail b/model/riscv_insts_zicbom.sail index a8fdd8bcb..f1997ba21 100644 --- a/model/riscv_insts_zicbom.sail +++ b/model/riscv_insts_zicbom.sail @@ -11,9 +11,9 @@ enum clause extension = Ext_Zicbom function clause extensionEnabled(Ext_Zicbom) = sys_enable_zicbom() -function cbo_clean_flush_enabled(p : Privilege) -> bool = feature_enabled_for_priv(p, menvcfg[CBCFE][0], senvcfg[CBCFE][0]) -function cbo_inval_enabled(p : Privilege) -> bool = feature_enabled_for_priv(p, menvcfg[CBIE][0], senvcfg[CBIE][0]) -function cbo_inval_as_inval(p : Privilege) -> bool = feature_enabled_for_priv(p, menvcfg[CBIE][1], senvcfg[CBIE][1]) +function cbo_clean_flush_enabled(p : Privilege, v : Virtualization) -> bool = feature_enabled_for_priv(p, v, menvcfg[CBCFE][0], henvcfg[CBCFE][0], senvcfg[CBCFE][0]) +function cbo_inval_enabled(p : Privilege, v : Virtualization) -> bool = feature_enabled_for_priv(p, v, menvcfg[CBIE][0], henvcfg[CBIE][0], senvcfg[CBIE][0]) +function cbo_inval_as_inval(p : Privilege, v : Virtualization) -> bool = feature_enabled_for_priv(p, v, menvcfg[CBIE][1], henvcfg[CBIE][1], senvcfg[CBIE][1]) /* ****************************************************************** */ union clause ast = RISCV_ZICBOM : (cbop_zicbom, regidx) @@ -52,7 +52,7 @@ function 
process_clean_inval(rs1, cbop) = { match ext_data_get_addr(rs1, offset, Read(Data), cache_block_size) { Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL }, Ext_DataAddr_OK(vaddr) => { - let res: option(ExceptionType) = match translateAddr(vaddr, Read(Data)) { + let res: option((ExceptionType, ExceptionContext)) = match translateAddr(vaddr, Read(Data)) { TR_Address(paddr, _) => { // "A cache-block management instruction is permitted to access the // specified cache block whenever a load instruction or store instruction @@ -68,11 +68,11 @@ function process_clean_inval(rs1, cbop) = { match (exc_read, exc_write) { // Access is permitted if read OR write are allowed. If neither // are allowed then we always report a store exception. - (Some(exc_read), Some(exc_write)) => Some(exc_write), + (Some(exc_read), Some(exc_write)) => Some((exc_write, mem_exception_context(vaddr, cur_virtualization == V1))), _ => None(), } }, - TR_Failure(e, _) => Some(e) + TR_Failure(e, c, _) => Some((e, c)) }; // "If access to the cache block is not permitted, a cache-block management // instruction raises a store page fault or store guest-page fault exception @@ -81,7 +81,7 @@ function process_clean_inval(rs1, cbop) = { match res { // The model has no caches so there's no action required. None() => RETIRE_SUCCESS, - Some(e) => { + Some((e, c)) => { let e : ExceptionType = match e { E_Load_Access_Fault() => E_SAMO_Access_Fault(), E_SAMO_Access_Fault() => E_SAMO_Access_Fault(), @@ -91,7 +91,7 @@ function process_clean_inval(rs1, cbop) = { // for fetch access and it's can't be misaligned. 
_ => internal_error(__FILE__, __LINE__, "unexpected exception for cmo.clean/inval"), }; - handle_mem_exception(vaddr, e); + handle_exception(e, c); RETIRE_FAIL } } @@ -101,12 +101,12 @@ function process_clean_inval(rs1, cbop) = { function clause execute(RISCV_ZICBOM(cbop, rs1)) = match cbop { - CBO_CLEAN if cbo_clean_flush_enabled(cur_privilege) => + CBO_CLEAN if cbo_clean_flush_enabled(cur_privilege, cur_virtualization) => process_clean_inval(rs1, cbop), - CBO_FLUSH if cbo_clean_flush_enabled(cur_privilege) => + CBO_FLUSH if cbo_clean_flush_enabled(cur_privilege, cur_virtualization) => process_clean_inval(rs1, cbop), - CBO_INVAL if cbo_inval_enabled(cur_privilege) => - process_clean_inval(rs1, if cbo_inval_as_inval(cur_privilege) then CBO_INVAL else CBO_FLUSH), + CBO_INVAL if cbo_inval_enabled(cur_privilege, cur_virtualization) => + process_clean_inval(rs1, if cbo_inval_as_inval(cur_privilege, cur_virtualization) then CBO_INVAL else CBO_FLUSH), _ => { handle_illegal(); RETIRE_FAIL diff --git a/model/riscv_insts_zicboz.sail b/model/riscv_insts_zicboz.sail index abf18dc82..c1ca765c9 100644 --- a/model/riscv_insts_zicboz.sail +++ b/model/riscv_insts_zicboz.sail @@ -11,7 +11,7 @@ enum clause extension = Ext_Zicboz function clause extensionEnabled(Ext_Zicboz) = sys_enable_zicboz() -function cbo_zero_enabled(p : Privilege) -> bool = feature_enabled_for_priv(p, menvcfg[CBZE][0], senvcfg[CBZE][0]) +function cbo_zero_enabled(p : Privilege, v : Virtualization) -> bool = feature_enabled_for_priv(p, v, menvcfg[CBZE][0], henvcfg[CBZE][0], senvcfg[CBZE][0]) /* ****************************************************************** */ union clause ast = RISCV_ZICBOZ : (regidx) @@ -23,7 +23,7 @@ mapping clause assembly = RISCV_ZICBOZ(rs1) <-> "cbo.zero" ^ spc() ^ "(" ^ opt_spc() ^ reg_name(rs1) ^ opt_spc() ^ ")" function clause execute(RISCV_ZICBOZ(rs1)) = { - if cbo_zero_enabled(cur_privilege) then { + if cbo_zero_enabled(cur_privilege, cur_virtualization) then { let rs1_val = 
X(rs1); let cache_block_size_exp = plat_cache_block_size_exp(); let cache_block_size = 2 ^ cache_block_size_exp; @@ -40,7 +40,7 @@ function clause execute(RISCV_ZICBOZ(rs1)) = { // // This implementation does a single atomic write. match translateAddr(vaddr, Write(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + TR_Failure(e, c, _) => { handle_exception(e, c); RETIRE_FAIL }, TR_Address(paddr, _) => { let eares : MemoryOpResult(unit) = mem_write_ea(paddr, cache_block_size, false, false, false); match (eares) { diff --git a/model/riscv_insts_zicsr.sail b/model/riscv_insts_zicsr.sail index 5e4d47b9e..17e2b34a8 100644 --- a/model/riscv_insts_zicsr.sail +++ b/model/riscv_insts_zicsr.sail @@ -30,7 +30,7 @@ function clause read_CSR(0xF12) = marchid function clause read_CSR(0xF13) = mimpid function clause read_CSR(0xF14) = mhartid function clause read_CSR(0xF15) = mconfigptr -function clause read_CSR(0x300) = mstatus.bits +function clause read_CSR(0x300) = mstatus.bits[xlen - 1 .. 0] function clause read_CSR(0x301) = misa.bits function clause read_CSR(0x302) = medeleg.bits function clause read_CSR(0x303) = mideleg.bits @@ -38,7 +38,7 @@ function clause read_CSR(0x304) = mie.bits function clause read_CSR(0x305) = get_mtvec() function clause read_CSR(0x306) = zero_extend(mcounteren.bits) function clause read_CSR(0x30A) = menvcfg.bits[xlen - 1 .. 0] -function clause read_CSR(0x310 if xlen == 32) = mstatush.bits +function clause read_CSR(0x310 if xlen == 32) = mstatus.bits[63 .. 32] function clause read_CSR(0x31A if xlen == 32) = menvcfg.bits[63 .. 
32] function clause read_CSR(0x320) = zero_extend(mcountinhibit.bits) @@ -46,7 +46,7 @@ function clause read_CSR(0x320) = zero_extend(mcountinhibit.bits) function clause read_CSR(0b0011001 /* 0x320 */ @ index : bits(5) if unsigned(index) >= 3) = read_mhpmevent(hpmidx_from_bits(index)) function clause read_CSR(0x340) = mscratch -function clause read_CSR(0x341) = get_xret_target(Machine) & pc_alignment_mask() +function clause read_CSR(0x341) = get_xret_target(Machine, V0) & pc_alignment_mask() function clause read_CSR(0x342) = mcause.bits function clause read_CSR(0x343) = mtval function clause read_CSR(0x344) = mip.bits @@ -73,24 +73,27 @@ function clause read_CSR(0b1011100 /* 0xB80 */ @ index : bits(5) if xlen == 32 & function clause read_CSR(0x7a0) = ~(tselect) /* this indicates we don't have any trigger support */ /* supervisor mode */ -function clause read_CSR(0x100) = lower_mstatus(mstatus).bits -function clause read_CSR(0x104) = lower_mie(mie, mideleg).bits -function clause read_CSR(0x105) = get_stvec() +function clause read_CSR(0x100) = (if cur_virtualization == V1 then vsstatus.bits else lower_mstatus(mstatus).bits)[(xlen - 1) .. 0] +function clause read_CSR(0x104) = if cur_virtualization == V1 then lower_mie_to_vsie(mie, hideleg).bits else lower_mie(mie, mideleg).bits +function clause read_CSR(0x105) = if cur_virtualization == V1 then get_vstvec() else get_stvec() function clause read_CSR(0x106) = zero_extend(scounteren.bits) function clause read_CSR(0x10A) = senvcfg.bits[xlen - 1 .. 
0] -function clause read_CSR(0x140) = sscratch -function clause read_CSR(0x141) = get_xret_target(Supervisor) & pc_alignment_mask() -function clause read_CSR(0x142) = scause.bits -function clause read_CSR(0x143) = stval -function clause read_CSR(0x144) = lower_mip(mip, mideleg).bits -function clause read_CSR(0x180) = satp +function clause read_CSR(0x140) = if cur_virtualization == V1 then vsscratch else sscratch +function clause read_CSR(0x141) = get_xret_target(Supervisor, cur_virtualization) & pc_alignment_mask() +function clause read_CSR(0x142) = if cur_virtualization == V1 then vscause.bits else scause.bits +function clause read_CSR(0x143) = if cur_virtualization == V1 then vstval else stval +function clause read_CSR(0x144) = if cur_virtualization == V1 then lower_mip_to_vsip(mip, hideleg).bits else lower_mip(mip, mideleg).bits + +/* address translation & protection */ +function clause read_CSR(0x180) = if cur_virtualization == V1 then vsatp else satp +function clause read_CSR(0x680) = hgatp /* user mode counters */ function clause read_CSR(0xC00) = mcycle[(xlen - 1) .. 0] -function clause read_CSR(0xC01) = mtime[(xlen - 1) .. 0] +function clause read_CSR(0xC01) = (if cur_virtualization == V0 then mtime else mtime + htimedelta)[(xlen - 1) .. 0] function clause read_CSR(0xC02) = minstret[(xlen - 1) .. 0] function clause read_CSR(0xC80 if xlen == 32) = mcycle[63 .. 32] -function clause read_CSR(0xC81 if xlen == 32) = mtime[63 .. 32] +function clause read_CSR(0xC81 if xlen == 32) = (if cur_virtualization == V0 then mtime else mtime + htimedelta)[63 .. 32] function clause read_CSR(0xC82 if xlen == 32) = minstret[63 .. 
32] /* Hardware Performance Monitoring user mode counters */ @@ -101,7 +104,8 @@ function clause read_CSR(0b1100100 /* 0xC80 */ @ index : bits(5) if xlen == 32 & function clause read_CSR(0x015) = read_seed_csr() /* machine mode */ -function clause write_CSR(0x300, value) = { mstatus = legalize_mstatus(mstatus, value); mstatus.bits } +function clause write_CSR((0x300, value) if xlen == 32) = { mstatus = legalize_mstatus(mstatus, mstatus.bits[63 .. 32] @ value); mstatus.bits[31 .. 0] } +function clause write_CSR((0x300, value) if xlen == 64) = { mstatus = legalize_mstatus(mstatus, value); mstatus.bits } function clause write_CSR(0x301, value) = { misa = legalize_misa(misa, value); misa.bits } function clause write_CSR(0x302, value) = { medeleg = legalize_medeleg(medeleg, value); medeleg.bits } function clause write_CSR(0x303, value) = { mideleg = legalize_mideleg(mideleg, value); mideleg.bits } @@ -110,11 +114,11 @@ function clause write_CSR(0x305, value) = { set_mtvec(value) } function clause write_CSR(0x306, value) = { mcounteren = legalize_mcounteren(mcounteren, value); zero_extend(mcounteren.bits) } function clause write_CSR((0x30A, value) if xlen == 32) = { menvcfg = legalize_menvcfg(menvcfg, menvcfg.bits[63 .. 32] @ value); menvcfg.bits[31 .. 0] } function clause write_CSR((0x30A, value) if xlen == 64) = { menvcfg = legalize_menvcfg(menvcfg, value); menvcfg.bits } -function clause write_CSR((0x310, value) if xlen == 32) = { mstatush.bits } // ignore writes for now +function clause write_CSR((0x310, value) if xlen == 32) = { mstatus = legalize_mstatus(mstatus, value @ mstatus.bits[31 .. 0]); mstatus.bits[63 .. 32] } function clause write_CSR((0x31A, value) if xlen == 32) = { menvcfg = legalize_menvcfg(menvcfg, value @ menvcfg.bits[31 .. 0]); menvcfg.bits[63 .. 
32] } function clause write_CSR(0x320, value) = { mcountinhibit = legalize_mcountinhibit(mcountinhibit, value); zero_extend(mcountinhibit.bits) } function clause write_CSR(0x340, value) = { mscratch = value; mscratch } -function clause write_CSR(0x341, value) = { set_xret_target(Machine, value) } +function clause write_CSR(0x341, value) = { set_xret_target(Machine, V0, value) } function clause write_CSR(0x342, value) = { mcause.bits = value; mcause.bits } function clause write_CSR(0x343, value) = { mtval = value; mtval } function clause write_CSR(0x344, value) = { mip = legalize_mip(mip, value); mip.bits } @@ -159,17 +163,47 @@ function clause write_CSR((0b1011100 /* 0xB80 */ @ index : bits(5), value) if xl function clause write_CSR(0x7a0, value) = { tselect = value; tselect } /* supervisor mode */ -function clause write_CSR(0x100, value) = { mstatus = legalize_sstatus(mstatus, value); mstatus.bits } -function clause write_CSR(0x104, value) = { mie = legalize_sie(mie, mideleg, value); mie.bits } -function clause write_CSR(0x105, value) = { set_stvec(value) } +function clause write_CSR(0x100, value) = match cur_virtualization { + V0 => { mstatus = legalize_sstatus(mstatus, value); mstatus.bits[(xlen - 1) .. 0] }, + V1 => { vsstatus = legalize_vsstatus(vsstatus, value); vsstatus.bits[(xlen - 1) .. 0] }, + } +function clause write_CSR(0x104, value) = match cur_virtualization { + V0 => { mie = legalize_sie(mie, mideleg, value); mie.bits }, + V1 => { mie = legalize_vsie(mie, hideleg, value); mie.bits }, + } +function clause write_CSR(0x105, value) = match cur_virtualization { + V0 => { set_stvec(value) }, + V1 => { set_vstvec(value) }, + } function clause write_CSR(0x106, value) = { scounteren = legalize_scounteren(scounteren, value); zero_extend(scounteren.bits) } function clause write_CSR(0x10A, value) = { senvcfg = legalize_senvcfg(senvcfg, zero_extend(value)); senvcfg.bits[xlen - 1 .. 
0] } -function clause write_CSR(0x140, value) = { sscratch = value; sscratch } -function clause write_CSR(0x141, value) = { set_xret_target(Supervisor, value) } -function clause write_CSR(0x142, value) = { scause.bits = value; scause.bits } -function clause write_CSR(0x143, value) = { stval = value; stval } -function clause write_CSR(0x144, value) = { mip = legalize_sip(mip, mideleg, value); mip.bits } -function clause write_CSR(0x180, value) = { satp = legalize_satp(cur_Architecture(), satp, value); satp } +function clause write_CSR(0x140, value) = match cur_virtualization { + V0 => { sscratch = value; sscratch }, + V1 => { vsscratch = value; vsscratch }, + } +function clause write_CSR(0x141, value) = match cur_virtualization { + V0 => { set_xret_target(Supervisor, V0, value) }, + V1 => { set_xret_target(Supervisor, V1, value) }, + } +function clause write_CSR(0x142, value) = match cur_virtualization { + V0 => { scause.bits = value; scause.bits }, + V1 => { vscause.bits = value; vscause.bits }, + } +function clause write_CSR(0x143, value) = match cur_virtualization { + V0 => { stval = value; stval }, + V1 => { vstval = value; vstval}, + } +function clause write_CSR(0x144, value) = match cur_virtualization { + V0 => { mip = legalize_sip(mip, mideleg, value); mip.bits }, + V1 => { mip = legalize_vsip(mip, hideleg, value); mip.bits }, + } + +/* Address translation & protection */ +function clause write_CSR(0x180, value) = match cur_virtualization { + V0 => { satp = legalize_satp(cur_Architecture(), satp, value); satp }, + V1 => { vsatp = legalize_satp(cur_Architecture(), vsatp, value); vsatp }, + } +function clause write_CSR(0x680, value) = { hgatp = legalize_hgatp(cur_Architecture(), hgatp, value); hgatp } /* user mode: seed (entropy source). 
writes are ignored */ function clause write_CSR(0x015, value) = write_seed_csr() @@ -180,8 +214,13 @@ function clause execute CSR(csr, rs1, rd, is_imm, op) = { CSRRW => true, _ => if is_imm then unsigned(rs1_val) != 0 else unsigned(rs1) != 0 }; - if not(check_CSR(csr, cur_privilege, isWrite)) - then { handle_illegal(); RETIRE_FAIL } + if not(check_CSR(csr, cur_privilege, cur_virtualization, isWrite)) + then { + if extensionEnabled(Ext_H) & csr_access_raises_virtual_instr(csr, cur_privilege, cur_virtualization, isWrite) + then handle_virtual_instr() + else handle_illegal(); + RETIRE_FAIL + } else if not(ext_check_CSR(csr, cur_privilege, isWrite)) then { ext_check_CSR_fail(); RETIRE_FAIL } else { diff --git a/model/riscv_platform.sail b/model/riscv_platform.sail index b29eb8325..474995c5d 100644 --- a/model/riscv_platform.sail +++ b/model/riscv_platform.sail @@ -450,16 +450,22 @@ function tick_platform() -> unit = { /* Platform-specific handling of instruction faults */ -function handle_illegal() -> unit = { - let info = if plat_mtval_has_illegal_inst_bits () - then Some(instbits) - else None(); - let t : sync_exception = struct { trap = E_Illegal_Instr(), - excinfo = info, - ext = None() }; - set_next_pc(exception_handler(cur_privilege, CTL_TRAP(t), PC)) +function handle_instr_fault(e: ExceptionType) -> unit = { + /* handle_instr_fault only supports the following exception causes */ + assert(match e { + E_Illegal_Instr() => true, + E_Virtual_Instr() => true, + _ => false, + }, "exception not supported by current handler: " ^ to_str(e)); + let c = instr_exception_context(zero_extend(instbits), plat_mtval_has_illegal_inst_bits()) in + set_next_pc(exception_handler(cur_privilege, cur_virtualization, CTL_TRAP(e, c), PC)) } +// Note: Would its purpose be clearer if called 'handle_illegal_instr()'? 
+function handle_illegal() -> unit = handle_instr_fault(E_Illegal_Instr()) + +function handle_virtual_instr() -> unit = handle_instr_fault(E_Virtual_Instr()) + /* Platform-specific wait-for-interrupt */ function platform_wfi() -> unit = { diff --git a/model/riscv_regs.sail b/model/riscv_regs.sail index 26faef210..5672852e2 100644 --- a/model/riscv_regs.sail +++ b/model/riscv_regs.sail @@ -12,7 +12,8 @@ register PC : xlenbits register nextPC : xlenbits /* internal state to hold instruction bits for faulting instructions */ -register instbits : xlenbits +register instbits : ilenbits +register instbits_transformed : ilenbits /* register file and accessors */ diff --git a/model/riscv_step.sail b/model/riscv_step.sail index 21bd82e47..dad9def88 100644 --- a/model/riscv_step.sail +++ b/model/riscv_step.sail @@ -24,11 +24,11 @@ function step(step_no : int) -> bool = { minstret_increment = mcountinhibit[IR] == 0b0; let (retired, stepped) : (Retired, bool) = - match dispatchInterrupt(cur_privilege) { - Some(intr, priv) => { + match dispatchInterrupt(cur_privilege, cur_virtualization) { + Some(intr, priv, virt) => { if get_config_print_instr() then print_bits("Handling interrupt: ", interruptType_to_bits(intr)); - handle_interrupt(intr, priv); + handle_interrupt(intr, priv, virt); (RETIRE_FAIL, false) }, None() => { @@ -40,8 +40,8 @@ function step(step_no : int) -> bool = { (RETIRE_FAIL, false) }, /* standard error */ - F_Error(e, addr) => { - handle_mem_exception(addr, e); + F_Error(e, c) => { + handle_exception(e, c); (RETIRE_FAIL, false) }, /* non-error cases: */ @@ -49,9 +49,10 @@ function step(step_no : int) -> bool = { sail_instr_announce(h); instbits = zero_extend(h); let ast = ext_decode_compressed(h); + instbits_transformed = enc_transformed(ast, zeros()); // BUG: Address offset may be nonzero for misaligned memory accesses if get_config_print_instr() then { - print_instr("[" ^ dec_str(step_no) ^ "] [" ^ to_str(cur_privilege) ^ "]: " ^ BitStr(PC) ^ " (" ^ BitStr(h) 
^ ") " ^ to_str(ast)); + print_instr("[" ^ dec_str(step_no) ^ "] [" ^ to_str(cur_privilege, cur_virtualization) ^ "]: " ^ BitStr(PC) ^ " (" ^ BitStr(h) ^ ") " ^ to_str(ast)); }; /* check for RVC once here instead of every RVC execute clause. */ if extensionEnabled(Ext_Zca) then { @@ -66,9 +67,10 @@ function step(step_no : int) -> bool = { sail_instr_announce(w); instbits = zero_extend(w); let ast = ext_decode(w); + instbits_transformed = enc_transformed(ast, zeros()); // BUG: Address offset may be nonzero for misaligned memory accesses if get_config_print_instr() then { - print_instr("[" ^ dec_str(step_no) ^ "] [" ^ to_str(cur_privilege) ^ "]: " ^ BitStr(PC) ^ " (" ^ BitStr(w) ^ ") " ^ to_str(ast)); + print_instr("[" ^ dec_str(step_no) ^ "] [" ^ to_str(cur_privilege, cur_virtualization) ^ "]: " ^ BitStr(PC) ^ " (" ^ BitStr(w) ^ ") " ^ to_str(ast)); }; nextPC = PC + 4; (execute(ast), true) diff --git a/model/riscv_step_common.sail b/model/riscv_step_common.sail index 5384e5b50..e2204026f 100644 --- a/model/riscv_step_common.sail +++ b/model/riscv_step_common.sail @@ -11,8 +11,8 @@ */ union FetchResult = { - F_Ext_Error : ext_fetch_addr_error, /* For extensions */ - F_Base : word, /* Base ISA */ - F_RVC : half, /* Compressed ISA */ - F_Error : (ExceptionType, xlenbits) /* standard exception and PC */ + F_Ext_Error : ext_fetch_addr_error, /* For extensions */ + F_Base : word, /* Base ISA */ + F_RVC : half, /* Compressed ISA */ + F_Error : (ExceptionType, ExceptionContext) /* standard exception and PC */ } diff --git a/model/riscv_sync_exception.sail b/model/riscv_sync_exception.sail index 48efdb708..a076ba769 100644 --- a/model/riscv_sync_exception.sail +++ b/model/riscv_sync_exception.sail @@ -6,10 +6,61 @@ /* SPDX-License-Identifier: BSD-2-Clause */ /*=======================================================================================*/ -/* model context for synchronous exceptions, parameterized for extensions */ +/* Model context for synchronous exceptions, 
parameterized for out-of-tree extensions */ -struct sync_exception = { - trap : ExceptionType, - excinfo : option(xlenbits), - ext : option(ext_exception) /* for extensions */ +struct ExceptionContext = { + excinfo : option(xlenbits), /* Faulting address or instruction */ + info_is_gva : bool, /* H-extension: true if excinfo holds guest-virtual-address */ + excinfo2 : option(xlenbits), /* H-extension: faulting guest-physical address */ + excinst : option(xlenbits), /* H-extension: transformed instruction or pseudoinstruction */ + ext : option(ext_exception) /* For out-of-tree extensions */ } + +/* Create contexts for various exception classes */ + +function empty_exception_context() -> ExceptionContext = + struct { + excinfo = None(), + info_is_gva = false, + excinfo2 = None(), + excinst = None(), + ext = None(), + } + +function mem_exception_context(vaddr : xlenbits, is_gva : bool) -> ExceptionContext = + struct { + excinfo = Some(vaddr), + info_is_gva = is_gva, + excinfo2 = None(), + excinst = None(), + ext = None(), + } + +function vmem_exception_context(vaddr : xlenbits, is_gva : bool, gpaddr : option(xlenbits), pseudoinst : option(xlenbits)) -> ExceptionContext = + struct { + excinfo = Some(vaddr), + info_is_gva = is_gva, + excinfo2 = gpaddr, + excinst = pseudoinst, + ext = None(), + } + +function instr_exception_context(inst : xlenbits, tval_has_ill_inst_bits : bool) -> ExceptionContext = + struct { + excinfo = if tval_has_ill_inst_bits then Some(inst) else None(), + info_is_gva = false, + excinfo2 = None(), + excinst = None(), + ext = None(), + } + +// For out-of-tree extensions +val ext_exception_context : forall ('ext : Type) . 
('ext) -> ExceptionContext +function ext_exception_context(ext) = + struct { + excinfo = None(), + info_is_gva = false, + excinfo2 = None(), + excinst = None(), + ext = None(), + } diff --git a/model/riscv_sys_control.sail b/model/riscv_sys_control.sail index 620240cb9..1ec5031d6 100644 --- a/model/riscv_sys_control.sail +++ b/model/riscv_sys_control.sail @@ -13,9 +13,6 @@ function clause extensionEnabled(Ext_Zkr) = true /* CSR access control */ -function csrAccess(csr : csreg) -> csrRW = csr[11..10] -function csrPriv(csr : csreg) -> priv_level = csr[9..8] - // TODO: These is_CSR_defined definitions should be moved to the files // corresponding to their extensions rather than all be here. @@ -104,59 +101,6 @@ function clause is_CSR_defined(0b1100100 /* 0xC80 */ @ index : bits(5) if unsign /* user mode: Zkr */ function clause is_CSR_defined(0x015) = extensionEnabled(Ext_Zkr) - -val check_CSR_access : (csrRW, priv_level, Privilege, bool) -> bool -function check_CSR_access(csrrw, csrpr, p, isWrite) = - not(isWrite == true & csrrw == 0b11) /* read/write */ - & (privLevel_to_bits(p) >=_u csrpr) /* privilege */ - -function check_TVM_SATP(csr : csreg, p : Privilege) -> bool = - not(csr == 0x180 & p == Supervisor & mstatus[TVM] == 0b1) - -// There are several features that are controlled by machine/supervisor enable -// bits (m/senvcfg, m/scounteren, etc.). This abstracts that logic. -function feature_enabled_for_priv(p : Privilege, machine_enable_bit : bit, supervisor_enable_bit : bit) -> bool = match p { - Machine => true, - Supervisor => machine_enable_bit == bitone, - User => machine_enable_bit == bitone & (not(extensionEnabled(Ext_S)) | supervisor_enable_bit == bitone), -} - -// Return true if the counter is enabled OR the CSR is not a counter. -function check_Counteren(csr : csreg, p : Privilege) -> bool = { - // Check if it is not a counter. - if csr <_u 0xC00 | 0xC1F <_u csr - then return true; - - // Check the relevant bit in m/scounteren. 
- let index = unsigned(csr[4 .. 0]); - feature_enabled_for_priv(p, mcounteren.bits[index], scounteren.bits[index]) -} - -/* Seed may only be accessed if we are doing a write, and access has been - * allowed in the current priv mode - */ -function check_seed_CSR (csr : csreg, p : Privilege, isWrite : bool) -> bool = { - if not(csr == 0x015) then { - true - } else if not(isWrite) then { - /* Read-only access to the seed CSR is not allowed */ - false - } else { - match (p) { - Machine => true, - Supervisor => false, /* TODO: base this on mseccfg */ - User => false, /* TODO: base this on mseccfg */ - } - } -} - -function check_CSR(csr : csreg, p : Privilege, isWrite : bool) -> bool = - is_CSR_defined(csr) - & check_CSR_access(csrAccess(csr), csrPriv(csr), p, isWrite) - & check_TVM_SATP(csr, p) - & check_Counteren(csr, p) - & check_seed_CSR(csr, p, isWrite) - /* Reservation handling for LR/SC. * * The reservation state is maintained external to the model since the @@ -178,13 +122,44 @@ val cancel_reservation = impure {interpreter: "Platform.cancel_reservation", c: /* Exception delegation: given an exception and the privilege at which * it occured, returns the privilege at which it should be handled. */ -function exception_delegatee(e : ExceptionType, p : Privilege) -> Privilege = { - let idx = num_of_ExceptionType(e); - let super = bit_to_bool(medeleg.bits[idx]); - let deleg = if extensionEnabled(Ext_S) & super then Supervisor else Machine; - /* We cannot transition to a less-privileged mode. 
*/ - if privLevel_to_bits(deleg) <_u privLevel_to_bits(p) - then p else deleg +function exception_delegatee(e : ExceptionType, cur_p : Privilege, cur_v : Virtualization) -> (Privilege, Virtualization) = { + /* Determine delegation privilege and virtualization mode based on xedeleg CSRs + * + * medeleg delegates to HS-mode + * hedeleg delegates to VS-mode + */ + let idx = num_of_ExceptionType(e); + let del_to_hs = bit_to_bool(medeleg.bits[idx]); + let del_to_vs = bit_to_bool(hedeleg.bits[idx]) & (cur_v == V1); + let (del_p, del_v): (Privilege, Virtualization) = match (del_to_hs, del_to_vs) { + (false, _) => (Machine, V0), + (true, false) => (Supervisor, V0), + (true, true) => (Supervisor, V1), + }; + + /* Check if delegation privilege exists: + * + * delegation to M-mode is unconditionally possible + * delegation to HS-mode is only possible if supervisor mode is implemented + * delegation to VS-mode is only possible if hypervisor extension is present (its presence implies supervisor mode implemented) + */ + let (del_p, del_v): (Privilege, Virtualization) = match (del_p, del_v) { + (Machine, _) => (Machine, V0), + (Supervisor, V0) => if extensionEnabled(Ext_S) then (Supervisor, V0) + else (Machine, V0), + (Supervisor, V1) => if extensionEnabled(Ext_H) then (Supervisor, V1) + else if extensionEnabled(Ext_S) then (Supervisor, V0) + else (Machine, V0), + (User, _) => internal_error(__FILE__, __LINE__, "Unreachable: illegal delegation privilege") + }; + /* Check transition to less-privileged mode: + * + * trap delegation should never cause a transition to a lower privilege mode + * trap delegation should never cause a transition to a more virtualized mode + */ + if (privLevel_to_bits(del_p) >=_u privLevel_to_bits(cur_p)) & (virtMode_to_bits(del_v) <=_u virtMode_to_bits(cur_v)) + then (del_p, del_v) + else (cur_p, cur_v) } /* Interrupts are prioritized in privilege order, and for each @@ -192,15 +167,32 @@ function exception_delegatee(e : ExceptionType, p : Privilege) -> 
Privilege = { */ function findPendingInterrupt(ip : xlenbits) -> option(InterruptType) = { let ip = Mk_Minterrupts(ip); - if ip[MEI] == 0b1 then Some(I_M_External) - else if ip[MSI] == 0b1 then Some(I_M_Software) - else if ip[MTI] == 0b1 then Some(I_M_Timer) - else if ip[SEI] == 0b1 then Some(I_S_External) - else if ip[SSI] == 0b1 then Some(I_S_Software) - else if ip[STI] == 0b1 then Some(I_S_Timer) + if ip[MEI] == 0b1 then Some(I_M_External) + else if ip[MSI] == 0b1 then Some(I_M_Software) + else if ip[MTI] == 0b1 then Some(I_M_Timer) + else if ip[SEI] == 0b1 then Some(I_S_External) + else if ip[SSI] == 0b1 then Some(I_S_Software) + else if ip[STI] == 0b1 then Some(I_S_Timer) + else if ip[SGEI] == 0b1 then Some(I_G_External) + else if ip[VSEI] == 0b1 then Some(I_VS_External) + else if ip[VSSI] == 0b1 then Some(I_VS_Software) + else if ip[VSTI] == 0b1 then Some(I_VS_Timer) + else if ip[UEI] == 0b1 then Some(I_U_External) + else if ip[USI] == 0b1 then Some(I_U_Software) + else if ip[UTI] == 0b1 then Some(I_U_Timer) else None() } +/*! Translate VS-level interrupts to their S-level counterpart */ +function translateVSInterrupts(i : Minterrupts) -> Minterrupts = { + let t = i; + let t = [[t with SEI = i[VSEI]] with VSEI = 0b0]; + let t = [[t with SSI = i[VSSI]] with VSSI = 0b0]; + let t = [[t with STI = i[VSTI]] with VSTI = 0b0]; + // TODO: add hook for extensions to translate interrupts as well + t +} + /* Process the pending interrupts xip at a privilege according to * the enabled flags xie and the delegation in xideleg. Return * either the set of pending interrupts, or the set of interrupts @@ -231,7 +223,7 @@ function processPending(xip : Minterrupts, xie : Minterrupts, xideleg : xlenbits * We don't use the lowered views of {xie,xip} here, since the spec * allows for example the M_Timer to be delegated to the S-mode. 
*/ -function getPendingSet(priv : Privilege) -> option((xlenbits, Privilege)) = { +function getPendingSet(priv : Privilege, virt : Virtualization) -> option((xlenbits, Privilege, Virtualization)) = { let effective_pending = mip.bits & mie.bits; if effective_pending == zeros() then None() /* fast path */ else { @@ -240,11 +232,21 @@ function getPendingSet(priv : Privilege) -> option((xlenbits, Privilege)) = { * considered blocked. */ let mIE = priv != Machine | (priv == Machine & mstatus[MIE] == 0b1); - let sIE = extensionEnabled(Ext_S) & (priv == User | (priv == Supervisor & mstatus[SIE] == 0b1)); + let sIE = extensionEnabled(Ext_S) & (virt == V1 | priv == User | (priv == Supervisor & mstatus[SIE] == 0b1)); + let vsIE = extensionEnabled(Ext_H) & virt == V1 & (priv == User | (priv == Supervisor & vsstatus[SIE] == 0b1)); // Enabled H-ext implies presence of S-mode + match processPending(mip, mie, mideleg.bits, mIE) { Ints_Empty() => None(), - Ints_Pending(p) => Some((p, Machine)), - Ints_Delegated(d) => if sIE then Some((d, Supervisor)) else None(), + Ints_Pending(p) => Some((p, Machine, V0)), + Ints_Delegated(d) => match processPending(Mk_Minterrupts(d), mie, hideleg.bits, sIE) { + Ints_Empty() => None(), + Ints_Pending(p) => Some((p, Supervisor, V0)), + Ints_Delegated(d) => match processPending(Mk_Minterrupts(d), mie, zeros(), vsIE) { + Ints_Empty() => None(), + Ints_Pending(p) => Some(translateVSInterrupts(Mk_Minterrupts(p)).bits, Supervisor, V1), + Ints_Delegated(d) => internal_error(__FILE__, __LINE__, "Delegate interrupts beyond VS-mode") + } + } } } } @@ -252,12 +254,12 @@ function getPendingSet(priv : Privilege) -> option((xlenbits, Privilege)) = { /* Examine the current interrupt state and return an interrupt to be * * handled (if any), and the privilege it should be handled at. 
*/ -function dispatchInterrupt(priv : Privilege) -> option((InterruptType, Privilege)) = { - match getPendingSet(priv) { +function dispatchInterrupt(priv : Privilege, virt : Virtualization) -> option((InterruptType, Privilege, Virtualization)) = { + match getPendingSet(priv, virt) { None() => None(), - Some(ip, p) => match findPendingInterrupt(ip) { + Some(ip, p, v) => match findPendingInterrupt(ip) { None() => None(), - Some(i) => Some((i, p)), + Some(i) => Some((i, p, v)), } } } @@ -265,7 +267,7 @@ function dispatchInterrupt(priv : Privilege) -> option((InterruptType, Privilege /* types of privilege transitions */ union ctl_result = { - CTL_TRAP : sync_exception, + CTL_TRAP : (ExceptionType, ExceptionContext), CTL_SRET : unit, CTL_MRET : unit, } @@ -291,125 +293,211 @@ $endif /* handle exceptional ctl flow by updating nextPC and operating privilege */ -function trap_handler(del_priv : Privilege, intr : bool, c : exc_code, pc : xlenbits, info : option(xlenbits), ext : option(ext_exception)) +function trap_handler(del_priv : Privilege, del_virt : Virtualization, pc : xlenbits, cause : TrapCause, context : ExceptionContext) -> xlenbits = { rvfi_trap(); if get_config_print_platform() - then print_platform("handling " ^ (if intr then "int#" else "exc#") - ^ BitStr(c) ^ " at priv " ^ to_str(del_priv) - ^ " with tval " ^ BitStr(tval(info))); + then print_platform("handling " ^ (if trapCause_is_interrupt(cause) then "int#" else "exc#") + ^ BitStr(trapCause_to_bits(cause)) ^ " at priv " ^ to_str(del_priv, del_virt) + ^ " with tval " ^ BitStr(some_or_zero(context.excinfo)) + ^ " and tinst " ^ BitStr(some_or_zero(context.excinst))); match (del_priv) { Machine => { - mcause[IsInterrupt] = bool_to_bits(intr); - mcause[Cause] = zero_extend(c); + mcause[IsInterrupt] = bool_to_bits(trapCause_is_interrupt(cause)); + mcause[Cause] = zero_extend(trapCause_to_bits(cause)); mstatus[MPIE] = mstatus[MIE]; mstatus[MIE] = 0b0; mstatus[MPP] = privLevel_to_bits(cur_privilege); - mtval = 
tval(info); + mstatus[MPV] = virtMode_to_bits(cur_virtualization); + mstatus[GVA] = if context.info_is_gva then 0b1 else 0b0; mepc = pc; + mtval = some_or_zero(context.excinfo); + mtval2 = some_or_zero(context.excinfo2); + mtinst = some_or_zero(context.excinst); - cur_privilege = del_priv; + cur_privilege = del_priv; + cur_virtualization = del_virt; - handle_trap_extension(del_priv, pc, ext); + handle_trap_extension(del_priv, del_virt, pc, context.ext); if get_config_print_reg() then print_reg("CSR mstatus <- " ^ BitStr(mstatus.bits)); - prepare_trap_vector(del_priv, mcause) + prepare_trap_vector(del_priv, del_virt, mcause) }, - Supervisor => { - assert (extensionEnabled(Ext_S), "no supervisor mode present for delegation"); - - scause[IsInterrupt] = bool_to_bits(intr); - scause[Cause] = zero_extend(c); - - mstatus[SPIE] = mstatus[SIE]; - mstatus[SIE] = 0b0; - mstatus[SPP] = match cur_privilege { - User => 0b0, - Supervisor => 0b1, - Machine => internal_error(__FILE__, __LINE__, "invalid privilege for s-mode trap") - }; - stval = tval(info); - sepc = pc; - - cur_privilege = del_priv; - - handle_trap_extension(del_priv, pc, ext); - - if get_config_print_reg() - then print_reg("CSR mstatus <- " ^ BitStr(mstatus.bits)); - - prepare_trap_vector(del_priv, scause) + Supervisor => match (del_virt) { + V0 => { + assert (extensionEnabled(Ext_S), "no supervisor mode present for delegation"); + + scause[IsInterrupt] = bool_to_bits(trapCause_is_interrupt(cause)); + scause[Cause] = zero_extend(trapCause_to_bits(cause)); + + mstatus[SPIE] = mstatus[SIE]; + mstatus[SIE] = 0b0; + mstatus[SPP] = match cur_privilege { + User => 0b0, + Supervisor => 0b1, + Machine => internal_error(__FILE__, __LINE__, "invalid privilege for s-mode trap") + }; + hstatus[SPV] = virtMode_to_bits(cur_virtualization); + hstatus[GVA] = bool_to_bits(context.info_is_gva); + if cur_virtualization == V1 then hstatus[SPVP] = mstatus[SPP]; + sepc = pc; + stval = some_or_zero(context.excinfo); + htval = 
some_or_zero(context.excinfo2); + htinst = some_or_zero(context.excinst); + + cur_privilege = del_priv; + cur_virtualization = del_virt; + + handle_trap_extension(del_priv, del_virt, pc, context.ext); + + if get_config_print_reg() + then print_reg("CSR mstatus <- " ^ BitStr(mstatus.bits)); + + prepare_trap_vector(del_priv, del_virt, scause) + }, + V1 => { + assert (extensionEnabled(Ext_S) & extensionEnabled(Ext_H), "no virtual supervisor mode present for delegation"); + + vscause[IsInterrupt] = bool_to_bits(trapCause_is_interrupt(cause)); + vscause[Cause] = zero_extend(trapCause_to_bits(cause)); + + vsstatus[SPIE] = mstatus[SIE]; + vsstatus[SIE] = 0b0; + vsstatus[SPP] = match cur_privilege { + User => 0b0, + Supervisor => 0b1, + Machine => internal_error(__FILE__, __LINE__, "invalid privilege for vs-mode trap") + }; + vsepc = pc; + vstval = some_or_zero(context.excinfo); + + cur_privilege = del_priv; + cur_virtualization = del_virt; + + handle_trap_extension(del_priv, del_virt, pc, context.ext); + + if get_config_print_reg() + then print_reg("CSR vsstatus <- " ^ BitStr(vsstatus.bits)); + + prepare_trap_vector(del_priv, del_virt, vscause) + } }, User => internal_error(__FILE__, __LINE__, "Invalid privilege level"), }; } -function exception_handler(cur_priv : Privilege, ctl : ctl_result, +function exception_handler(cur_priv : Privilege, + cur_virt: Virtualization, + ctl : ctl_result, pc: xlenbits) -> xlenbits = { - match (cur_priv, ctl) { - (_, CTL_TRAP(e)) => { - let del_priv = exception_delegatee(e.trap, cur_priv); + match (cur_priv, cur_virt, ctl) { + (_, _, CTL_TRAP(e, c)) => { + let (del_priv, del_virt) = exception_delegatee(e, cur_priv, cur_virt); if get_config_print_platform() - then print_platform("trapping from " ^ to_str(cur_priv) ^ " to " ^ to_str(del_priv) - ^ " to handle " ^ to_str(e.trap)); - trap_handler(del_priv, false, exceptionType_to_bits(e.trap), pc, e.excinfo, e.ext) + then print_platform("trapping from " ^ to_str(cur_priv, cur_virt) ^ " to " ^ 
to_str(del_priv, del_virt) + ^ " to handle " ^ to_str(e)); + trap_handler(del_priv, del_virt, pc, E(e), c) }, - (_, CTL_MRET()) => { + (_, _, CTL_MRET()) => { let prev_priv = cur_privilege; + let prev_virt = cur_virtualization; + mstatus[MIE] = mstatus[MPIE]; mstatus[MPIE] = 0b1; + cur_privilege = privLevel_of_bits(mstatus[MPP]); mstatus[MPP] = privLevel_to_bits(if extensionEnabled(Ext_U) then User else Machine); if cur_privilege != Machine then mstatus[MPRV] = 0b0; + cur_virtualization = virtMode_of_bits(mstatus[MPV]); + mstatus[MPV] = virtMode_to_bits(V0); + if get_config_print_reg() then print_reg("CSR mstatus <- " ^ BitStr(mstatus.bits)); if get_config_print_platform() - then print_platform("ret-ing from " ^ to_str(prev_priv) ^ " to " ^ to_str(cur_privilege)); + then print_platform("ret-ing from " ^ to_str(prev_priv, prev_virt) ^ " to " ^ to_str(cur_privilege, cur_virtualization)); - prepare_xret_target(Machine) & pc_alignment_mask() + prepare_xret_target(Machine, prev_virt) & pc_alignment_mask() }, - (_, CTL_SRET()) => { + (_, V0, CTL_SRET()) => { let prev_priv = cur_privilege; + let prev_virt = cur_virtualization; + mstatus[SIE] = mstatus[SPIE]; mstatus[SPIE] = 0b1; + cur_privilege = if mstatus[SPP] == 0b1 then Supervisor else User; - mstatus[SPP] = 0b0; - if cur_privilege != Machine - then mstatus[MPRV] = 0b0; + mstatus[SPP] = 0b0; - if get_config_print_reg() - then print_reg("CSR mstatus <- " ^ BitStr(mstatus.bits)); + cur_virtualization = virtMode_of_bits(hstatus[SPV]); + hstatus[SPV] = virtMode_to_bits(V0); + + if get_config_print_reg() then { + print_reg("CSR mstatus <- " ^ BitStr(mstatus.bits)); + print_reg("CSR hstatus <- " ^ BitStr(hstatus.bits)); + }; if get_config_print_platform() - then print_platform("ret-ing from " ^ to_str(prev_priv) - ^ " to " ^ to_str(cur_privilege)); + then print_platform("ret-ing from " ^ to_str(prev_priv, prev_virt) + ^ " to " ^ to_str(cur_privilege, cur_virtualization)); - prepare_xret_target(Supervisor) & 
pc_alignment_mask() + prepare_xret_target(Supervisor, prev_virt) & pc_alignment_mask() + }, + (_, V1, CTL_SRET()) => { + let prev_priv = cur_privilege; + let prev_virt = cur_virtualization; + + vsstatus[SIE] = vsstatus[SPIE]; + vsstatus[SPIE] = 0b1; + + cur_privilege = if vsstatus[SPP] == 0b1 then Supervisor else User; + vsstatus[SPP] = 0b0; + + cur_virtualization = V1; + + if get_config_print_reg() then { + print_reg("CSR mstatus <- " ^ BitStr(mstatus.bits)); + print_reg("CSR hstatus <- " ^ BitStr(hstatus.bits)); + }; + if get_config_print_platform() + then print_platform("ret-ing from " ^ to_str(prev_priv, prev_virt) + ^ " to " ^ to_str(cur_privilege, cur_virtualization)); + + prepare_xret_target(Supervisor, prev_virt) & pc_alignment_mask() }, } } -function handle_mem_exception(addr : xlenbits, e : ExceptionType) -> unit = { - let t : sync_exception = struct { trap = e, - excinfo = Some(addr), - ext = None() } in - set_next_pc(exception_handler(cur_privilege, CTL_TRAP(t), PC)) +function handle_exception(e: ExceptionType, c: ExceptionContext) -> unit = { + let c = {c with excinst = if c.excinst != None() + then c.excinst + else if (plat_xtinst_has_transformed_inst() & exc_causes_transformed_inst_in_xtinst(e)) + then Some(zero_extend(instbits_transformed)) + else None()} in + set_next_pc(exception_handler(cur_privilege, cur_virtualization, CTL_TRAP(e, c), PC)) } -function handle_exception(e: ExceptionType) -> unit = { - let t : sync_exception = struct { trap = e, - excinfo = None(), - ext = None() } in - set_next_pc(exception_handler(cur_privilege, CTL_TRAP(t), PC)) +function handle_mem_exception(addr : xlenbits, e : ExceptionType) -> unit = { + /* handle_mem_exception only supports the following exception causes */ + assert(match e { + E_Breakpoint() => true, + E_Fetch_Addr_Align() => true, + E_Load_Addr_Align() => true, + E_SAMO_Addr_Align() => true, + E_Fetch_Access_Fault() => true, + E_Load_Access_Fault() => true, + E_SAMO_Access_Fault() => true, + _ => false + 
}, "exception not supported by current handler: " ^ to_str(e)); + handle_exception(e, mem_exception_context(addr, cur_virtualization == V1)) } -function handle_interrupt(i : InterruptType, del_priv : Privilege) -> unit = - set_next_pc(trap_handler(del_priv, true, interruptType_to_bits(i), PC, None(), None())) +function handle_interrupt(i : InterruptType, del_priv : Privilege, del_virt: Virtualization) -> unit = + set_next_pc(trap_handler(del_priv, del_virt, PC, I(i), empty_exception_context())) /* state state initialization */ @@ -428,10 +516,14 @@ function init_sys() -> unit = { misa[U] = 0b1; /* user-mode */ misa[S] = 0b1; /* supervisor-mode */ misa[V] = bool_to_bits(sys_enable_vext()); /* vector extension */ + misa[H] = bool_to_bits(sys_enable_hext()); /* hypervisor extension */ if sys_enable_fdext() & sys_enable_zfinx() then internal_error(__FILE__, __LINE__, "F and Zfinx cannot both be enabled!"); + if sys_enable_next() & sys_enable_hext() + then internal_error(__FILE__, __LINE__, "The hypervisor extension currently does not consider the possibility of user-mode interrupts"); + /* We currently support both F and D */ misa[F] = bool_to_bits(sys_enable_fdext()); /* single-precision */ misa[D] = if flen >= 64 @@ -444,10 +536,8 @@ function init_sys() -> unit = { mstatus[MPP] = privLevel_to_bits(lowest_supported_privLevel()); /* set to little-endian mode */ - if xlen == 64 then { - mstatus = Mk_Mstatus([mstatus.bits with 37 .. 36 = 0b00]) - }; - mstatush.bits = zeros(); + mstatus[MBE] = 0b0; + mstatus[SBE] = 0b0; mip.bits = zeros(); mie.bits = zeros(); @@ -488,6 +578,8 @@ function init_sys() -> unit = { // PMP's L and A fields are set to 0 on reset. 
init_pmp(); + if extensionEnabled(Ext_H) then init_hext() else cur_virtualization = V0; + // log compatibility with spike if get_config_print_reg() then print_reg("CSR mstatus <- " ^ BitStr(mstatus.bits) ^ " (input: " ^ BitStr(zeros() : xlenbits) ^ ")") diff --git a/model/riscv_sys_exceptions.sail b/model/riscv_sys_exceptions.sail index 4e4b550ad..c839a239f 100644 --- a/model/riscv_sys_exceptions.sail +++ b/model/riscv_sys_exceptions.sail @@ -15,14 +15,17 @@ function ext_check_xret_priv (p : Privilege) : Privilege -> bool = true /* Called if above check fails */ function ext_fail_xret_priv () : unit -> unit = () -function handle_trap_extension(p : Privilege, pc : xlenbits, u : option(unit)) -> unit = () +function handle_trap_extension(p : Privilege, v : Virtualization, pc : xlenbits, u : option(unit)) -> unit = () /* used for traps and ECALL */ -function prepare_trap_vector(p : Privilege, cause : Mcause) -> xlenbits = { +function prepare_trap_vector(p : Privilege, v : Virtualization, cause : Mcause) -> xlenbits = { let tvec : Mtvec = match p { Machine => mtvec, - Supervisor => stvec, - User => internal_error(__FILE__, __LINE__, "Invalid privilege level"), + Supervisor => match v { + V0 => stvec, + V1 => vstvec, + }, + User => internal_error(__FILE__, __LINE__, "Invalid privilege level"), }; match tvec_addr(tvec, cause) { Some(epc) => epc, @@ -37,28 +40,34 @@ function prepare_trap_vector(p : Privilege, cause : Mcause) -> xlenbits = { * prepare_xret_target: used to get the value for control transfer to the xret target */ -val get_xret_target : Privilege -> xlenbits -function get_xret_target(p) = +val get_xret_target : (Privilege, Virtualization) -> xlenbits +function get_xret_target(p, v) = match p { - Machine => mepc, - Supervisor => sepc, - User => internal_error(__FILE__, __LINE__, "Invalid privilege level"), + Machine => mepc, + Supervisor => match v { + V0 => sepc, + V1 => vsepc + }, + User => internal_error(__FILE__, __LINE__, "Invalid privilege level"), } -val 
set_xret_target : (Privilege, xlenbits) -> xlenbits -function set_xret_target(p, value) = { +val set_xret_target : (Privilege, Virtualization, xlenbits) -> xlenbits +function set_xret_target(p, v, value) = { let target = legalize_xepc(value); match p { - Machine => mepc = target, - Supervisor => sepc = target, - User => internal_error(__FILE__, __LINE__, "Invalid privilege level"), + Machine => mepc = target, + Supervisor => match v { + V0 => sepc = target, + V1 => vsepc = target + }, + User => internal_error(__FILE__, __LINE__, "Invalid privilege level"), }; target } -val prepare_xret_target : (Privilege) -> xlenbits -function prepare_xret_target(p) = - get_xret_target(p) +val prepare_xret_target : (Privilege, Virtualization) -> xlenbits +function prepare_xret_target(p, v) = + get_xret_target(p, v) /* other trap-related CSRs */ @@ -68,6 +77,9 @@ function get_mtvec() -> xlenbits = function get_stvec() -> xlenbits = stvec.bits +function get_vstvec() -> xlenbits = + vstvec.bits + function set_mtvec(value : xlenbits) -> xlenbits = { mtvec = legalize_tvec(mtvec, value); mtvec.bits @@ -77,3 +89,8 @@ function set_stvec(value : xlenbits) -> xlenbits = { stvec = legalize_tvec(stvec, value); stvec.bits } + +function set_vstvec(value : xlenbits) -> xlenbits = { + vstvec = legalize_tvec(vstvec, value); + vstvec.bits +} diff --git a/model/riscv_sys_regs.sail b/model/riscv_sys_regs.sail index 8bbf68e75..e6cdc67a4 100644 --- a/model/riscv_sys_regs.sail +++ b/model/riscv_sys_regs.sail @@ -116,6 +116,9 @@ val sys_enable_bext = pure "sys_enable_bext" : unit -> bool val sys_enable_zicbom = pure "sys_enable_zicbom" : unit -> bool val sys_enable_zicboz = pure "sys_enable_zicboz" : unit -> bool +/* whether misa.h was enabled at boot */ +val sys_enable_hext = pure "sys_enable_hext" : unit -> bool + /* This function allows an extension to veto a write to Misa if it would violate an alignment restriction on unsetting C. If it returns true the write will have no effect. 
*/ @@ -142,6 +145,9 @@ function clause extensionEnabled(Ext_U) = misa[U] == 0b1 enum clause extension = Ext_S function clause extensionEnabled(Ext_S) = misa[S] == 0b1 +enum clause extension = Ext_H +function clause extensionEnabled(Ext_H) = misa[H] == 0b1 + /* Hardware Performance Monitoring counters */ enum clause extension = Ext_Zihpm function clause extensionEnabled(Ext_Zihpm) = true @@ -161,25 +167,20 @@ function have_privLevel(priv : priv_level) -> bool = 0b11 => true, } -bitfield Mstatush : bits(32) = { - MBE : 5, - SBE : 4 -} -register mstatush : Mstatush - -bitfield Mstatus : xlenbits = { +bitfield Mstatus : bits(64) = { SD : xlen - 1, - // The MBE and SBE fields are in mstatus in RV64 and absent in RV32. - // On RV32, they are in mstatush, which doesn't exist in RV64. For now, - // they are handled in an ad-hoc way. - // MBE : 37 - // SBE : 36 + // These fields are in mstatus in RV64 and absent in RV32. + // On RV32, they are in mstatush, which doesn't exist in RV64. + MPV : 39, + GVA : 38, + MBE : 37, + SBE : 36, // The SXL and UXL fields don't exist on RV32, so they are modelled // via explicit getters and setters; see below. - // SXL : 35 .. 34, - // UXL : 33 .. 32, + SXL : 35 .. 34, + UXL : 33 .. 32, TSR : 22, TW : 21, @@ -196,6 +197,7 @@ bitfield Mstatus : xlenbits = { SPP : 8, MPIE : 7, + UBE : 6, SPIE : 5, MIE : 3, @@ -208,43 +210,37 @@ function effectivePrivilege(t : AccessType(ext_access_type), m : Mstatus, priv : then privLevel_of_bits(m[MPP]) else priv +function effectiveVirtualization(t : AccessType(ext_access_type), m : Mstatus, virt : Virtualization) -> Virtualization = + if t != Execute() & m[MPRV] == 0b1 + then virtMode_of_bits(mstatus[MPV]) + else virt + function get_mstatus_SXL(m : Mstatus) -> arch_xlen = { if xlen == 32 then arch_to_bits(RV32) - else m.bits[35 .. 
34] + else m[SXL] } function set_mstatus_SXL(m : Mstatus, a : arch_xlen) -> Mstatus = { if xlen == 32 then m - else { - let m = vector_update_subrange(m.bits, 35, 34, a); - Mk_Mstatus(m) - } + else [m with SXL = a] } function get_mstatus_UXL(m : Mstatus) -> arch_xlen = { if xlen == 32 then arch_to_bits(RV32) - else m.bits[33 .. 32] + else m[UXL] } function set_mstatus_UXL(m : Mstatus, a : arch_xlen) -> Mstatus = { if xlen == 32 then m - else { - let m = vector_update_subrange(m.bits, 33, 32, a); - Mk_Mstatus(m) - } + else [m with UXL = a] } -function legalize_mstatus(o : Mstatus, v : xlenbits) -> Mstatus = { - /* - * Populate all defined fields using the bits of v, stripping anything - * that does not have a matching bitfield entry. All bits above 32 are handled - * explicitly later. - */ - let m : Mstatus = Mk_Mstatus(zero_extend(v[22 .. 7] @ 0b0 @ v[5 .. 3] @ 0b0 @ v[1 .. 0])); +function legalize_mstatus(o : Mstatus, v : bits(64)) -> Mstatus = { + let m = Mk_Mstatus(v[63 .. 63] @ zeros(23) @ v[39 .. 32] @ zeros(9) @ v[22 .. 3] @ 0b0 @ v[1 .. 0]); /* Legalize MPP */ let m = [m with MPP = if have_privLevel(m[MPP]) then m[MPP] @@ -268,9 +264,12 @@ function legalize_mstatus(o : Mstatus, v : xlenbits) -> Mstatus = { let m = set_mstatus_UXL(m, get_mstatus_UXL(o)); /* We don't currently support changing MBE and SBE. */ - let m = if xlen == 64 then { - Mk_Mstatus([m.bits with 37 .. 
36 = 0b00]) - } else m; + let m = if sizeof(xlen) == 64 + then [m with MBE = 0b0] + else m; + let m = if sizeof(xlen) == 64 + then [m with SBE = 0b0] + else m; if not(extensionEnabled(Ext_U)) then { let m = [m with MPRV = 0b0]; @@ -300,51 +299,85 @@ function in32BitMode() -> bool = { /* interrupt processing state */ bitfield Minterrupts : xlenbits = { - MEI : 11, /* external interrupts */ - SEI : 9, - - MTI : 7, /* timers interrupts */ - STI : 5, - - MSI : 3, /* software interrupts */ - SSI : 1, + SGEI : 12, /* external interrupts */ + MEI : 11, + VSEI : 10, + SEI : 9, + UEI : 8, + + MTI : 7, /* timers interrupts */ + VSTI : 6, + STI : 5, + UTI : 4, + + MSI : 3, /* software interrupts */ + VSSI : 2, + SSI : 1, + USI : 0, } register mip : Minterrupts /* Pending */ register mie : Minterrupts /* Enabled */ register mideleg : Minterrupts /* Delegation to S-mode */ +/*! Legalize mip from the previous mip (o) and the written value (v) */ function legalize_mip(o : Minterrupts, v : xlenbits) -> Minterrupts = { - /* The only writable bits are the S-mode bits, and with the 'N' - * extension, the U-mode bits. 
 */ let v = Mk_Minterrupts(v); - [o with SEI = v[SEI], STI = v[STI], SSI = v[SSI]] + /* S-mode bits are writable */ + let m = [o with SEI = v[SEI], STI = v[STI], SSI = v[SSI]]; + /* As SGEI, VSEI, VSTI & VSSI in mip are aliases for their corresponding bits + in hip, only mip.VSSI is writable */ + let m = if extensionEnabled(Ext_H) + then [m with VSSI = v[VSSI]] + else m; + m } function legalize_mie(o : Minterrupts, v : xlenbits) -> Minterrupts = { let v = Mk_Minterrupts(v); - [o with + let m = [o with MEI = v[MEI], MTI = v[MTI], MSI = v[MSI], SEI = v[SEI], STI = v[STI], SSI = v[SSI] - ] + ]; + /* These interrupts can only be enabled when the hypervisor extension is present */ + if extensionEnabled(Ext_H) + then [m with + SGEI = v[SGEI], + VSEI = v[VSEI], + VSTI = v[VSTI], + VSSI = v[VSSI] + ] + else m } function legalize_mideleg(o : Minterrupts, v : xlenbits) -> Minterrupts = { /* M-mode interrupt delegation bits "should" be hardwired to 0. */ /* FIXME: needs verification against eventual spec language. 
 */ - [Mk_Minterrupts(v) with MEI = 0b0, MTI = 0b0, MSI = 0b0] + let m = [Mk_Minterrupts(v) with MEI = 0b0, MTI = 0b0, MSI = 0b0]; + /* VS-mode interrupts are always delegated when the H-extension is present */ + let m = if extensionEnabled(Ext_H) + then [m with VSEI = 0b1, VSTI = 0b1, VSSI = 0b1] + else m; + /* If GEILEN > 0, SGEI is always delegated */ + let m = if extensionEnabled(Ext_H) then [m with SGEI = 0b1] else m; + m } /* exception processing state */ bitfield Medeleg : xlenbits = { + SAMO_GPage_Fault : 23, + Virtual_Instr : 22, + Load_GPage_Fault : 21, + Fetch_GPage_Fault : 20, SAMO_Page_Fault : 15, Load_Page_Fault : 13, Fetch_Page_Fault : 12, MEnvCall : 11, + VSEnvCall : 10, SEnvCall : 9, UEnvCall : 8, SAMO_Access_Fault : 7, @@ -522,11 +555,11 @@ register mconfigptr : xlenbits /* S-mode registers */ /* sstatus reveals a subset of mstatus */ -bitfield Sstatus : xlenbits = { +bitfield Sstatus : bits(64) = { SD : xlen - 1, // The UXL field does not exist on RV32, so we define an explicit // getter and setter below. - // UXL : 30 .. 29, + UXL : 33 .. 32, MXR : 19, SUM : 18, XS : 16 .. 15, @@ -584,7 +617,7 @@ function lift_sstatus(m : Mstatus, s : Sstatus) -> Mstatus = { } function legalize_sstatus(m : Mstatus, v : xlenbits) -> Mstatus = { - legalize_mstatus(m, lift_sstatus(m, Mk_Sstatus(v)).bits) + legalize_mstatus(m, lift_sstatus(m, Mk_Sstatus(zero_extend(v))).bits) } @@ -742,8 +775,10 @@ bitfield MEnvcfg : bits(64) = { STCE : 63, // Page Based Memory Types Extension PBMTE : 62, + // Hardware update of A/D bits Extension + ADUE : 61, // Reserved WPRI bits. - wpri_1 : 61 .. 8, + wpri_1 : 60 .. 
8, // Cache Block Zero instruction Enable CBZE : 7, // Cache Block Clean and Flush instruction Enable diff --git a/model/riscv_types.sail b/model/riscv_types.sail index c333c4e17..3ff194c8b 100644 --- a/model/riscv_types.sail +++ b/model/riscv_types.sail @@ -31,6 +31,9 @@ let xlen_min_signed = 0 - 2 ^ (xlen - 1) type half = bits(16) type word = bits(32) +/* max instruction length */ +type ilenbits = bits(32) + /* register identifiers */ type regidx = bits(5) @@ -151,51 +154,87 @@ enum word_width = {BYTE, HALF, WORD, DOUBLE} enum InterruptType = { I_U_Software, I_S_Software, + I_VS_Software, I_M_Software, I_U_Timer, I_S_Timer, + I_VS_Timer, I_M_Timer, I_U_External, I_S_External, - I_M_External + I_VS_External, + I_M_External, + I_G_External } val interruptType_to_bits : InterruptType -> exc_code function interruptType_to_bits (i) = match (i) { - I_U_Software => 0x00, - I_S_Software => 0x01, - I_M_Software => 0x03, - I_U_Timer => 0x04, - I_S_Timer => 0x05, - I_M_Timer => 0x07, - I_U_External => 0x08, - I_S_External => 0x09, - I_M_External => 0x0b + I_U_Software => 0x00, + I_S_Software => 0x01, + I_VS_Software => 0x02, + I_M_Software => 0x03, + I_U_Timer => 0x04, + I_S_Timer => 0x05, + I_VS_Timer => 0x06, + I_M_Timer => 0x07, + I_U_External => 0x08, + I_S_External => 0x09, + I_VS_External => 0x0a, + I_M_External => 0x0b, + I_G_External => 0x0c } +val interruptType_to_str : InterruptType -> string +function interruptType_to_str (i) = + match (i) { + I_U_Software => "user-software-interrupt", + I_S_Software => "supervisor-software-interrupt", + I_VS_Software => "virtual-supervisor-software-interrupt", + I_M_Software => "machine-software-interrupt", + I_U_Timer => "user-timer-interrupt", + I_S_Timer => "supervisor-timer-interrupt", + I_VS_Timer => "virtual-supervisor-timer-interrupt", + I_M_Timer => "machine-timer-interrupt", + I_U_External => "user-external-interrupt", + I_S_External => "supervisor-external-interrupt", + I_VS_External => 
"virtual-supervisor-external-interrupt", + I_M_External => "machine-external-interrupt", + I_G_External => "guest-external-interrupt" + } + +overload to_str = {interruptType_to_str} + /* architectural exception definitions */ union ExceptionType = { - E_Fetch_Addr_Align : unit, - E_Fetch_Access_Fault : unit, - E_Illegal_Instr : unit, - E_Breakpoint : unit, - E_Load_Addr_Align : unit, - E_Load_Access_Fault : unit, - E_SAMO_Addr_Align : unit, - E_SAMO_Access_Fault : unit, - E_U_EnvCall : unit, - E_S_EnvCall : unit, - E_Reserved_10 : unit, - E_M_EnvCall : unit, - E_Fetch_Page_Fault : unit, - E_Load_Page_Fault : unit, - E_Reserved_14 : unit, - E_SAMO_Page_Fault : unit, - - /* extensions */ - E_Extension : ext_exc_type + E_Fetch_Addr_Align : unit, + E_Fetch_Access_Fault : unit, + E_Illegal_Instr : unit, + E_Breakpoint : unit, + E_Load_Addr_Align : unit, + E_Load_Access_Fault : unit, + E_SAMO_Addr_Align : unit, + E_SAMO_Access_Fault : unit, + E_U_EnvCall : unit, + E_S_EnvCall : unit, + E_VS_EnvCall : unit, + E_M_EnvCall : unit, + E_Fetch_Page_Fault : unit, + E_Load_Page_Fault : unit, + E_Reserved_14 : unit, + E_SAMO_Page_Fault : unit, + E_Reserved_16 : unit, + E_Reserved_17 : unit, + E_Reserved_18 : unit, + E_Reserved_19 : unit, + E_Fetch_GPage_Fault : unit, + E_Load_GPage_Fault : unit, + E_Virtual_Instr : unit, + E_SAMO_GPage_Fault : unit, + + /* extensions */ + E_Extension : ext_exc_type } val exceptionType_to_bits : ExceptionType -> exc_code @@ -211,12 +250,20 @@ function exceptionType_to_bits(e) = E_SAMO_Access_Fault() => 0x07, E_U_EnvCall() => 0x08, E_S_EnvCall() => 0x09, - E_Reserved_10() => 0x0a, + E_VS_EnvCall() => 0x0a, E_M_EnvCall() => 0x0b, E_Fetch_Page_Fault() => 0x0c, E_Load_Page_Fault() => 0x0d, E_Reserved_14() => 0x0e, E_SAMO_Page_Fault() => 0x0f, + E_Reserved_16() => 0x10, + E_Reserved_17() => 0x11, + E_Reserved_18() => 0x12, + E_Reserved_19() => 0x13, + E_Fetch_GPage_Fault() => 0x14, + E_Load_GPage_Fault() => 0x15, + E_Virtual_Instr() => 0x16, + 
E_SAMO_GPage_Fault() => 0x17, /* extensions */ E_Extension(e) => ext_exc_type_to_bits(e) @@ -235,12 +282,20 @@ function num_of_ExceptionType(e) = E_SAMO_Access_Fault() => 7, E_U_EnvCall() => 8, E_S_EnvCall() => 9, - E_Reserved_10() => 10, + E_VS_EnvCall() => 10, E_M_EnvCall() => 11, E_Fetch_Page_Fault() => 12, E_Load_Page_Fault() => 13, E_Reserved_14() => 14, E_SAMO_Page_Fault() => 15, + E_Reserved_16() => 16, + E_Reserved_17() => 17, + E_Reserved_18() => 18, + E_Reserved_19() => 19, + E_Fetch_GPage_Fault() => 20, + E_Load_GPage_Fault() => 21, + E_Virtual_Instr() => 22, + E_SAMO_GPage_Fault() => 23, /* extensions */ E_Extension(e) => num_of_ext_exc_type(e) @@ -256,16 +311,24 @@ function exceptionType_to_str(e) = E_Breakpoint() => "breakpoint", E_Load_Addr_Align() => "misaligned-load", E_Load_Access_Fault() => "load-access-fault", - E_SAMO_Addr_Align() => "misaligned-store/amo", + E_SAMO_Addr_Align() => "misaligned-store/amo", E_SAMO_Access_Fault() => "store/amo-access-fault", E_U_EnvCall() => "u-call", E_S_EnvCall() => "s-call", - E_Reserved_10() => "reserved-0", + E_VS_EnvCall() => "vs-call", E_M_EnvCall() => "m-call", E_Fetch_Page_Fault() => "fetch-page-fault", E_Load_Page_Fault() => "load-page-fault", - E_Reserved_14() => "reserved-1", + E_Reserved_14() => "reserved-14", E_SAMO_Page_Fault() => "store/amo-page-fault", + E_Reserved_16() => "reserved-16", + E_Reserved_17() => "reserved-17", + E_Reserved_18() => "reserved-18", + E_Reserved_19() => "reserved-19", + E_Fetch_GPage_Fault() => "fetch-guest-page-fault", + E_Load_GPage_Fault() => "load-guest-page-fault", + E_Virtual_Instr() => "virtual-instruction", + E_SAMO_GPage_Fault() => "store/amo-guest-page-fault", /* extensions */ E_Extension(e) => ext_exc_type_to_str(e) @@ -273,6 +336,93 @@ function exceptionType_to_str(e) = overload to_str = {exceptionType_to_str} +/* + * Update exception type with a given memory access type. 
+ * + * Note: could probably be modeled in a nicer, more intuitive manner + */ + +val E_Addr_Align_of_AccessType : forall 'a . (AccessType('a)) -> ExceptionType +function E_Addr_Align_of_AccessType(a) = + match a { + Read(_) => E_Load_Addr_Align(), + Write(_) => E_SAMO_Addr_Align(), + ReadWrite(_) => E_SAMO_Addr_Align(), + Execute(_) => E_Fetch_Addr_Align(), + } + +val E_Access_Fault_of_AccessType : forall 'a . (AccessType('a)) -> ExceptionType +function E_Access_Fault_of_AccessType(a) = + match a { + Read(_) => E_Load_Access_Fault(), + Write(_) => E_SAMO_Access_Fault(), + ReadWrite(_) => E_SAMO_Access_Fault(), + Execute(_) => E_Fetch_Access_Fault(), + } + +val E_Page_Fault_of_AccessType : forall 'a . (AccessType('a)) -> ExceptionType +function E_Page_Fault_of_AccessType(a) = + match a { + Read(_) => E_Load_Page_Fault(), + Write(_) => E_SAMO_Page_Fault(), + ReadWrite(_) => E_SAMO_Page_Fault(), + Execute(_) => E_Fetch_Page_Fault(), + } + +val E_GPage_Fault_of_AccessType : forall 'a . (AccessType('a)) -> ExceptionType +function E_GPage_Fault_of_AccessType(a) = + match a { + Read(_) => E_Load_GPage_Fault(), + Write(_) => E_SAMO_GPage_Fault(), + ReadWrite(_) => E_SAMO_GPage_Fault(), + Execute(_) => E_Fetch_GPage_Fault(), + } + +val exceptionType_update_AccessType : forall 'a . 
(ExceptionType, AccessType('a)) -> ExceptionType +function exceptionType_update_AccessType(e, a) = +match(e) { + E_Fetch_Addr_Align() => E_Addr_Align_of_AccessType(a), + E_Load_Addr_Align() => E_Addr_Align_of_AccessType(a), + E_SAMO_Addr_Align() => E_Addr_Align_of_AccessType(a), + E_Fetch_Access_Fault() => E_Access_Fault_of_AccessType(a), + E_Load_Access_Fault() => E_Access_Fault_of_AccessType(a), + E_SAMO_Access_Fault() => E_Access_Fault_of_AccessType(a), + E_Fetch_Page_Fault() => E_Page_Fault_of_AccessType(a), + E_Load_Page_Fault() => E_Page_Fault_of_AccessType(a), + E_SAMO_Page_Fault() => E_Page_Fault_of_AccessType(a), + E_Fetch_GPage_Fault() => E_GPage_Fault_of_AccessType(a), + E_Load_GPage_Fault() => E_GPage_Fault_of_AccessType(a), + E_SAMO_GPage_Fault() => E_GPage_Fault_of_AccessType(a), + _ => e /* Other exceptions are not access type specific */ +} + +/* an architectural trap can be caused by either an interrupt or an exception */ + +union TrapCause = { + I : InterruptType, + E : ExceptionType, +} + +val trapCause_to_bits : TrapCause -> exc_code +function trapCause_to_bits(t) = match t { + I(i) => interruptType_to_bits(i), + E(e) => exceptionType_to_bits(e), +} + +val trapCause_is_interrupt : TrapCause -> bool +function trapCause_is_interrupt(t) = match t { + I(_) => true, + _ => false, +} + +val trapCause_to_str : TrapCause -> string +function trapCause_to_str(t) = match t { + I(i) => interruptType_to_str(i), + E(e) => exceptionType_to_str(e), +} + +overload to_str = {trapCause_to_str} + /* trap modes */ type tv_mode = bits(2) @@ -309,17 +459,60 @@ function extStatus_of_bits(e) = 0b11 => Dirty } -/* supervisor-level address translation modes */ +/* address translation stages & modes */ + +/* + * We model 3 stages: + * - Stage_S: va --> pa (V=0) + * - Stage_VS: va --> gpa (V=1) + * - Stage_G: gpa -> spa (V=1) + */ +enum AddressTranslationStage = {Stage_S, Stage_VS, Stage_G} + +function AddressTranslationStage_to_str(s : AddressTranslationStage) -> string = 
+ match s { + Stage_S => "S", + Stage_VS => "VS", + Stage_G => "G" + } + +overload to_str = {AddressTranslationStage_to_str} + +enum AddressTranslationMode = {Bare, Sv32, Sv39, Sv48, Sv57, Sv32x4, Sv39x4, Sv48x4, Sv57x4} + +function AddressTranslationMode_to_str(m : AddressTranslationMode) -> string = match m { + Bare => "Bare", + Sv32 => "Sv32", + Sv39 => "Sv39", + Sv48 => "Sv48", + Sv57 => "Sv57", + Sv32x4 => "Sv32x4", + Sv39x4 => "Sv39x4", + Sv48x4 => "Sv48x4", + Sv57x4 => "Sv57x4" +} + +overload to_str = {AddressTranslationMode_to_str} -type satp_mode = bits(4) -enum SATPMode = {Sbare, Sv32, Sv39, Sv48} +type atp64_mode = bits(4) -function satp64Mode_of_bits(a : Architecture, m : satp_mode) -> option(SATPMode) = +function satp64Mode_of_bits(a : Architecture, m : atp64_mode) -> option(AddressTranslationMode) = match (a, m) { - (_, 0x0) => Some(Sbare), + (_, 0x0) => Some(Bare), (RV32, 0x1) => Some(Sv32), (RV64, 0x8) => Some(Sv39), (RV64, 0x9) => Some(Sv48), + (RV64, 0xa) => Some(Sv57), + (_, _) => None() + } + +function hgatp64Mode_of_bits(a : Architecture, m : atp64_mode) -> option(AddressTranslationMode) = + match (a, m) { + (_, 0x0) => Some(Bare), + (RV32, 0x1) => Some(Sv32x4), + (RV64, 0x8) => Some(Sv39x4), + (RV64, 0x9) => Some(Sv48x4), + (RV64, 0xa) => Some(Sv57x4), (_, _) => None() } diff --git a/model/riscv_types_hext.sail b/model/riscv_types_hext.sail new file mode 100644 index 000000000..9e99f87df --- /dev/null +++ b/model/riscv_types_hext.sail @@ -0,0 +1,48 @@ +/*=======================================================================================*/ +/* This Sail RISC-V architecture model, comprising all files and */ +/* directories except where otherwise noted is subject the BSD */ +/* two-clause license in the LICENSE file. 
*/ +/* */ +/* SPDX-License-Identifier: BSD-2-Clause */ +/*=======================================================================================*/ + +/* Virtualization modes */ + +type virt_mode = bits(1) +enum Virtualization = {V0, V1} + +val virtMode_to_bits : Virtualization -> virt_mode +function virtMode_to_bits (v) = + match (v) { + V0 => 0b0, + V1 => 0b1 + } + +val virtMode_of_bits : virt_mode -> Virtualization +function virtMode_of_bits(v) = + match (v) { + 0b0 => V0, + 0b1 => V1 + } + +val virtMode_to_str : Virtualization -> string +function virtMode_to_str (v) = + match (v) { + V0 => "V=0", + V1 => "V=1" + } + +overload to_str = {virtMode_to_str} + +val virt_priv_to_str : (Privilege, Virtualization) -> string +function virt_priv_to_str (priv, virt) = + match (priv, virt) { + (User, V1) => "VU", + (User, V0) => "U", + (Supervisor, V1) => "VS", + (Supervisor, V0) => "HS", + (Machine, V1) => internal_error(__FILE__, __LINE__, "illegal privilege mode"), + (Machine, V0) => "M", + } + +overload to_str = {virt_priv_to_str} diff --git a/model/riscv_vmem.sail b/model/riscv_vmem.sail index 6b394354d..3ef05c355 100644 --- a/model/riscv_vmem.sail +++ b/model/riscv_vmem.sail @@ -9,7 +9,7 @@ // **************************************************************** // Virtual memory address translation and memory protection, // including PTWs (Page Table Walks) and TLBs (Translation Look-aside Buffers) -// Supported VM modes: Sv32, Sv39, Sv48. TODO: Sv57 +// Supported VM modes: Sv32, Sv32x4, Sv39, Sv39x4, Sv48 and Sv48x4. TODO: Sv57 and Sv57x4 // STYLE NOTES: // PRIVATE items are used only within this VM code. 
@@ -41,7 +41,9 @@ function vpn_j_of_va(sv_params : SV_Params, level : PTW_Level) -> bits(64) = { let lsb : range(0,63) = pagesize_bits + level * sv_params.vpn_size_bits; assert (lsb < xlen); - let mask : bits(64) = zero_extend(ones(sv_params.vpn_size_bits)); + let mask : bits(64) = if (level == sv_params.levels - 1) + then zero_extend(ones(sv_params.root_vpn_size_bits)) + else zero_extend(ones(sv_params.vpn_size_bits)); ((va >> lsb) & mask) } @@ -52,9 +54,34 @@ function offset_of_va(va : bits(64)) -> bits(PAGESIZE_BITS) = va[pagesize_bits - // bits equal to the MSB of the virtual address. // Virtual address widths depend on the virtual memory mode. // PRIVATE -function is_valid_vAddr(struct { va_size_bits, _ } : SV_Params, - vAddr : bits(64)) -> bool = - vAddr == sign_extend(vAddr[va_size_bits - 1 .. 0]) +function is_valid_vAddr(struct { va_size_bits, va_sign_extends, _ } : SV_Params, + vAddr : bits(64)) -> bool = { + if va_sign_extends + then vAddr == sign_extend(vAddr[va_size_bits - 1 .. 0]) + else vAddr == zero_extend(vAddr[va_size_bits - 1 .. 
0]) +} + +// **************************************************************** +// Translate VA -> PA (S-stage), VA -> GPA (VS-stage) or GPA -> SPA(G-stage) + +// Result of Address Translation (for internal, private functions) +// PRIVATE +union TRi_Result = { + TRi_Address : (bits(64), ext_ptw), + TRi_Failure : (PTW_Error, ext_ptw) +} + +// Note: translate_stage is used for translation of implicit accesses during page walk +// => 'val' declaration should occur before 'function' definition of pt_walk, +// the 'function' definition of translate_stage is further down in this file + +// PRIVATE +val translate_stage : (bits(64), // virtual address + AccessType(ext_access_type), // Read/Write/ReadWrite/Execute + Privilege, // Machine/Supervisor/User + AddressTranslationStage, // S/VS/G + ext_ptw // ext_ptw + ) -> TRi_Result // **************************************************************** // Results of Page Table Walk (PTW) @@ -72,6 +99,7 @@ union PTW_Result = { // PRIVATE val pt_walk : (SV_Params, + AddressTranslationStage, // stage bits(64), // virtual addr AccessType(ext_access_type), // Read/Write/ReadWrite/Execute Privilege, // User/Supervisor/Machine @@ -84,6 +112,7 @@ val pt_walk : (SV_Params, -> PTW_Result function pt_walk(sv_params, + stage, va, ac, priv, @@ -101,7 +130,13 @@ function pt_walk(sv_params, // Below, 'pte_phys_addr' is XLEN bits because it's an arg to // 'mem_read_priv()' [riscv_mem.sail] where it's declared as xlenbits. // That def and this use need to be fixed together (TODO) - let pte_phys_addr : xlenbits = pte_addr[(xlen - 1) .. 0]; + let pte_phys_addr : xlenbits = match stage { + Stage_VS => match translate_stage(pte_addr, Read(Data), User, Stage_G, ext_ptw) { + TRi_Address(pte_pa, ext_ptw) => pte_pa[(sizeof(xlen) - 1) .. 0], + TRi_Failure(f, ext_ptw) => return PTW_Failure(PTW_Implicit(ptw_implicit_error(f), pte_addr, Read(Data)), ext_ptw), + }, + _ => pte_addr[(sizeof(xlen) - 1) .. 
0] + }; // Read this-level PTE from mem let mem_result = mem_read_priv(Read(Data), // AccessType @@ -130,7 +165,7 @@ function pt_walk(sv_params, // follow the pointer to walk next level let pt_base' : bits(64) = ppns << pagesize_bits; let level' = level - 1; - pt_walk(sv_params, va, ac, priv, mxr, do_sum, + pt_walk(sv_params, stage, va, ac, priv, mxr, do_sum, pt_base', level', global', ext_ptw) } else @@ -180,6 +215,9 @@ function pt_walk(sv_params, // PUBLIC: see also riscv_insts_zicsr.sail and other CSR-related files register satp : xlenbits +// PRIVATE +type asidbits = bits(16) + // See riscv_sys_regs.sail for legalize_satp{32,64}(). // WARNING: those functions legalize Mode but not ASID? // PUBLIC: invoked from writeCSR() to fixup WARL fields @@ -226,37 +264,91 @@ function satp_to_PT_base(satp_val : xlenbits) -> bits(64) = { ppn << pagesize_bits } -// Compute address translation mode from SATP register +// **************************************************************** +// Architectural HGATP CSR + +// PUBLIC +register hgatp : xlenbits + // PRIVATE -function translationMode(priv : Privilege) -> SATPMode = { - if priv == Machine then - Sbare - else if xlen == 32 then - match Mk_Satp32(satp)[Mode] { - 0b0 => Sbare, - 0b1 => Sv32 - } - else if xlen == 64 then { - // Translation mode is based on mstatus.SXL, which could be RV32 when xlen==64 - let arch = architecture(get_mstatus_SXL(mstatus)); - match arch { - Some(RV64) => { let mbits : bits(4) = satp[63 .. 60]; - match satp64Mode_of_bits(RV64, mbits) { // see riscv_types.sail - Some(m) => m, - None() => internal_error(__FILE__, __LINE__, - "invalid RV64 translation mode in satp") - } - }, - Some(RV32) => match Mk_Satp32(satp[31 .. 
0])[Mode] { // Note: satp is 64bits here - // When xlen is 64, mstatus.SXL (for S privilege) can be RV32 - 0b0 => Sbare, - 0b1 => Sv32 - }, - _ => internal_error(__FILE__, __LINE__, "unsupported address translation arch") - } +type vmidbits = bits(14) + +// See riscv_hext_regs.sail for legalize_hgatp{32,64}(). +// WARNING: those functions legalize Mode but not VMID? +// PUBLIC: invoked from writeCSR() to fixup WARL fields +function legalize_hgatp(a : Architecture, + o : xlenbits, // previous value of hgatp + v : xlenbits) // proposed new value of hgatp + -> xlenbits = { // new legal value of hgatp + if sizeof(xlen) == 32 then { + // The slice and extend ops below are no-ops when xlen==32, + // but appease the type-checker when xlen==64 (when this code is not executed!) + let o32 : bits(32) = o[31 .. 0]; + let v32 : bits(32) = v[31 .. 0]; + let new_hgatp : bits(32) = legalize_hgatp32(a, o32, v32); + zero_extend(new_hgatp); + } else if sizeof(xlen) == 64 then { + // The extend and truncate ops below are no-ops when xlen==64, + // but appease the type-checker when xlen==32 (when this code is not executed!) 
+ let o64 : bits(64) = zero_extend(o); + let v64 : bits(64) = zero_extend(v); + let new_hgatp : bits(64) = legalize_hgatp64(a, o64, v64); + truncate(new_hgatp, sizeof(xlen)) + } else + internal_error(__FILE__, __LINE__, "Unsupported xlen") +} + +// ---------------- +// Fields of HGATP + +// VMID is 7b in Sv32x4, 14b in Sv39x4/Sv48x4/Sv57x4: we use 14b for both +// PRIVATE +function hgatp_to_vmid(hgatp_val : xlenbits) -> vmidbits = { + if xlen == 32 then zero_extend(Mk_Hgatp32(hgatp_val)[Vmid]) + else if xlen == 64 then Mk_Hgatp64(hgatp_val)[Vmid] + else internal_error(__FILE__, __LINE__, + "Unsupported xlen" ^ dec_str(xlen)) +} + +// Result is 64b to cover both RV32 and RV64 addresses +// PRIVATE +function hgatp_to_PT_base(hgatp_val : xlenbits) -> bits(64) = { + let ppn = if xlen == 32 then zero_extend (64, Mk_Hgatp32(hgatp_val)[PPN]) + else if xlen == 64 then zero_extend (64, Mk_Hgatp64(hgatp_val)[PPN]) + else internal_error(__FILE__, __LINE__, + "Unsupported xlen" ^ dec_str(xlen)); + ppn << pagesize_bits +} + +// **************************************************************** +// Applicable address translation mode + +// Compute address translation mode from SATP/HGATP register +// PRIVATE +function translationMode(priv : Privilege, stage : AddressTranslationStage) -> AddressTranslationMode = { + // For S- & G-stage, translation mode is based on mstatus.SXL + // For VS-stage, translation mode is based on hstatus.VSXL + let arch : option(Architecture) = match stage { + Stage_VS => architecture(get_hstatus_VSXL(hstatus)), + _ => architecture(get_mstatus_SXL(mstatus)), + }; + + let mode = if priv == Machine then + Some(Bare) + else match (stage, arch, xlen) { + (Stage_S, Some(RV32), _) => satp64Mode_of_bits(RV32, zero_extend(Mk_Satp32(satp[31 .. 0])[Mode])), + (Stage_S, Some(RV64), 64) => satp64Mode_of_bits(RV64, Mk_Satp64(satp)[Mode]), + (Stage_VS, Some(RV32), _) => satp64Mode_of_bits(RV32, zero_extend(Mk_Satp32(vsatp[31 .. 
0])[Mode])), + (Stage_VS, Some(RV64), 64) => satp64Mode_of_bits(RV64, Mk_Satp64(vsatp)[Mode]), + (Stage_G, Some(RV32), _) => hgatp64Mode_of_bits(RV32, zero_extend(Mk_Hgatp32(hgatp[31 .. 0])[Mode])), + (Stage_G, Some(RV64), 64) => hgatp64Mode_of_bits(RV64, Mk_Hgatp64(hgatp)[Mode]), + (_, _, _) => internal_error(__FILE__, __LINE__, "unsupported address translation arch") + }; + + match mode { + Some(m) => m, + None() => internal_error(__FILE__, __LINE__, "invalid translation mode in xatp") } - else - internal_error(__FILE__, __LINE__, "unsupported xlen") } // **************************************************************** @@ -272,19 +364,12 @@ function write_pte forall 'n, 'n in {4, 8} . ( ) -> MemoryOpResult(bool) = mem_write_value_priv(paddr, pte_size, pte[pte_size * 8 - 1 .. 0], Supervisor, false, false, false) -// Result of address translation - -// PUBLIC -union TR_Result('paddr : Type, 'failure : Type) = { - TR_Address : ('paddr, ext_ptw), - TR_Failure : ('failure, ext_ptw) -} - // This function can be ignored on first reading since TLBs are not // part of RISC-V architecture spec (see TLB_NOTE above). // PRIVATE: translate on TLB hit, and maintenance of PTE in TLB function translate_TLB_hit(sv_params : SV_Params, - asid : asidbits, + stage : AddressTranslationStage, + xxid : xxidbits, ptb : bits(64), vAddr : bits(64), ac : AccessType(ext_access_type), @@ -294,7 +379,7 @@ function translate_TLB_hit(sv_params : SV_Params, ext_ptw : ext_ptw, tlb_index : tlb_index_range, ent : TLB_Entry) - -> TR_Result(bits(64), PTW_Error) = { + -> TRi_Result = { let pte = ent.pte; let ext_pte = msbs_of_PTE(sv_params, pte); let pte_flags = Mk_PTE_Flags(pte[7 .. 
0]); @@ -303,27 +388,33 @@ function translate_TLB_hit(sv_params : SV_Params, ext_ptw); match pte_check { PTE_Check_Failure(ext_ptw, ext_ptw_fail) => - TR_Failure(ext_get_ptw_error(ext_ptw_fail), ext_ptw), + TRi_Failure(ext_get_ptw_error(ext_ptw_fail), ext_ptw), PTE_Check_Success(ext_ptw) => match update_PTE_Bits(sv_params, pte, ac) { - None() => TR_Address(ent.pAddr | (vAddr & ent.vAddrMask), ext_ptw), + None() => TRi_Address(ent.pAddr | (vAddr & ent.vAddrMask), ext_ptw), Some(pte') => // See riscv_platform.sail if not(plat_enable_dirty_update()) then // pte needs dirty/accessed update but that is not enabled - TR_Failure(PTW_PTE_Update(), ext_ptw) + TRi_Failure(PTW_PTE_Update(), ext_ptw) else { // Writeback the PTE (which has new A/D bits) let n_ent = {ent with pte=pte'}; - write_TLB(tlb_index, n_ent); - let pte_phys_addr = ent.pteAddr[(xlen - 1) .. 0]; + write_TLB(tlb_index, n_ent, stage); + let pte_phys_addr : xlenbits = match stage { + Stage_VS => match translate_stage(ent.pteAddr, Write(Data), User, Stage_G, ext_ptw) { + TRi_Address(pte_pa, ext_ptw) => pte_pa[(sizeof(xlen) - 1) .. 0], + TRi_Failure(f, ext_ptw) => return TRi_Failure(PTW_Implicit(ptw_implicit_error(f), ent.pteAddr, Write(Data)), ext_ptw), + }, + _ => ent.pteAddr[(xlen - 1) .. 
0], + }; match write_pte(pte_phys_addr, 2 ^ sv_params.log_pte_size_bytes, pte') { MemValue(_) => (), MemException(e) => internal_error(__FILE__, __LINE__, "invalid physical address in TLB") }; - TR_Address(ent.pAddr | (vAddr & ent.vAddrMask), ext_ptw) + TRi_Address(ent.pAddr | (vAddr & ent.vAddrMask), ext_ptw) } } } @@ -331,48 +422,56 @@ function translate_TLB_hit(sv_params : SV_Params, // PRIVATE: translate on TLB miss (do a page-table walk) function translate_TLB_miss(sv_params : SV_Params, - asid : asidbits, + stage : AddressTranslationStage, + xxid : xxidbits, ptb : bits(64), vAddr : bits(64), ac : AccessType(ext_access_type), priv : Privilege, mxr : bool, do_sum : bool, - ext_ptw : ext_ptw) -> TR_Result(bits(64), PTW_Error) = { + ext_ptw : ext_ptw) + -> TRi_Result = { let initial_level = sv_params.levels - 1; - let ptw_result = pt_walk(sv_params, vAddr, ac, priv, mxr, do_sum, + let ptw_result = pt_walk(sv_params, stage, vAddr, ac, priv, mxr, do_sum, ptb, initial_level, false, ext_ptw); match ptw_result { - PTW_Failure(f, ext_ptw) => TR_Failure(f, ext_ptw), + PTW_Failure(f, ext_ptw) => TRi_Failure(f, ext_ptw), PTW_Success(pAddr, pte, pteAddr, level, global, ext_ptw) => { let ext_pte = msbs_of_PTE(sv_params, pte); // Without TLBs, this 'match' expression can be replaced simply - // by: 'TR_Address(pAddr, ext_ptw)' (see TLB_NOTE above) + // by: 'TRi_Address(pAddr, ext_ptw)' (see TLB_NOTE above) match update_PTE_Bits(sv_params, pte, ac) { None() => { - add_to_TLB(asid, vAddr, pAddr, pte, pteAddr, level, global, + add_to_TLB(xxid, stage, vAddr, pAddr, pte, pteAddr, level, global, sv_params.vpn_size_bits, pagesize_bits); - TR_Address(pAddr, ext_ptw) + TRi_Address(pAddr, ext_ptw) }, Some(pte') => // See riscv_platform.sail if not(plat_enable_dirty_update()) then // pte needs dirty/accessed update but that is not enabled - TR_Failure(PTW_PTE_Update(), ext_ptw) + TRi_Failure(PTW_PTE_Update(), ext_ptw) else { // Writeback the PTE (which has new A/D bits) - let 
pte_phys_addr = pteAddr[(xlen - 1) .. 0]; + let pte_phys_addr : xlenbits = match stage { + Stage_VS => match translate_stage(pteAddr, Write(Data), User, Stage_G, ext_ptw) { + TRi_Address(pte_pa, ext_ptw) => pte_pa[(sizeof(xlen) - 1) .. 0], + TRi_Failure(f, ext_ptw) => return TRi_Failure(PTW_Implicit(ptw_implicit_error(f), pteAddr, Write(Data)), ext_ptw), + }, + _ => pteAddr[(xlen - 1) .. 0], + }; match write_pte(pte_phys_addr, 2 ^ sv_params.log_pte_size_bytes, pte') { MemValue(_) => { - add_to_TLB(asid, vAddr, pAddr, pte', pteAddr, level, global, + add_to_TLB(xxid, stage, vAddr, pAddr, pte', pteAddr, level, global, sv_params.vpn_size_bits, pagesize_bits); - TR_Address(pAddr, ext_ptw) + TRi_Address(pAddr, ext_ptw) }, MemException(e) => - TR_Failure(PTW_Access(), ext_ptw) + TRi_Failure(PTW_Access(), ext_ptw) } } } @@ -382,7 +481,8 @@ function translate_TLB_miss(sv_params : SV_Params, // PRIVATE function translate(sv_params : SV_Params, - asid : asidbits, + stage : AddressTranslationStage, + xxid : xxidbits, ptb : bits(64), vAddr : bits(64), ac : AccessType(ext_access_type), @@ -390,55 +490,167 @@ function translate(sv_params : SV_Params, mxr : bool, do_sum : bool, ext_ptw : ext_ptw) - -> TR_Result(bits(64), PTW_Error) = { + -> TRi_Result = { // On first reading, assume lookup_TLB returns None(), since TLBs // are not part of RISC-V archticture spec (see TLB_NOTE above) - match lookup_TLB(asid, vAddr) { - Some(index, ent) => translate_TLB_hit(sv_params, asid, ptb, vAddr, ac, priv, + match lookup_TLB(xxid, stage, vAddr) { + Some(index, ent) => translate_TLB_hit(sv_params, stage, xxid, ptb, vAddr, ac, priv, mxr, do_sum, ext_ptw, index, ent), - None() => translate_TLB_miss(sv_params, asid, ptb, vAddr, ac, priv, + None() => translate_TLB_miss(sv_params, stage, xxid, ptb, vAddr, ac, priv, mxr, do_sum, ext_ptw) } } +// PRIVATE: perform address translation for a specific stage +function translate_stage(vAddr, + ac, + priv, + stage, + ext_ptw) = { + match stage { + Stage_S => 
{ + let mode = translationMode(priv, stage); + let (valid_va, sv_params) : (bool, SV_Params) = match mode { + Bare => return TRi_Address(vAddr, ext_ptw), + Sv32 => (true, sv32_params), + Sv39 => (is_valid_vAddr(sv39_params, vAddr), sv39_params), + Sv48 => (is_valid_vAddr(sv48_params, vAddr), sv48_params), + // Sv57 => (is_valid_vAddr(sv57_params, vAddr), sv57_params), // TODO: Sv57 address translation + _ => internal_error(__FILE__, __LINE__, "Invalid S-stage address translation mode") + }; + if not(valid_va) then + TRi_Failure(PTW_Invalid_Addr(), ext_ptw) + else { + let mxr : bool = mstatus[MXR] == 0b1; + let do_sum : bool = mstatus[SUM] == 0b1; + let asid : bits(16) = satp_to_asid(satp); + let ptb : bits(64) = satp_to_PT_base(satp); + translate(sv_params, + Stage_S, + asid, + ptb, + vAddr, + ac, + priv, + mxr, + do_sum, + ext_ptw) + } + }, + Stage_VS => { + let mode = translationMode(priv, stage); + let (valid_va, sv_params) : (bool, SV_Params) = match mode { + Bare => return TRi_Address(vAddr, ext_ptw), + Sv32 => (true, sv32_params), + Sv39 => (is_valid_vAddr(sv39_params, vAddr), sv39_params), + Sv48 => (is_valid_vAddr(sv48_params, vAddr), sv48_params), + // Sv57 => (is_valid_vAddr(sv57_params, vAddr), sv57_params), // TODO: Sv57 address translation + _ => internal_error(__FILE__, __LINE__, "Invalid VS-stage address translation mode") + }; + if not(valid_va) then + TRi_Failure(PTW_Invalid_Addr(), ext_ptw) + else { + let mxr : bool = mstatus[MXR] == 0b1 | vsstatus[MXR] == 0b1; // HS-level MXR overwrites VS-stage page protections + let do_sum : bool = vsstatus[SUM] == 0b1; + let asid : bits(16) = satp_to_asid(vsatp); + let ptb : bits(64) = satp_to_PT_base(vsatp); + translate(sv_params, + Stage_VS, + asid, + ptb, + vAddr, + ac, + priv, + mxr, + do_sum, + ext_ptw) + } + }, + Stage_G => { + let mode = translationMode(priv, stage); + let (valid_va, sv_params) : (bool, SV_Params) = match mode { + Bare => return TRi_Address(vAddr, ext_ptw), + Sv32x4 => (true, 
sv32x4_params), + Sv39x4 => (is_valid_vAddr(sv39x4_params, vAddr), sv39x4_params), + Sv48x4 => (is_valid_vAddr(sv48x4_params, vAddr), sv48x4_params), + // Sv57x4 => (is_valid_vAddr(sv57x4_params, vAddr), sv57x4_params), // TODO: Sv57x4 address translation + _ => internal_error(__FILE__, __LINE__, "Invalid G-stage address translation mode") + }; + if not(valid_va) then + TRi_Failure(PTW_Invalid_Addr(), ext_ptw) + else { + let mxr : bool = mstatus[MXR] == 0b1; + let do_sum : bool = mstatus[SUM] == 0b1; + let vmid : bits(16) = zero_extend(hgatp_to_vmid(hgatp)); + let ptb : bits(64) = hgatp_to_PT_base(hgatp); + translate(sv_params, + Stage_G, + vmid, + ptb, + vAddr, + ac, + User, // All G-stage memory accesses are considered User-level + mxr, + do_sum, + ext_ptw) + } + }, + } +} + +// PUBLIC +/*! Result of Address Translation (AT) */ +union TR_Result('paddr : Type, 'exception : Type, 'context : Type) = { + TR_Address : ('paddr, ext_ptw), + TR_Failure : ('exception, 'context, ext_ptw) +} + +// Addr-translation function for specific privilege & virtualization mode +// PUBLIC: invoked from HLV, HSV and HLVX +function translateAddr_pv(vAddr : xlenbits, + ac : AccessType(ext_access_type), + priv : Privilege, + virt : Virtualization) + -> TR_Result(xlenbits, ExceptionType, ExceptionContext) = { + // Internally the vmem code works with 64-bit values, whether xlen==32 or xlen==64 + // This 'extend' is a no-op when xlen==64 and extends when xlen==32 + let vAddr_64b : bits(64) = zero_extend(vAddr); + // PTW extensions (non-standard): initialize the PTW extension state + let ext_ptw : ext_ptw = init_ext_ptw; + + if virt == V0 then + match translate_stage(vAddr_64b, ac, priv, Stage_S, ext_ptw) { + TRi_Address(pa, ext_ptw) => TR_Address(truncate(pa, sizeof(xlen)), ext_ptw), + TRi_Failure(f, ext_ptw) => TR_Failure(translationException(ac, Stage_S, f), + translationExcContext(f, vAddr_64b, false), + ext_ptw) + } + else + match translate_stage(vAddr_64b, ac, priv, Stage_VS, ext_ptw) { 
+ TRi_Address(gpa, ext_ptw) => match translate_stage(gpa, ac, priv, Stage_G, ext_ptw){ + TRi_Address(spa, ext_ptw) => TR_Address(truncate(spa, sizeof(xlen)), ext_ptw), + TRi_Failure(f, ext_ptw) => TR_Failure(translationException(ac, Stage_G, f), + {translationExcContext(f, vAddr_64b, true) with excinfo2 = Some(truncate(gpa >> 2, sizeof(xlen)))}, + ext_ptw) + }, + TRi_Failure(f, ext_ptw) => TR_Failure(translationException(ac, Stage_VS, f), + translationExcContext(f, vAddr_64b, true), + ext_ptw) + } +} + // Top-level addr-translation function // PUBLIC: invoked from instr-fetch and load/store/amo function translateAddr(vAddr : xlenbits, ac : AccessType(ext_access_type)) - -> TR_Result(xlenbits, ExceptionType) = { - // Internally the vmem code works with 64-bit values, whether xlen==32 or xlen==64 - // This 'extend' is a no-op when xlen==64 and extends when xlen==32 - let vAddr_64b : bits(64) = zero_extend(vAddr); - // Effective privilege takes into account mstatus.PRV, mstatus.MPP + -> TR_Result(xlenbits, ExceptionType, ExceptionContext) = { + // Effective privilege takes into account mstatus.MPRV, mstatus.MPP // See riscv_sys_regs.sail for effectivePrivilege() and cur_privilege - let effPriv : Privilege = effectivePrivilege(ac, mstatus, cur_privilege); - let mode : SATPMode = translationMode(effPriv); - let (valid_va, sv_params) : (bool, SV_Params) = match mode { - Sbare => return TR_Address(vAddr, init_ext_ptw), - Sv32 => (true, sv32_params), - Sv39 => (is_valid_vAddr(sv39_params, vAddr_64b), sv39_params), - Sv48 => (is_valid_vAddr(sv48_params, vAddr_64b), sv48_params), - // Sv57 => (is_valid_vAddr(sv57_params, vAddr_64b), sv57_params), // TODO - }; - if not(valid_va) then - TR_Failure(translationException(ac, PTW_Invalid_Addr()), init_ext_ptw) - else { - let mxr = mstatus[MXR] == 0b1; - let do_sum = mstatus[SUM] == 0b1; - let asid : asidbits = satp_to_asid(satp); - let ptb : bits(64) = satp_to_PT_base(satp); - let tr_result1 = translate(sv_params, - asid, - ptb, - 
vAddr_64b, - ac, effPriv, mxr, do_sum, - init_ext_ptw); - // Fixup result PA or exception - match tr_result1 { - TR_Address(pa, ext_ptw) => TR_Address(truncate(pa, xlen), ext_ptw), - TR_Failure(f, ext_ptw) => TR_Failure(translationException(ac, f), ext_ptw) - } - } + let effPriv : Privilege = effectivePrivilege(ac, mstatus, cur_privilege); + // Effective virtualization takes into account mstatus.MPRV, mstatus.MPV + let effVirt : Virtualization = effectiveVirtualization(ac, mstatus, cur_virtualization); + + translateAddr_pv(vAddr, ac, effPriv, effVirt) } // **************************************************************** diff --git a/model/riscv_vmem_common.sail b/model/riscv_vmem_common.sail index a6b717b65..4e595acd1 100644 --- a/model/riscv_vmem_common.sail +++ b/model/riscv_vmem_common.sail @@ -6,8 +6,7 @@ /* SPDX-License-Identifier: BSD-2-Clause */ /*=======================================================================================*/ -// **************************************************************** -// Parameters for VM modes sv32, sv39, sv48 and (TODO) Sv57 +// Parameters for VM modes Sv32, Sv32x4, Sv39, Sv39x4, Sv48, Sv48x4, (TODO) Sv57 and (TODO) Sv57x4 // All VM modes use the same page size (4KB, with 12-bit index) @@ -18,28 +17,34 @@ let pagesize_bits = sizeof(PAGESIZE_BITS) // PRIVATE struct SV_Params = { + // Address translation modes: Sv32 Sv39 Sv48 Sv57 Sv32x4 Sv39x4 Sv48x4 Sv57x4 + // VA - va_size_bits : {32, 39, 48}, // 32 39 48 - vpn_size_bits : {10, 9}, // 10 9 9 + va_size_bits : {32, 34, 39, 41, 48, 50, 57, 59}, // 32 39 48 57 34 41 50 59 + va_sign_extends : bool, // true true true true false false false false + vpn_size_bits : {10, 9}, // 10 9 9 9 10 9 9 9 + root_vpn_size_bits : {9, 10, 11, 12}, // 10 9 9 9 12 11 11 11 // PTE - levels : { 2, 3, 4}, // 2 3 4 - log_pte_size_bytes : { 2, 3}, // 2 3 3 - pte_msbs_lsb_index : {32, 54}, // 32 54 54 - pte_msbs_size_bits : { 0, 10}, // 0 10 10 - pte_PPNs_lsb_index : {10}, // 10 10 10 - 
pte_PPNs_size_bits : {22, 44}, // 22 44 44 - pte_PPN_j_size_bits : {10, 9} // 10 9 9 + levels : { 2, 3, 4, 5}, // 2 3 4 5 2 3 4 5 + log_pte_size_bytes : { 2, 3}, // 2 3 3 3 2 3 3 3 + pte_msbs_lsb_index : {32, 54}, // 32 54 54 54 32 54 54 54 + pte_msbs_size_bits : { 0, 10}, // 0 10 10 10 0 10 10 10 + pte_PPNs_lsb_index : {10}, // 10 10 10 10 10 10 10 10 + pte_PPNs_size_bits : {22, 44}, // 22 44 44 44 22 44 44 44 + pte_PPN_j_size_bits : {10, 9} // 10 9 9 9 10 9 9 9 } // Current level during a page-table walk (0 to SV_Params.levels - 1) -type PTW_Level = range(0,3) // range(0,4) when add Sv57 (TODO) +type PTW_Level = range(0, 4) // PRIVATE let sv32_params : SV_Params = struct { // VA va_size_bits = 32, + va_sign_extends = true, vpn_size_bits = 10, + root_vpn_size_bits = 10, // PTE levels = 2, @@ -55,7 +60,9 @@ let sv32_params : SV_Params = struct { let sv39_params : SV_Params = struct { // VA va_size_bits = 39, + va_sign_extends = true, vpn_size_bits = 9, + root_vpn_size_bits = 9, // PTE levels = 3, @@ -71,7 +78,9 @@ let sv39_params : SV_Params = struct { let sv48_params : SV_Params = struct { // VA va_size_bits = 48, + va_sign_extends = true, vpn_size_bits = 9, + root_vpn_size_bits = 9, // PTE levels = 4, @@ -83,13 +92,89 @@ let sv48_params : SV_Params = struct { pte_PPN_j_size_bits = 9 } -// TODO; not currently used +// TODO: Sv57 address translation // PRIVATE /* let sv57_params : SV_Params = struct { // VA va_size_bits = 57, + va_sign_extends = true, + vpn_size_bits = 9, + root_vpn_size_bits = 9, + + // PTE + levels = 5, + log_pte_size_bytes = 3, // 8 Bytes + pte_msbs_lsb_index = 54, + pte_msbs_size_bits = 10, + pte_PPNs_lsb_index = 10, + pte_PPNs_size_bits = 44, + pte_PPN_j_size_bits = 9 +} +*/ + +// PRIVATE +let sv32x4_params : SV_Params = struct { + // VA + va_size_bits = 34, + va_sign_extends = false, + vpn_size_bits = 10, + root_vpn_size_bits = 12, + + // PTE + levels = 2, + log_pte_size_bytes = 2, // 4 Bytes + pte_msbs_lsb_index = 32, + pte_msbs_size_bits = 
0, + pte_PPNs_lsb_index = 10, + pte_PPNs_size_bits = 22, + pte_PPN_j_size_bits = 10 +} + +let sv39x4_params : SV_Params = struct { + // VA + va_size_bits = 41, + va_sign_extends = false, + vpn_size_bits = 9, + root_vpn_size_bits = 11, + + // PTE + levels = 3, + log_pte_size_bytes = 3, // 8 Bytes + pte_msbs_lsb_index = 54, + pte_msbs_size_bits = 10, + pte_PPNs_lsb_index = 10, + pte_PPNs_size_bits = 44, + pte_PPN_j_size_bits = 9 +} + +// PRIVATE +let sv48x4_params : SV_Params = struct { + // VA + va_size_bits = 50, + va_sign_extends = false, + vpn_size_bits = 9, + root_vpn_size_bits = 11, + + // PTE + levels = 4, + log_pte_size_bytes = 3, // 8 Bytes + pte_msbs_lsb_index = 54, + pte_msbs_size_bits = 10, + pte_PPNs_lsb_index = 10, + pte_PPNs_size_bits = 44, + pte_PPN_j_size_bits = 9 +} + +// TODO: Sv57x4 address translation +// PRIVATE +/* +let sv57x4_params : SV_Params = struct { + // VA + va_size_bits = 59, + va_sign_extends = false, vpn_size_bits = 9, + root_vpn_size_bits = 11, // PTE levels = 5, diff --git a/model/riscv_vmem_ptw.sail b/model/riscv_vmem_ptw.sail index ab73b1909..6545b4b10 100644 --- a/model/riscv_vmem_ptw.sail +++ b/model/riscv_vmem_ptw.sail @@ -12,28 +12,60 @@ // 'ext_ptw' supports (non-standard) extensions to the default addr-translation and PTW. // See riscv_types_ext.sail for definitions. 
+// Note: Ideally we could use 'PTW_Implicit : (PTW_Error, ...)' in 'PTW_Error' but recursive types are not supported by sail + +// Failure modes for implicit address-translation/page-table-walks +// PRIVATE +union PTW_Implicit_Error = { + PTW_I_Invalid_Addr : unit, // invalid source address + PTW_I_Access : unit, // physical memory access error for a PTE + PTW_I_Invalid_PTE : unit, // invalid page table entry or ptr PTE when level = 0 + PTW_I_No_Permission : unit, // insufficient page permissions + PTW_I_Misaligned : unit, // misaligned superpage + PTW_I_PTE_Update : unit, // PTE update needed but not enabled + PTW_I_Ext_Error : ext_ptw_error // parameterized for errors from extensions +} + // Failure modes for address-translation/page-table-walks // PRIVATE union PTW_Error = { - PTW_Invalid_Addr : unit, // invalid source address - PTW_Access : unit, // physical memory access error for a PTE - PTW_Invalid_PTE : unit, - PTW_No_Permission : unit, - PTW_Misaligned : unit, // misaligned superpage - PTW_PTE_Update : unit, // PTE update needed but not enabled - PTW_Ext_Error : ext_ptw_error // parameterized for errors from extensions + PTW_Invalid_Addr : unit, // invalid source address + PTW_Access : unit, // physical memory access error for a PTE + PTW_Invalid_PTE : unit, // invalid page table entry or ptr PTE when level = 0 + PTW_No_Permission : unit, // insufficient page permissions + PTW_Misaligned : unit, // misaligned superpage + PTW_PTE_Update : unit, // PTE update needed but not enabled + PTW_Implicit : (PTW_Implicit_Error, // error during implicit access/page-walk + bits(64), // faulting address of implicit access/page-walk + AccessType(ext_access_type)), // access type of implicit access/page-walk + PTW_Ext_Error : ext_ptw_error // parameterized for errors from extensions +} + +// Convert PTW_Error to PTW_Implicit_Error +function ptw_implicit_error(f : PTW_Error) -> PTW_Implicit_Error = { + match f { + PTW_Invalid_Addr() => PTW_I_Invalid_Addr(), + PTW_Access() 
=> PTW_I_Access(), + PTW_Invalid_PTE() => PTW_I_Invalid_PTE(), + PTW_No_Permission() => PTW_I_No_Permission(), + PTW_Misaligned() => PTW_I_Misaligned(), + PTW_PTE_Update() => PTW_I_PTE_Update(), + PTW_Ext_Error(e) => PTW_I_Ext_Error(e), + PTW_Implicit(_, _, _) => internal_error(__FILE__, __LINE__, "cannot convert implicit PTW error") + } } // PRIVATE: only 'to_str' overload is public function ptw_error_to_str(e : PTW_Error) -> string = { match e { - PTW_Invalid_Addr() => "invalid-source-addr", - PTW_Access() => "mem-access-error", - PTW_Invalid_PTE() => "invalid-pte", - PTW_No_Permission() => "no-permission", - PTW_Misaligned() => "misaligned-superpage", - PTW_PTE_Update() => "pte-update-needed", - PTW_Ext_Error(e) => "extension-error" + PTW_Invalid_Addr() => "invalid-source-addr", + PTW_Access() => "mem-access-error", + PTW_Invalid_PTE() => "invalid-pte", + PTW_No_Permission() => "no-permission", + PTW_Misaligned() => "misaligned-superpage", + PTW_PTE_Update() => "pte-update-needed", + PTW_Implicit(_, _, _) => "implicit-access-error", + PTW_Ext_Error(e) => "extension-error" } } @@ -48,19 +80,75 @@ overload to_str = {ptw_error_to_str} function ext_get_ptw_error(eptwf : ext_ptw_fail) -> PTW_Error = PTW_No_Permission() -// Convert translation/PTW failures into architectural exceptions +// PRIVATE +/*!
Convert translation/PTW failures into architectural exceptions */ function translationException(a : AccessType(ext_access_type), + s : AddressTranslationStage, f : PTW_Error) - -> ExceptionType = { - match (a, f) { - (_, PTW_Ext_Error(e)) => E_Extension(ext_translate_exception(e)), - (ReadWrite(_), PTW_Access()) => E_SAMO_Access_Fault(), - (ReadWrite(_), _) => E_SAMO_Page_Fault(), - (Read(_), PTW_Access()) => E_Load_Access_Fault(), - (Read(_), _) => E_Load_Page_Fault(), - (Write(_), PTW_Access()) => E_SAMO_Access_Fault(), - (Write(_), _) => E_SAMO_Page_Fault(), - (Execute(), PTW_Access()) => E_Fetch_Access_Fault(), - (Execute(), _) => E_Fetch_Page_Fault() + -> ExceptionType = { + let e : ExceptionType = match (a, f) { + (_, PTW_Ext_Error(e)) => E_Extension(ext_translate_exception(e)), + (_, PTW_Implicit(fi, _, _)) => match (a, fi) { // Report exception for the original access type (of explicit page-walk) + (_, PTW_I_Ext_Error(e)) => E_Extension(ext_translate_exception(e)), + (ReadWrite(_), PTW_I_Access()) => E_SAMO_Access_Fault(), + (ReadWrite(_), _) => E_SAMO_GPage_Fault(), + (Read(_), PTW_I_Access()) => E_Load_Access_Fault(), + (Read(_), _) => E_Load_GPage_Fault(), + (Write(_), PTW_I_Access()) => E_SAMO_Access_Fault(), + (Write(_), _) => E_SAMO_GPage_Fault(), + (Execute(), PTW_I_Access()) => E_Fetch_Access_Fault(), + (Execute(), _) => E_Fetch_GPage_Fault()}, + (ReadWrite(_), PTW_Access()) => E_SAMO_Access_Fault(), + (ReadWrite(_), _) => E_SAMO_Page_Fault(), + (Read(_), PTW_Access()) => E_Load_Access_Fault(), + (Read(_), _) => E_Load_Page_Fault(), + (Write(_), PTW_Access()) => E_SAMO_Access_Fault(), + (Write(_), _) => E_SAMO_Page_Fault(), + (Execute(), PTW_Access()) => E_Fetch_Access_Fault(), + (Execute(), _) => E_Fetch_Page_Fault() + }; + + /* Convert exceptions resulting from G-stage page walks to the correct architectural exception: + * - Guest-page-fault exceptions are raised instead of regular page-walk exceptions + * - Any exception is always reported for 
the original access type */ + if s == Stage_G then + match exceptionType_update_AccessType(e, a) { + E_Fetch_Page_Fault() => E_Fetch_GPage_Fault(), + E_Load_Page_Fault() => E_Load_GPage_Fault(), + E_SAMO_Page_Fault() => E_SAMO_GPage_Fault(), + _ => e /* Other exceptions should not be converted */ + } + else e +} + +// PRIVATE +/*! Build a pseudoinstruction encoding for a given access type and architecture */ +function pseudoinst(ac : AccessType(ext_access_type), arch : Architecture) -> bits(32) = { + match (arch, ac) { + (RV32, Read(_)) => 0x00002000, + (RV32, Write(_)) => 0x00002020, + (RV64, Read(_)) => 0x00003000, + (RV64, Write(_)) => 0x00003020, + (_, _) => internal_error(__FILE__, __LINE__, "Illegal pseudoinst architecture/access type") + } +} + +// PRIVATE +/*! Build architectural exception context for translation/PTW failures */ +function translationExcContext(f : PTW_Error, + vaddr : bits(64), + gva : bool) + -> ExceptionContext = { + match f { + PTW_Ext_Error(e) => ext_exception_context(e), + PTW_Implicit(f, gpaddr, ac) => { + let pinst : option(xlenbits) = match f { + PTW_I_Access() => None(), + PTW_I_Ext_Error(_) => None(), + _ => Some(zero_extend(pseudoinst(ac, cur_Architecture()))), + } in + vmem_exception_context(truncate(vaddr, sizeof(xlen)), true, Some(truncate(gpaddr >> 2, sizeof(xlen))), pinst) + }, + _ => vmem_exception_context(truncate(vaddr, sizeof(xlen)), gva, None(), None()), } } diff --git a/model/riscv_vmem_tlb.sail b/model/riscv_vmem_tlb.sail index d066f1356..ba7a1e78e 100644 --- a/model/riscv_vmem_tlb.sail +++ b/model/riscv_vmem_tlb.sail @@ -12,11 +12,11 @@ // (2) we can greatly speed up simulation speed (for Linux boot, can // reduce elapsed time from 10s of minutes to few minutes). 
-type asidbits = bits(16) +type xxidbits = bits(16) // PRIVATE struct TLB_Entry = { - asid : asidbits, // address-space id + xxid : xxidbits, // address-space id / virtual-machine id global : bool, // global translation vAddr : bits(64), // VPN pAddr : bits(64), // ppn @@ -33,7 +33,7 @@ type num_tlb_entries : Int = 64 type tlb_index_range = range(0, num_tlb_entries - 1) // PRIVATE -register tlb : vector(num_tlb_entries, option(TLB_Entry)) = [ +register tlb_S : vector(num_tlb_entries, option(TLB_Entry)) = [ None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), None(), @@ -44,52 +44,93 @@ register tlb : vector(num_tlb_entries, option(TLB_Entry)) = [ None(), None(), None(), None(), None(), None(), None(), None(), ] +// PRIVATE +register tlb_VS : vector(num_tlb_entries, option(TLB_Entry)) = [ + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), +] + +// PRIVATE +register tlb_G : vector(num_tlb_entries, option(TLB_Entry)) = [ + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), None(), + None(), None(), None(), None(), None(), None(), None(), 
None(), + None(), None(), None(), None(), None(), None(), None(), None(), +] + + // Indexed by the lowest bits of the VPN. function tlb_hash(vaddr : bits(64)) -> tlb_index_range = unsigned(vaddr[17 .. 12]) // PUBLIC: invoked in init_vmem() [riscv_vmem.sail] function init_TLB() -> unit = { - foreach (i from 0 to (length(tlb) - 1)) { - tlb[i] = None(); - } + foreach (i from 0 to (length(tlb_S) - 1)) { + tlb_S[i] = None(); + }; + foreach (i from 0 to (length(tlb_VS) - 1)) { + tlb_VS[i] = None(); + }; + foreach (i from 0 to (length(tlb_G) - 1)) { + tlb_G[i] = None(); + }; } // PUBLIC: invoked in translate_TLB_hit() [riscv_vmem.sail] -function write_TLB(index : tlb_index_range, ent : TLB_Entry) -> unit = - tlb[index] = Some(ent) +function write_TLB(index : tlb_index_range, ent : TLB_Entry, stage : AddressTranslationStage) -> unit = + match stage { + Stage_S => tlb_S[index] = Some(ent), + Stage_G => tlb_G[index] = Some(ent), + Stage_VS => tlb_VS[index] = Some(ent), + } // PRIVATE function match_TLB_Entry(ent : TLB_Entry, - asid : asidbits, + xxid : xxidbits, vaddr : bits(64)) -> bool = - (ent.global | (ent.asid == asid)) + (ent.global | (ent.xxid == xxid)) & (ent.vAddr == (ent.vMatchMask & vaddr)) // PRIVATE function flush_TLB_Entry(e : TLB_Entry, - asid : option(asidbits), + xxid : option(xxidbits), addr : option(bits(64))) -> bool = { - match (asid, addr) { + match (xxid, addr) { ( None(), None()) => true, ( None(), Some(a)) => e.vAddr == (e.vMatchMask & a), - (Some(i), None()) => (e.asid == i) & not(e.global), - (Some(i), Some(a)) => ( (e.asid == i) & (e.vAddr == (a & e.vMatchMask)) + (Some(i), None()) => (e.xxid == i) & not(e.global), + (Some(i), Some(a)) => ( (e.xxid == i) & (e.vAddr == (a & e.vMatchMask)) & not(e.global)) } } // PUBLIC: invoked in translate() [riscv_vmem.sail] -function lookup_TLB (asid : asidbits, vaddr : bits(64)) -> option((tlb_index_range, TLB_Entry)) = { +function lookup_TLB (xxid : xxidbits, stage : AddressTranslationStage, vaddr : bits(64)) 
-> option((tlb_index_range, TLB_Entry)) = { let index = tlb_hash(vaddr); - match tlb[index] { + let tlb_slot : option(TLB_Entry) = match stage { + Stage_S => tlb_S[index], + Stage_G => tlb_G[index], + Stage_VS => tlb_VS[index], + }; + match tlb_slot { None() => None(), - Some(entry) => if match_TLB_Entry(entry, asid, vaddr) then Some((index, entry)) else None(), + Some(entry) => if match_TLB_Entry(entry, xxid, vaddr) then Some((index, entry)) else None(), } } // PRIVATE -function add_to_TLB(asid : asidbits, +function add_to_TLB(xxid : xxidbits, + stage : AddressTranslationStage, vAddr : bits(64), pAddr : bits(64), pte : bits(64), @@ -103,7 +144,7 @@ function add_to_TLB(asid : asidbits, let vAddrMask : bits(64) = zero_extend(ones(shift)); let vMatchMask : bits(64) = ~ (vAddrMask); - let entry : TLB_Entry = struct{asid = asid, + let entry : TLB_Entry = struct{xxid = xxid, global = global, pte = pte, pteAddr = pteAddr, @@ -118,14 +159,19 @@ function add_to_TLB(asid : asidbits, // lookup_TLB looks it up. For superpages will just end up with the same // TLB entry in multiple slots. let index = tlb_hash(vAddr); - tlb[index] = Some(entry); + match stage { + Stage_S => tlb_S[index] = Some(entry), + Stage_VS => tlb_VS[index] = Some(entry), + Stage_G => tlb_G[index] = Some(entry), + } } // Top-level TLB flush function -// PUBLIC: invoked from exec SFENCE_VMA +// PUBLIC: invoked from exec SFENCE_VMA, HFENCE_VVMA and HFENCE_GVMA function flush_TLB(asid_xlen : option(xlenbits), + stage : AddressTranslationStage, addr_xlen : option(xlenbits)) -> unit = { - let asid : option(asidbits) = + let xxid : option(xxidbits) = match asid_xlen { None() => None(), Some(a) => Some(a[15 .. 
0]) @@ -135,10 +181,25 @@ function flush_TLB(asid_xlen : option(xlenbits), None() => None(), Some(a) => Some(zero_extend(a)) }; - foreach (i from 0 to (length(tlb) - 1)) { - match tlb[i] { - Some(e) => if flush_TLB_Entry(e, asid, addr_64b) then { tlb[i] = None(); }, - None() => (), - } + + match stage { + Stage_S => foreach (i from 0 to (length(tlb_S) - 1)) { + match tlb_S[i] { + Some(e) => if flush_TLB_Entry(e, xxid, addr_64b) then { tlb_S[i] = None(); }, + None() => (), + } + }, + Stage_VS => foreach (i from 0 to (length(tlb_VS) - 1)) { + match tlb_VS[i] { + Some(e) => if flush_TLB_Entry(e, xxid, addr_64b) then { tlb_VS[i] = None(); }, + None() => (), + } + }, + Stage_G => foreach (i from 0 to (length(tlb_G) - 1)) { + match tlb_G[i] { + Some(e) => if flush_TLB_Entry(e, xxid, addr_64b) then { tlb_G[i] = None(); }, + None() => (), + } + } } } diff --git a/model/riscv_zicsr_control.sail b/model/riscv_zicsr_control.sail new file mode 100644 index 000000000..4aecf1592 --- /dev/null +++ b/model/riscv_zicsr_control.sail @@ -0,0 +1,79 @@ +/*=======================================================================================*/ +/* This Sail RISC-V architecture model, comprising all files and */ +/* directories except where otherwise noted is subject the BSD */ +/* two-clause license in the LICENSE file. 
*/ +/* */ +/* SPDX-License-Identifier: BSD-2-Clause */ +/*=======================================================================================*/ + +/* CSR access control */ + +function csrAccess(csr : csreg) -> csrRW = csr[11..10] +function csrPriv(csr : csreg) -> priv_level = csr[9..8] + +val check_CSR_access : (csrRW, priv_level, Privilege, Virtualization, bool) -> bool +function check_CSR_access(csrrw, csrpr, p, v, isWrite) = { + let check_priv: bool = match p { /* privilege */ + Machine => (privLevel_to_bits(Machine) >=_u csrpr), + Supervisor => match v { + V0 => (0b10>=_u csrpr), /* hypervisor CSRs (0b10) are accessible in HS-mode */ + V1 => (privLevel_to_bits(Supervisor) >=_u csrpr)}, + User => (privLevel_to_bits(User) >=_u csrpr), + }; + let check_RW = not(isWrite == true & csrrw == 0b11); /* read/write */ + check_priv & check_RW +} + +function check_TVM_SATP(csr : csreg, p : Privilege, v: Virtualization) -> bool = + match (p, v) { + (Supervisor, V0) => not(csr == 0x180 & mstatus[TVM] == 0b1), + (Supervisor, V1) => not(csr == 0x180 & hstatus[VTVM] == 0b1), + (_, _) => true + } + +// There are several features that are controlled by machine/supervisor enable +// bits (m/senvcfg, m/scounteren, etc.). This abstracts that logic. +function feature_enabled_for_priv(p : Privilege, v : Virtualization, machine_enable_bit : bit, hypervisor_enable_bit : bit, supervisor_enable_bit : bit) -> bool = match (p, v) { + (Machine, _) => true, + (Supervisor,V0) => machine_enable_bit == bitone, + (Supervisor,V1) => machine_enable_bit == bitone & hypervisor_enable_bit == bitone, + (User, V0) => machine_enable_bit == bitone & (not(extensionEnabled(Ext_S)) | supervisor_enable_bit == bitone), + // Note: S-mode is always present when H-ext is implemented (when V1 is possible) + (User, V1) => machine_enable_bit == bitone & hypervisor_enable_bit == bitone & supervisor_enable_bit == bitone, +} + +// Return true if the counter is enabled OR the CSR is not a counter. 
+function check_Counteren(csr : csreg, p : Privilege, v : Virtualization) -> bool = { + // Check if it is not a counter. + if csr <_u 0xC00 | 0xC1F <_u csr + then return true; + + // Check the relevant bit in m/scounteren. + let index = unsigned(csr[4 .. 0]); + feature_enabled_for_priv(p, v, mcounteren.bits[index], hcounteren.bits[index], scounteren.bits[index]) +} + +/* Seed may only be accessed if we are doing a write, and access has been + * allowed in the current priv mode + */ +function check_seed_CSR (csr : csreg, p : Privilege, isWrite : bool) -> bool = { + if not(csr == 0x015) then { + true + } else if not(isWrite) then { + /* Read-only access to the seed CSR is not allowed */ + false + } else { + match (p) { + Machine => true, + Supervisor => false, /* TODO: base this on mseccfg */ + User => false, /* TODO: base this on mseccfg */ + } + } +} + +function check_CSR(csr : csreg, p : Privilege, v: Virtualization, isWrite : bool) -> bool = + is_CSR_defined(csr) + & check_CSR_access(csrAccess(csr), csrPriv(csr), p, v, isWrite) + & check_TVM_SATP(csr, p, v) + & check_Counteren(csr, p, v) + & check_seed_CSR(csr, p, isWrite)