/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "semihosting/common-semi.h"
#include "sysemu/cpu-timers.h"
#include "tcg/oversized-guest.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    bool virt = env->virt_enabled;
    int mode = env->priv;

    /* All priv -> mmu_idx mappings are here */
    if (!ifetch) {
        uint64_t status = env->mstatus;

        if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
            virt = get_field(env->mstatus, MSTATUS_MPV) &&
                   (mode != PRV_M);
            if (virt) {
                status = env->vsstatus;
            }
        }
        if (mode == PRV_S && get_field(status, MSTATUS_SUM)) {
            mode = MMUIdx_S_SUM;
        }
    }

    return mode | (virt ? MMU_2STAGE_BIT : 0);
#endif
}
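
/*
 * Illustrative sketch (not part of the build): the returned index packs
 * the effective privilege mode into its low bits with the two-stage
 * flag OR'd on top, so a caller could split it apart again, assuming
 * the MMUIdx_* encoding consumed by mmuidx_priv()/mmuidx_2stage():
 *
 *     int idx = riscv_cpu_mmu_index(env, false);
 *     bool two_stage = idx & MMU_2STAGE_BIT;
 *     int priv = idx & ~MMU_2STAGE_BIT;   // PRV_U/PRV_S/PRV_M or S+SUM
 */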

void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
                          uint64_t *cs_base, uint32_t *pflags)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVExtStatus fs, vs;
    uint32_t flags = 0;

    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
    *cs_base = 0;

    if (cpu->cfg.ext_zve32f) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we set the vl_eq_vlmax flag
         * to true only when maxsz >= 8 bytes.
         */
        uint32_t vlmax = vext_get_vlmax(cpu, env->vtype);
        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t maxsz = vlmax << sew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);

        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
                           FIELD_EX64(env->vtype, VTYPE, VTA));
        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
                           FIELD_EX64(env->vtype, VTYPE, VMA));
        flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    fs = EXT_STATUS_DIRTY;
    vs = EXT_STATUS_DIRTY;
#else
    flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);

    flags |= cpu_mmu_index(env, 0);
    fs = get_field(env->mstatus, MSTATUS_FS);
    vs = get_field(env->mstatus, MSTATUS_VS);

    if (env->virt_enabled) {
        flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
        /*
         * Merge DISABLED and !DIRTY states using MIN.
         * We will set both fields when dirtying.
         */
        fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
        vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
    }

    /* With Zfinx, floating point is enabled/disabled by Smstateen. */
    if (!riscv_has_ext(env, RVF)) {
        fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
             ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
    }

    if (cpu->cfg.debug && !icount_enabled()) {
        flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
    flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    if (env->cur_pmmask != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
    }
    if (env->cur_pmbase != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
    }

    *pflags = flags;
}
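
/*
 * Worked example for the vl_eq_vlmax computation above (illustrative):
 * with VLEN = 128, SEW = 8 (vsew encoding 0) and LMUL = 1/2,
 * vlmax = 128 / 8 * (1/2) = 8 elements and maxsz = vlmax << vsew = 8
 * bytes, so GVEC expansion is still permitted; shrinking LMUL to 1/4
 * gives maxsz = 4 bytes and forces the slow path even when
 * env->vl == vlmax.
 */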

void riscv_cpu_update_mask(CPURISCVState *env)
{
    target_ulong mask = 0, base = 0;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
     */
#ifndef CONFIG_USER_ONLY
    if (riscv_has_ext(env, RVJ)) {
        switch (env->priv) {
        case PRV_M:
            if (env->mmte & M_PM_ENABLE) {
                mask = env->mpmmask;
                base = env->mpmbase;
            }
            break;
        case PRV_S:
            if (env->mmte & S_PM_ENABLE) {
                mask = env->spmmask;
                base = env->spmbase;
            }
            break;
        case PRV_U:
            if (env->mmte & U_PM_ENABLE) {
                mask = env->upmmask;
                base = env->upmbase;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
#endif
    if (env->xl == MXL_RV32) {
        env->cur_pmmask = mask & UINT32_MAX;
        env->cur_pmbase = base & UINT32_MAX;
    } else {
        env->cur_pmmask = mask;
        env->cur_pmbase = base;
    }
}
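
/*
 * Sketch of how the cached mask/base pair is consumed (illustrative;
 * adjust_addr() is a hypothetical name standing in for the pointer
 * masking transform applied on memory accesses):
 *
 *     static target_ulong adjust_addr(CPURISCVState *env,
 *                                     target_ulong addr)
 *     {
 *         if (env->cur_pmmask != 0 || env->cur_pmbase != 0) {
 *             addr = (addr & env->cur_pmmask) | env->cur_pmbase;
 *         }
 *         return addr;
 *     }
 */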

#ifndef CONFIG_USER_ONLY

/*
 * The HS-mode is allowed to configure priority only for the
 * following VS-mode local interrupts:
 *
 * 0  (Reserved interrupt, reads as zero)
 * 1  Supervisor software interrupt
 * 4  (Reserved interrupt, reads as zero)
 * 5  Supervisor timer interrupt
 * 8  (Reserved interrupt, reads as zero)
 * 13 (Reserved interrupt)
 * 14 "
 * 15 "
 * 16 "
 * 17 "
 * 18 "
 * 19 "
 * 20 "
 * 21 "
 * 22 "
 * 23 "
 */

static const int hviprio_index2irq[] = {
    0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
static const int hviprio_index2rdzero[] = {
    1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
{
    if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
        return -EINVAL;
    }

    if (out_irq) {
        *out_irq = hviprio_index2irq[index];
    }

    if (out_rdzero) {
        *out_rdzero = hviprio_index2rdzero[index];
    }

    return 0;
}
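
/*
 * Usage sketch (illustrative): walking every hviprio slot while
 * skipping the read-as-zero entries, assuming a negative return value
 * signals an out-of-range index:
 *
 *     int irq, rdzero;
 *     for (int i = 0; i < 16; i++) {
 *         if (riscv_cpu_hviprio_index2irq(i, &irq, &rdzero) || rdzero) {
 *             continue;
 *         }
 *         // irq is a configurable VS-mode local interrupt number
 *     }
 */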

/*
 * Default priorities of local interrupts are defined in the
 * RISC-V Advanced Interrupt Architecture specification.
 *
 * ----------------------------------------------------------------
 *  Default  |
 *  Priority | Major Interrupt Numbers
 * ----------------------------------------------------------------
 *  Highest  | 47, 23, 46, 45, 22, 44,
 *           | 43, 21, 42, 41, 20, 40
 *           |
 *           | 11 (0b),  3 (03),  7 (07)
 *           |  9 (09),  1 (01),  5 (05)
 *           | 12 (0c)
 *           | 10 (0a),  2 (02),  6 (06)
 *           |
 *           | 39, 19, 38, 37, 18, 36,
 *  Lowest   | 35, 17, 34, 33, 16, 32
 * ----------------------------------------------------------------
 */
static const uint8_t default_iprio[64] = {
    /* Custom interrupts 48 to 63 */
    [63] = IPRIO_MMAXIPRIO,
    [62] = IPRIO_MMAXIPRIO,
    [61] = IPRIO_MMAXIPRIO,
    [60] = IPRIO_MMAXIPRIO,
    [59] = IPRIO_MMAXIPRIO,
    [58] = IPRIO_MMAXIPRIO,
    [57] = IPRIO_MMAXIPRIO,
    [56] = IPRIO_MMAXIPRIO,
    [55] = IPRIO_MMAXIPRIO,
    [54] = IPRIO_MMAXIPRIO,
    [53] = IPRIO_MMAXIPRIO,
    [52] = IPRIO_MMAXIPRIO,
    [51] = IPRIO_MMAXIPRIO,
    [50] = IPRIO_MMAXIPRIO,
    [49] = IPRIO_MMAXIPRIO,
    [48] = IPRIO_MMAXIPRIO,

    /* Custom interrupts 24 to 31 */
    [31] = IPRIO_MMAXIPRIO,
    [30] = IPRIO_MMAXIPRIO,
    [29] = IPRIO_MMAXIPRIO,
    [28] = IPRIO_MMAXIPRIO,
    [27] = IPRIO_MMAXIPRIO,
    [26] = IPRIO_MMAXIPRIO,
    [25] = IPRIO_MMAXIPRIO,
    [24] = IPRIO_MMAXIPRIO,

    [47] = IPRIO_DEFAULT_UPPER,
    [23] = IPRIO_DEFAULT_UPPER + 1,
    [46] = IPRIO_DEFAULT_UPPER + 2,
    [45] = IPRIO_DEFAULT_UPPER + 3,
    [22] = IPRIO_DEFAULT_UPPER + 4,
    [44] = IPRIO_DEFAULT_UPPER + 5,

    [43] = IPRIO_DEFAULT_UPPER + 6,
    [21] = IPRIO_DEFAULT_UPPER + 7,
    [42] = IPRIO_DEFAULT_UPPER + 8,
    [41] = IPRIO_DEFAULT_UPPER + 9,
    [20] = IPRIO_DEFAULT_UPPER + 10,
    [40] = IPRIO_DEFAULT_UPPER + 11,

    [11] = IPRIO_DEFAULT_M,
    [3]  = IPRIO_DEFAULT_M + 1,
    [7]  = IPRIO_DEFAULT_M + 2,

    [9]  = IPRIO_DEFAULT_S,
    [1]  = IPRIO_DEFAULT_S + 1,
    [5]  = IPRIO_DEFAULT_S + 2,

    [12] = IPRIO_DEFAULT_SGEXT,

    [10] = IPRIO_DEFAULT_VS,
    [2]  = IPRIO_DEFAULT_VS + 1,
    [6]  = IPRIO_DEFAULT_VS + 2,

    [39] = IPRIO_DEFAULT_LOWER,
    [19] = IPRIO_DEFAULT_LOWER + 1,
    [38] = IPRIO_DEFAULT_LOWER + 2,
    [37] = IPRIO_DEFAULT_LOWER + 3,
    [18] = IPRIO_DEFAULT_LOWER + 4,
    [36] = IPRIO_DEFAULT_LOWER + 5,

    [35] = IPRIO_DEFAULT_LOWER + 6,
    [17] = IPRIO_DEFAULT_LOWER + 7,
    [34] = IPRIO_DEFAULT_LOWER + 8,
    [33] = IPRIO_DEFAULT_LOWER + 9,
    [16] = IPRIO_DEFAULT_LOWER + 10,
    [32] = IPRIO_DEFAULT_LOWER + 11,
};

uint8_t riscv_cpu_default_priority(int irq)
{
    if (irq < 0 || irq > 63) {
        return IPRIO_MMAXIPRIO;
    }

    return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
}
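
/*
 * Illustrative check against the AIA default table above: a lower
 * returned value means a higher default priority, so M-mode external
 * (major 11) ranks above S-mode external (major 9):
 *
 *     g_assert(riscv_cpu_default_priority(11) <
 *              riscv_cpu_default_priority(9));
 */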

static int riscv_cpu_pending_to_irq(CPURISCVState *env,
                                    int extirq, unsigned int extirq_def_prio,
                                    uint64_t pending, uint8_t *iprio)
{
    int irq, best_irq = RISCV_EXCP_NONE;
    unsigned int prio, best_prio = UINT_MAX;

    if (!pending) {
        return RISCV_EXCP_NONE;
    }

    irq = ctz64(pending);
    if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia :
                                  riscv_cpu_cfg(env)->ext_ssaia)) {
        return irq;
    }

    pending = pending >> irq;
    while (pending) {
        prio = iprio[irq];
        if (!prio) {
            if (irq == extirq) {
                prio = extirq_def_prio;
            } else {
                prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
                       1 : IPRIO_MMAXIPRIO;
            }
        }
        if ((pending & 0x1) && (prio <= best_prio)) {
            best_irq = irq;
            best_prio = prio;
        }
        irq++;
        pending = pending >> 1;
    }

    return best_irq;
}

uint64_t riscv_cpu_all_pending(CPURISCVState *env)
{
    uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
    uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;

    return (env->mip | vsgein | vstip) & env->mie;
}

int riscv_cpu_mirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
                    ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                    irqs, env->miprio);
}

int riscv_cpu_sirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs, env->siprio);
}

int riscv_cpu_vsirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs >> 1, env->hviprio);
}
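
/*
 * Note on the "irqs >> 1" above (illustrative): VS-level bits sit one
 * position to the left of their HS-level counterparts in mip/mie
 * (VSEIP is bit 10, SEIP is bit 9, and so on), so shifting the pending
 * set right by one presents VS interrupts at the S-level positions the
 * guest observes:
 *
 *     g_assert((MIP_VSEIP >> 1) == MIP_SEIP);
 */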

static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    int virq;
    uint64_t irqs, pending, mie, hsie, vsie;

    /* Determine interrupt enable state of all privilege modes */
    if (env->virt_enabled) {
        mie = 1;
        hsie = 1;
        vsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
    } else {
        mie = (env->priv < PRV_M) ||
              (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
        hsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
        vsie = 0;
    }

    /* Determine all pending interrupts */
    pending = riscv_cpu_all_pending(env);

    /* Check M-mode interrupts */
    irqs = pending & ~env->mideleg & -mie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                        irqs, env->miprio);
    }

    /* Check HS-mode interrupts */
    irqs = pending & env->mideleg & ~env->hideleg & -hsie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs, env->siprio);
    }

    /* Check VS-mode interrupts */
    irqs = pending & env->mideleg & env->hideleg & -vsie;
    if (irqs) {
        virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs >> 1, env->hviprio);
        return (virq <= 0) ? virq : virq + 1;
    }

    /* Indicate no pending interrupt */
    return RISCV_EXCP_NONE;
}

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}

void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;

    if (riscv_has_ext(env, RVF)) {
        mstatus_mask |= MSTATUS_FS;
    }
    bool current_virt = env->virt_enabled;

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return 0;
    }

    return env->geilen;
}

void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    if (geilen > (TARGET_LONG_BITS - 1)) {
        return;
    }

    env->geilen = geilen;
}

/* This function can only be called to set virt when RVH is enabled */
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    /* Flush the TLB on all virt mode changes. */
    if (env->virt_enabled != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt_enabled = enable;

    if (enable) {
        /*
         * The guest external interrupts from an interrupt controller are
         * delivered only when the Guest/VM is running (i.e. V=1). This means
         * any guest external interrupt which is triggered while the Guest/VM
         * is not running (i.e. V=0) will be missed by QEMU, resulting in a
         * guest with sluggish response to serial console input and other
         * I/O events.
         *
         * To solve this, we check and inject interrupt after setting V=1.
         */
        riscv_cpu_update_mip(env, 0, 0);
    }
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t gein, vsgein = 0, vstip = 0, old = env->mip;

    if (env->virt_enabled) {
        gein = get_field(env->hstatus, HSTATUS_VGEIN);
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    }

    vstip = env->vstime_irq ? MIP_VSTIP : 0;

    QEMU_IOTHREAD_LOCK_GUARD();

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip | vsgein | vstip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    return old;
}
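
/*
 * Typical caller pattern (a sketch; assumes the BOOL_TO_MASK helper
 * used elsewhere in this target to expand a bool into an all-ones or
 * all-zeroes mask):
 *
 *     // a timer device raising or lowering the machine timer interrupt
 *     riscv_cpu_update_mip(env, MIP_MTIP, BOOL_TO_MASK(level));
 */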

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg)
{
    if (priv <= PRV_M) {
        env->aia_ireg_rmw_fn[priv] = rmw_fn;
        env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
    }
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);

    if (icount_enabled() && newpriv != env->priv) {
        riscv_itrigger_update_priv(env);
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}

/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and its
 * TLB page. Returns 0 if the permission checking was successful
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @addr: The physical address whose permission is to be checked
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    bool pmp_has_privs;

    if (!riscv_cpu_cfg(env)->pmp) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    pmp_has_privs = pmp_hart_has_privs(env, addr, size, 1 << access_type,
                                       &pmp_priv, mode);
    if (!pmp_has_privs) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);

    return TRANSLATE_SUCCESS;
}
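
/*
 * Usage sketch (illustrative, mirroring the call made from the page
 * table walker below): loads of PTEs are PMP-checked as S-mode data
 * accesses regardless of which privilege level started the walk:
 *
 *     int pmp_prot;
 *     if (get_physical_address_pmp(env, &pmp_prot, pte_addr,
 *                                  sizeof(target_ulong), MMU_DATA_LOAD,
 *                                  PRV_S) != TRANSLATE_SUCCESS) {
 *         return TRANSLATE_PMP_FAIL;
 *     }
 */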

/*
 * get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address or guest physical address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *ret_prot, vaddr addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /*
     * NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct
     */
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmuidx_priv(mmu_idx);
    bool use_background = false;
    hwaddr ppn;
    int napot_bits = 0;
    target_ulong napot_mask;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!env->virt_enabled && two_stage) {
        use_background = true;
    }

    if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
        *physical = addr;
        *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *ret_prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, widened;

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }

    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;

    if (first_stage == true) {
        target_ulong mask, masked_msbs;

        if (TARGET_LONG_BITS > (va_bits - 1)) {
            mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
        } else {
            mask = 0;
        }
        masked_msbs = (addr >> (va_bits - 1)) & mask;

        if (masked_msbs != 0 && masked_msbs != mask) {
            return TRANSLATE_FAIL;
        }
    } else {
        if (vm != VM_1_10_SV32 && addr >> va_bits != 0) {
            return TRANSLATE_FAIL;
        }
    }

    bool pbmte = env->menvcfg & MENVCFG_PBMTE;
    bool hade = env->menvcfg & MENVCFG_HADE;

    if (first_stage && two_stage && env->virt_enabled) {
        pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
        hade = hade && (env->henvcfg & HENVCFG_HADE);
    }

    int ptshift = (levels - 1) * ptidxbits;
    target_ulong pte;
    hwaddr pte_addr;
    int i;

#if !TCG_OVERSIZED_GUEST
 restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 MMUIdx_U, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        MemTxResult res;
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        if (riscv_cpu_sxl(env) == MXL_RV32) {
            ppn = pte >> PTE_PPN_SHIFT;
        } else {
            if (pte & PTE_RESERVED) {
                return TRANSLATE_FAIL;
            }

            if (!pbmte && (pte & PTE_PBMT)) {
                return TRANSLATE_FAIL;
            }

            if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
                return TRANSLATE_FAIL;
            }

            ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
        }

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        }
        if (pte & (PTE_R | PTE_W | PTE_X)) {
            goto leaf;
        }

        /* Inner PTE, continue walking */
        if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
            return TRANSLATE_FAIL;
        }
        base = ppn << PGSHIFT;
    }

    /* No leaf pte at any translation level. */
    return TRANSLATE_FAIL;

 leaf:
    if (ppn & ((1ULL << ptshift) - 1)) {
        /* Misaligned PPN */
        return TRANSLATE_FAIL;
    }
    if (!pbmte && (pte & PTE_PBMT)) {
        /* Reserved without Svpbmt. */
        return TRANSLATE_FAIL;
    }

    /* Check for reserved combinations of RWX flags. */
    switch (pte & (PTE_R | PTE_W | PTE_X)) {
    case PTE_W:
    case PTE_W | PTE_X:
        return TRANSLATE_FAIL;
    }

    int prot = 0;
    if (pte & PTE_R) {
        prot |= PAGE_READ;
    }
    if (pte & PTE_W) {
        prot |= PAGE_WRITE;
    }
    if (pte & PTE_X) {
        bool mxr;

        if (first_stage == true) {
            mxr = get_field(env->mstatus, MSTATUS_MXR);
        } else {
            mxr = get_field(env->vsstatus, MSTATUS_MXR);
        }
        if (mxr) {
            prot |= PAGE_READ;
        }
        prot |= PAGE_EXEC;
    }

    if (pte & PTE_U) {
        if (mode != PRV_U) {
            if (!mmuidx_sum(mmu_idx)) {
                return TRANSLATE_FAIL;
            }
            /* SUM allows only read+write, not execute. */
            prot &= PAGE_READ | PAGE_WRITE;
        }
    } else if (mode != PRV_S) {
        /* Supervisor PTE flags when not S mode */
        return TRANSLATE_FAIL;
    }

    if (!((prot >> access_type) & 1)) {
        /* Access check failed */
        return TRANSLATE_FAIL;
    }

    /* If necessary, set accessed and dirty bits. */
    target_ulong updated_pte = pte | PTE_A |
            (access_type == MMU_DATA_STORE ? PTE_D : 0);

    /* Page table updates need to be atomic with MTTCG enabled */
    if (updated_pte != pte && !is_debug) {
        if (!hade) {
            return TRANSLATE_FAIL;
        }

        /*
         * - if accessed or dirty bits need updating, and the PTE is
         *   in RAM, then we do so atomically with a compare and swap.
         * - if the PTE is in IO space or ROM, then it can't be updated
         *   and we return TRANSLATE_FAIL.
         * - if the PTE changed by the time we went to update it, then
         *   it is no longer valid and we must re-walk the page table.
         */
        MemoryRegion *mr;
        hwaddr l = sizeof(target_ulong), addr1;
        mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
                                     false, MEMTXATTRS_UNSPECIFIED);
        if (memory_region_is_ram(mr)) {
            target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
            /*
             * MTTCG is not enabled on oversized TCG guests so
             * page table updates do not need to be atomic
             */
            *pte_pa = pte = updated_pte;
#else
            target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
            if (old_pte != pte) {
                goto restart;
            }
            pte = updated_pte;
#endif
        } else {
            /*
             * Misconfigured PTE in ROM (AD bits are not preset) or
             * PTE is in IO space and can't be updated atomically.
             */
            return TRANSLATE_FAIL;
        }
    }

    /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */
    target_ulong vpn = addr >> PGSHIFT;

    if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
        napot_bits = ctzl(ppn) + 1;
        if ((i != (levels - 1)) || (napot_bits != 4)) {
            return TRANSLATE_FAIL;
        }
    }

    napot_mask = (1 << napot_bits) - 1;
    *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
                  (vpn & (((target_ulong)1 << ptshift) - 1))
                 ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);

    /*
     * Remove write permission unless this is a store, or the page is
     * already dirty, so that we TLB miss on later writes to update
     * the dirty bit.
     */
    if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) {
        prot &= ~PAGE_WRITE;
    }
    *ret_prot = prot;

    return TRANSLATE_SUCCESS;
}
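
/*
 * Worked example of the walk above (illustrative): for Sv39,
 * levels = 3 and ptidxbits = 9, so a virtual address decomposes as
 *
 *     va[38:30] -> index at level 0 (ptshift = 18)
 *     va[29:21] -> index at level 1 (ptshift = 9)
 *     va[20:12] -> index at level 2 (ptshift = 0)
 *     va[11:0]  -> page offset
 *
 * A leaf found at level 0 or 1 is a superpage, which is why the
 * "ppn & ((1ULL << ptshift) - 1)" check rejects PPNs that are not
 * aligned to the superpage size.
 */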

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage,
                                bool two_stage_indirect)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t stap_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        stap_mode = SATP32_MODE;
    } else {
        stap_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, stap_mode);
    } else {
        vm = get_field(env->hgatp, stap_mode);
    }

    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (env->virt_enabled && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT :
                RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
    env->two_stage_indirect_lookup = two_stage_indirect;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, env->virt_enabled, true)) {
        return -1;
    }

    if (env->virt_enabled) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = mmuidx_2stage(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = mmuidx_2stage(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}

static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
{
    enum riscv_pmu_event_idx pmu_event_type;

    switch (access_type) {
    case MMU_INST_FETCH:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
        break;
    case MMU_DATA_LOAD:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
        break;
    case MMU_DATA_STORE:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
        break;
    default:
        return;
    }

    riscv_pmu_incr_ctr(cpu, pmu_event_type);
}

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = mmuidx_2stage(mmu_idx);
    bool two_stage_indirect_error = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmuidx_priv(mmu_idx);
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    pmu_tlb_fill_incr_ctr(cpu, access_type);
    if (two_stage_lookup) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during the two-stage lookup;
         * env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            two_stage_indirect_error = true;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, MMUIdx_U, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx
                          " ret %d physical "
                          HWADDR_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, pa,
                                               size, access_type, mode);
                tlb_size = pmp_get_tlb_size(env, pa);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, pa,
                                           size, access_type, mode);
            tlb_size = pmp_get_tlb_size(env, pa);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);
            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error, two_stage_lookup,
                            two_stage_indirect_error);
        cpu_loop_exit_restore(cs, retaddr);
    }

    return true;
}
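
/*
 * Note on the ">> 2" when forming guest_phys_fault_addr above
 * (illustrative): htval/mtval2 report a guest physical address shifted
 * right by two, so a G-stage fault on guest-physical 0x80001000 is
 * presented to the hypervisor as:
 *
 *     target_ulong htval = 0x80001000 >> 2;   // 0x20000400
 */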

static target_ulong riscv_transformed_insn(CPURISCVState *env,
                                           target_ulong insn,
                                           target_ulong taddr)
{
    target_ulong xinsn = 0;
    target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;

    /*
     * Only Quadrant 0 and Quadrant 2 of RVC instruction space need to
     * be uncompressed. Quadrant 1 of the RVC instruction space need
     * not be transformed because those instructions won't generate
     * any load/store trap.
     */

    if ((insn & 0x3) != 0x3) {
        /* Transform 16bit instruction into 32bit instruction */
        switch (GET_C_OP(insn)) {
        case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLD_LQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LW: /* C.LW */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_LW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLW_LD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LW_IMM(insn);
                    access_size = 4;
                } else { /* C.LD (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSD_SQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SW: /* C.SW */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_SW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FSW_SD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SW_IMM(insn);
                    access_size = 4;
                } else { /* C.SD (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
        case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLDSP_LQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RD(insn));
                access_rs1 = 2;
                access_imm = GET_C_LWSP_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLWSP_LDSP:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.LDSP (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSDSP_SQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                access_rs1 = 2;
                access_imm = GET_C_SWSP_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FSWSP_SDSP:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.SDSP (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /*
         * Clear bit 1 of the transformed instruction to indicate that the
         * original instruction was a 16bit instruction
         */
        xinsn &= ~((target_ulong)0x2);
    } else {
        /* Transform 32bit (or wider) instructions */
        switch (MASK_OP_MAJOR(insn)) {
        case OPC_RISC_ATOMIC:
            xinsn = insn;
            access_rs1 = GET_RS1(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_LOAD:
        case OPC_RISC_FP_LOAD:
            xinsn = SET_I_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_STORE:
        case OPC_RISC_FP_STORE:
            xinsn = SET_S_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_STORE_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_SYSTEM:
            if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
                xinsn = insn;
                access_rs1 = GET_RS1(insn);
                access_size = ((GET_FUNCT7(insn) >> 1) & 0x3);
                access_size = 1 << access_size;
            }
            break;
        default:
            break;
        }
    }

    if (access_size) {
        xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
                               (access_size - 1));
    }

    return xinsn;
}
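
/*
 * Worked example for the SET_RS1() fixup above (illustrative): for
 * "lw a0, 3(a1)" with a1 = 0x1000 and the fault reported at
 * taddr = 0x1003, the "Addr. Offset" is (0x1003 - 0x1003) & 3 = 0;
 * a non-zero offset only appears when the reported fault address
 * differs from rs1 + imm, e.g. on a later beat of a misaligned access.
 */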
#endif /* !CONFIG_USER_ONLY */

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool write_gva = false;
    uint64_t s;

    /*
     * cs->exception_index is 32 bits wide unlike mcause which is XLEN bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    uint64_t deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;
    target_ulong tinst = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (cause == RISCV_EXCP_SEMIHOST) {
        do_common_semihosting(cs);
        env->pc += 4;
        return;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            } else {
                /*
                 * The "Addr. Offset" field in the transformed instruction
                 * is non-zero only for misaligned accesses.
                 */
                tinst = riscv_transformed_insn(env, env->bins, tval);
            }
            break;
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            }
            break;
        case RISCV_EXCP_ILLEGAL_INST:
        case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
            tval = env->bins;
            break;
        case RISCV_EXCP_BREAKPOINT:
            if (cs->watchpoint_hit) {
                tval = cs->watchpoint_hit->hitaddr;
                cs->watchpoint_hit = NULL;
            }
            break;
        default:
            break;
        }

        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && env->virt_enabled) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !env->virt_enabled) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));

    if (env->priv <= PRV_S &&
        cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            uint64_t hdeleg = async ? env->hideleg : env->hedeleg;

            if (env->virt_enabled && ((hdeleg >> cause) & 1)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause: yes if it's a VS-mode
                 * interrupt, no if the hypervisor has delegated one of HS
                 * mode's interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                write_gva = false;
            } else if (env->virt_enabled) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true);

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
        env->htinst = tinst;
        env->pc = (env->stvec >> 2 << 2) +
                  ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (env->virt_enabled) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     env->virt_enabled);
            if (env->virt_enabled && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->mtinst = tinst;
        env->pc = (env->mtvec >> 2 << 2) +
                  ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /*
     * NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
    env->two_stage_indirect_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}
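
/*
 * Note on the mcause encoding above (illustrative): the interrupt flag
 * is the MSB of the XLEN-wide cause register, and
 *
 *     cause | ~(((target_ulong)-1) >> async)
 *
 * leaves cause untouched for synchronous traps (async == 0 makes the
 * mask term 0) and ORs in the top bit for interrupts (async == 1).
 */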