4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "migration/vmstate.h"
33 #include "fpu/softfloat-helpers.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/tcg.h"
36 #include "kvm/kvm_riscv.h"
37 #include "tcg/tcg-cpu.h"
/* RISC-V CPU definitions */

/*
 * Canonical ordering of the single-letter MISA extensions as they appear
 * in an ISA string (e.g. "rv64imafdc").
 */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";

/* All MISA bits known to QEMU; the trailing 0 terminates iteration. */
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, 0};
/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
/*
 * NOTE(review): upstream wraps this in #if HOST_BIG_ENDIAN with a plain
 * (x) definition for little-endian hosts; only this variant is visible
 * in this chunk -- confirm the conditional was not lost.
 */
#define BYTE(x) ((x) ^ 7)
/*
 * Build one RISCVIsaExtData entry: the extension's name string, the
 * minimum privileged-spec version that may expose it, and the byte
 * offset of its enable flag inside struct RISCVCPUConfig.
 */
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
60 * Here are the ordering rules of extension naming defined by RISC-V
62 * 1. All extensions should be separated from other multi-letter extensions
64 * 2. The first letter following the 'Z' conventionally indicates the most
65 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
66 * If multiple 'Z' extensions are named, they should be ordered first
67 * by category, then alphabetically within a category.
68 * 3. Standard supervisor-level extensions (starts with 'S') should be
69 * listed after standard unprivileged extensions. If multiple
70 * supervisor-level extensions are listed, they should be ordered
72 * 4. Non-standard extensions (starts with 'X') must be listed after all
73 * standard extensions. They must be separated from other multi-letter
74 * extensions by an underscore.
76 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
79 const RISCVIsaExtData isa_edata_arr[] = {
80 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
81 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
82 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
83 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
84 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
85 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
86 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
87 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
88 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
89 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
90 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
91 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
92 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
93 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
94 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
95 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
96 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
97 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
98 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
99 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
100 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
101 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
102 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
103 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
104 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
105 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
106 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
107 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
108 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
109 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
110 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
111 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
112 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
113 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
114 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
115 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
116 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
117 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
118 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
119 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
120 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
121 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
122 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
123 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
124 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
125 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
126 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
127 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
128 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
129 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
130 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
131 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
132 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
133 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
134 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
135 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
136 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
137 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
138 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
139 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
140 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
141 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
142 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
143 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
144 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
145 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
146 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
147 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
148 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
149 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
150 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
151 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
152 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
153 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
154 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
155 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
156 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
157 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
158 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
160 DEFINE_PROP_END_OF_LIST(),
163 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
165 bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
170 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
172 bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
/* GDB/monitor names of the 32 integer registers: "xN/ABI-name". */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};
/* Names of the upper halves of the integer registers (RV128 dumps). */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};
/* Names of the 32 floating-point registers: "fN/ABI-name". */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};
/* Names of the 32 vector registers. */
const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};
/*
 * Human-readable exception (trap) names, indexed by cause number.
 * NOTE(review): most entries of this table are not visible in this
 * chunk -- only a fragment is shown; verify against the full file.
 */
static const char * const riscv_excp_names[] = {
    "illegal_instruction",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "guest_store_page_fault",

/*
 * Human-readable interrupt names, indexed by interrupt cause.
 * NOTE(review): the entries of this table are not visible in this chunk.
 */
static const char * const riscv_intr_names[] = {
257 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
260 return (cause < ARRAY_SIZE(riscv_intr_names)) ?
261 riscv_intr_names[cause] : "(unknown)";
263 return (cause < ARRAY_SIZE(riscv_excp_names)) ?
264 riscv_excp_names[cause] : "(unknown)";
268 void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
270 env->misa_mxl_max = env->misa_mxl = mxl;
271 env->misa_ext_mask = env->misa_ext = ext;
274 #ifndef CONFIG_USER_ONLY
275 static uint8_t satp_mode_from_str(const char *satp_mode_str)
277 if (!strncmp(satp_mode_str, "mbare", 5)) {
278 return VM_1_10_MBARE;
281 if (!strncmp(satp_mode_str, "sv32", 4)) {
285 if (!strncmp(satp_mode_str, "sv39", 4)) {
289 if (!strncmp(satp_mode_str, "sv48", 4)) {
293 if (!strncmp(satp_mode_str, "sv57", 4)) {
297 if (!strncmp(satp_mode_str, "sv64", 4)) {
301 g_assert_not_reached();
/*
 * Return the highest satp mode bit set in @map (i.e. the index of the
 * most significant set bit).  @map must be non-zero.
 * (Fix: the zero guard was missing; 31 - __builtin_clz(0) is undefined
 * behavior and would wrap to a huge value.)
 */
uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}
321 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
345 g_assert_not_reached();
348 static void set_satp_mode_max_supported(RISCVCPU *cpu,
351 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
352 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
354 for (int i = 0; i <= satp_mode; ++i) {
356 cpu->cfg.satp_mode.supported |= (1 << i);
361 /* Set the satp mode to the max supported */
362 static void set_satp_mode_default_map(RISCVCPU *cpu)
364 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
368 static void riscv_any_cpu_init(Object *obj)
370 RISCVCPU *cpu = RISCV_CPU(obj);
371 CPURISCVState *env = &cpu->env;
372 #if defined(TARGET_RISCV32)
373 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
374 #elif defined(TARGET_RISCV64)
375 riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
378 #ifndef CONFIG_USER_ONLY
379 set_satp_mode_max_supported(RISCV_CPU(obj),
380 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
381 VM_1_10_SV32 : VM_1_10_SV57);
384 env->priv_ver = PRIV_VERSION_LATEST;
386 /* inherited from parent obj via riscv_cpu_init() */
387 cpu->cfg.ext_zifencei = true;
388 cpu->cfg.ext_zicsr = true;
393 static void riscv_max_cpu_init(Object *obj)
395 RISCVCPU *cpu = RISCV_CPU(obj);
396 CPURISCVState *env = &cpu->env;
397 RISCVMXL mlx = MXL_RV64;
399 #ifdef TARGET_RISCV32
402 riscv_cpu_set_misa(env, mlx, 0);
403 env->priv_ver = PRIV_VERSION_LATEST;
404 #ifndef CONFIG_USER_ONLY
405 set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ?
406 VM_1_10_SV32 : VM_1_10_SV57);
410 #if defined(TARGET_RISCV64)
411 static void rv64_base_cpu_init(Object *obj)
413 CPURISCVState *env = &RISCV_CPU(obj)->env;
414 /* We set this in the realise function */
415 riscv_cpu_set_misa(env, MXL_RV64, 0);
416 /* Set latest version of privileged specification */
417 env->priv_ver = PRIV_VERSION_LATEST;
418 #ifndef CONFIG_USER_ONLY
419 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
423 static void rv64_sifive_u_cpu_init(Object *obj)
425 RISCVCPU *cpu = RISCV_CPU(obj);
426 CPURISCVState *env = &cpu->env;
427 riscv_cpu_set_misa(env, MXL_RV64,
428 RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
429 env->priv_ver = PRIV_VERSION_1_10_0;
430 #ifndef CONFIG_USER_ONLY
431 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
434 /* inherited from parent obj via riscv_cpu_init() */
435 cpu->cfg.ext_zifencei = true;
436 cpu->cfg.ext_zicsr = true;
441 static void rv64_sifive_e_cpu_init(Object *obj)
443 CPURISCVState *env = &RISCV_CPU(obj)->env;
444 RISCVCPU *cpu = RISCV_CPU(obj);
446 riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
447 env->priv_ver = PRIV_VERSION_1_10_0;
448 #ifndef CONFIG_USER_ONLY
449 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
452 /* inherited from parent obj via riscv_cpu_init() */
453 cpu->cfg.ext_zifencei = true;
454 cpu->cfg.ext_zicsr = true;
458 static void rv64_thead_c906_cpu_init(Object *obj)
460 CPURISCVState *env = &RISCV_CPU(obj)->env;
461 RISCVCPU *cpu = RISCV_CPU(obj);
463 riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
464 env->priv_ver = PRIV_VERSION_1_11_0;
466 cpu->cfg.ext_zfa = true;
467 cpu->cfg.ext_zfh = true;
469 cpu->cfg.ext_xtheadba = true;
470 cpu->cfg.ext_xtheadbb = true;
471 cpu->cfg.ext_xtheadbs = true;
472 cpu->cfg.ext_xtheadcmo = true;
473 cpu->cfg.ext_xtheadcondmov = true;
474 cpu->cfg.ext_xtheadfmemidx = true;
475 cpu->cfg.ext_xtheadmac = true;
476 cpu->cfg.ext_xtheadmemidx = true;
477 cpu->cfg.ext_xtheadmempair = true;
478 cpu->cfg.ext_xtheadsync = true;
480 cpu->cfg.mvendorid = THEAD_VENDOR_ID;
481 #ifndef CONFIG_USER_ONLY
482 set_satp_mode_max_supported(cpu, VM_1_10_SV39);
485 /* inherited from parent obj via riscv_cpu_init() */
489 static void rv64_veyron_v1_cpu_init(Object *obj)
491 CPURISCVState *env = &RISCV_CPU(obj)->env;
492 RISCVCPU *cpu = RISCV_CPU(obj);
494 riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
495 env->priv_ver = PRIV_VERSION_1_12_0;
497 /* Enable ISA extensions */
499 cpu->cfg.ext_zifencei = true;
500 cpu->cfg.ext_zicsr = true;
502 cpu->cfg.ext_zicbom = true;
503 cpu->cfg.cbom_blocksize = 64;
504 cpu->cfg.cboz_blocksize = 64;
505 cpu->cfg.ext_zicboz = true;
506 cpu->cfg.ext_smaia = true;
507 cpu->cfg.ext_ssaia = true;
508 cpu->cfg.ext_sscofpmf = true;
509 cpu->cfg.ext_sstc = true;
510 cpu->cfg.ext_svinval = true;
511 cpu->cfg.ext_svnapot = true;
512 cpu->cfg.ext_svpbmt = true;
513 cpu->cfg.ext_smstateen = true;
514 cpu->cfg.ext_zba = true;
515 cpu->cfg.ext_zbb = true;
516 cpu->cfg.ext_zbc = true;
517 cpu->cfg.ext_zbs = true;
518 cpu->cfg.ext_XVentanaCondOps = true;
520 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
521 cpu->cfg.marchid = VEYRON_V1_MARCHID;
522 cpu->cfg.mimpid = VEYRON_V1_MIMPID;
524 #ifndef CONFIG_USER_ONLY
525 set_satp_mode_max_supported(cpu, VM_1_10_SV48);
529 static void rv128_base_cpu_init(Object *obj)
531 if (qemu_tcg_mttcg_enabled()) {
532 /* Missing 128-bit aligned atomics */
533 error_report("128-bit RISC-V currently does not work with Multi "
534 "Threaded TCG. Please use: -accel tcg,thread=single");
537 CPURISCVState *env = &RISCV_CPU(obj)->env;
538 /* We set this in the realise function */
539 riscv_cpu_set_misa(env, MXL_RV128, 0);
540 /* Set latest version of privileged specification */
541 env->priv_ver = PRIV_VERSION_LATEST;
542 #ifndef CONFIG_USER_ONLY
543 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
547 static void rv32_base_cpu_init(Object *obj)
549 CPURISCVState *env = &RISCV_CPU(obj)->env;
550 /* We set this in the realise function */
551 riscv_cpu_set_misa(env, MXL_RV32, 0);
552 /* Set latest version of privileged specification */
553 env->priv_ver = PRIV_VERSION_LATEST;
554 #ifndef CONFIG_USER_ONLY
555 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
559 static void rv32_sifive_u_cpu_init(Object *obj)
561 RISCVCPU *cpu = RISCV_CPU(obj);
562 CPURISCVState *env = &cpu->env;
563 riscv_cpu_set_misa(env, MXL_RV32,
564 RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
565 env->priv_ver = PRIV_VERSION_1_10_0;
566 #ifndef CONFIG_USER_ONLY
567 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
570 /* inherited from parent obj via riscv_cpu_init() */
571 cpu->cfg.ext_zifencei = true;
572 cpu->cfg.ext_zicsr = true;
577 static void rv32_sifive_e_cpu_init(Object *obj)
579 CPURISCVState *env = &RISCV_CPU(obj)->env;
580 RISCVCPU *cpu = RISCV_CPU(obj);
582 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
583 env->priv_ver = PRIV_VERSION_1_10_0;
584 #ifndef CONFIG_USER_ONLY
585 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
588 /* inherited from parent obj via riscv_cpu_init() */
589 cpu->cfg.ext_zifencei = true;
590 cpu->cfg.ext_zicsr = true;
594 static void rv32_ibex_cpu_init(Object *obj)
596 CPURISCVState *env = &RISCV_CPU(obj)->env;
597 RISCVCPU *cpu = RISCV_CPU(obj);
599 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
600 env->priv_ver = PRIV_VERSION_1_11_0;
601 #ifndef CONFIG_USER_ONLY
602 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
604 /* inherited from parent obj via riscv_cpu_init() */
605 cpu->cfg.ext_zifencei = true;
606 cpu->cfg.ext_zicsr = true;
608 cpu->cfg.ext_smepmp = true;
611 static void rv32_imafcu_nommu_cpu_init(Object *obj)
613 CPURISCVState *env = &RISCV_CPU(obj)->env;
614 RISCVCPU *cpu = RISCV_CPU(obj);
616 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
617 env->priv_ver = PRIV_VERSION_1_10_0;
618 #ifndef CONFIG_USER_ONLY
619 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
622 /* inherited from parent obj via riscv_cpu_init() */
623 cpu->cfg.ext_zifencei = true;
624 cpu->cfg.ext_zicsr = true;
629 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
635 cpuname = g_strsplit(cpu_model, ",", 1);
636 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
637 oc = object_class_by_name(typename);
640 if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
641 object_class_is_abstract(oc)) {
647 char *riscv_cpu_get_name(RISCVCPU *cpu)
649 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
650 const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
652 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
654 return g_strndup(typename,
655 strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX));
/*
 * Dump architectural state to @f for "info registers": hypervisor V bit,
 * pc, a selection of CSRs (system mode only), GPRs, and optionally FPRs
 * (CPU_DUMP_FPU) and vector state (CPU_DUMP_VPU).
 *
 * NOTE(review): many lines of this function (the CSR number tables,
 * loop-variable declarations, braces) are not visible in this chunk;
 * the comments below only describe what is shown.
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    /* Virtualization mode is only meaningful with the H extension */
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    /* CSR numbers to dump; contents not visible in this chunk */
    static const int dump_csrs[] = {
    /*
     * CSR_SSTATUS is intentionally omitted here as its value
     * can be figured out by looking at CSR_MSTATUS
     */
    for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
        int csrno = dump_csrs[i];
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

        /*
         * Rely on the smode, hmode, etc, predicates within csr.c
         * to do the filtering of the registers that are present.
         */
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[csrno].name, val);

    /* General-purpose registers */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        qemu_fprintf(f, "\n");

    /* Floating-point registers, only when requested */
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            qemu_fprintf(f, "\n");

    /* Vector CSRs and registers, only with RVV and when requested */
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
        /* vlen is in bits; vlenb is the register width in bytes */
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            /* Print MSB first; BYTE() fixes up host endianness */
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            qemu_fprintf(f, "\n");
784 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
786 RISCVCPU *cpu = RISCV_CPU(cs);
787 CPURISCVState *env = &cpu->env;
789 if (env->xl == MXL_RV32) {
790 env->pc = (int32_t)value;
796 static vaddr riscv_cpu_get_pc(CPUState *cs)
798 RISCVCPU *cpu = RISCV_CPU(cs);
799 CPURISCVState *env = &cpu->env;
801 /* Match cpu_get_tb_cpu_state. */
802 if (env->xl == MXL_RV32) {
803 return env->pc & UINT32_MAX;
808 static bool riscv_cpu_has_work(CPUState *cs)
810 #ifndef CONFIG_USER_ONLY
811 RISCVCPU *cpu = RISCV_CPU(cs);
812 CPURISCVState *env = &cpu->env;
814 * Definition of the WFI instruction requires it to ignore the privilege
815 * mode and delegation registers, but respect individual enables
817 return riscv_cpu_all_pending(env) != 0 ||
818 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
819 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
/*
 * ResettablePhases "hold" handler: bring the CPU back to its
 * architectural reset state (mstatus, pc <- resetvec, interrupt
 * priorities, PMP unlock, etc.) after chaining to the parent class.
 *
 * NOTE(review): several lines (local variable declarations, closing
 * braces, a few statements) are not visible in this chunk; comments
 * only describe the visible code.
 */
static void riscv_cpu_reset_hold(Object *obj)
#ifndef CONFIG_USER_ONLY
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    /* Chain to the parent class's hold phase first */
    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
#ifndef CONFIG_USER_ONLY
    /* Reset runtime XLEN to the configured maximum */
    env->misa_mxl = env->misa_mxl_max;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->two_stage_lookup = false;

    /* menvcfg/henvcfg default bits depend on the configured extensions */
    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
    /* Copy M-mode priorities into the hypervisor VS priority array */
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        env->hviprio[irq] = env->miprio[irq];

    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
    pmp_unlock_entries(env);
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    /* RISC-V mandates a canonical (default) NaN on FP operations */
    set_default_nan_mode(1, &env->fp_status);
#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    /* Under KVM the in-kernel vcpu state must be reset as well */
    kvm_riscv_reset_vcpu(cpu);
917 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
919 RISCVCPU *cpu = RISCV_CPU(s);
920 CPURISCVState *env = &cpu->env;
921 info->target_info = &cpu->cfg;
925 info->print_insn = print_insn_riscv32;
928 info->print_insn = print_insn_riscv64;
931 info->print_insn = print_insn_riscv128;
934 g_assert_not_reached();
938 #ifndef CONFIG_USER_ONLY
/*
 * Validate and normalize the user-configured satp mode map against what
 * the CPU supports, then expand the map so every mode below the chosen
 * maximum is enabled.  Reports errors via @errp.
 *
 * NOTE(review): several closing braces and early returns are not
 * visible in this chunk; comments describe only the visible code.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));

    /*
     * Make sure the user did not ask for an invalid configuration as per
     */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
            (cpu->cfg.satp_mode.init & (1 << i)) &&
            (cpu->cfg.satp_mode.supported & (1 << i))) {
            error_setg(errp, "cannot disable %s satp mode if %s "
                       "is enabled", satp_mode_str(i, false),
                       satp_mode_str(satp_mode_map_max, false));

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
1013 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1015 Error *local_err = NULL;
1018 * KVM accel does not have a specialized finalize()
1019 * callback because its extensions are validated
1020 * in the get()/set() callbacks of each property.
1022 if (tcg_enabled()) {
1023 riscv_tcg_cpu_finalize_features(cpu, &local_err);
1024 if (local_err != NULL) {
1025 error_propagate(errp, local_err);
1030 #ifndef CONFIG_USER_ONLY
1031 riscv_cpu_satp_mode_finalize(cpu, &local_err);
1032 if (local_err != NULL) {
1033 error_propagate(errp, local_err);
1039 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1041 CPUState *cs = CPU(dev);
1042 RISCVCPU *cpu = RISCV_CPU(dev);
1043 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1044 Error *local_err = NULL;
1046 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
1047 warn_report("The 'any' CPU is deprecated and will be "
1048 "removed in the future.");
1051 cpu_exec_realizefn(cs, &local_err);
1052 if (local_err != NULL) {
1053 error_propagate(errp, local_err);
1057 riscv_cpu_finalize_features(cpu, &local_err);
1058 if (local_err != NULL) {
1059 error_propagate(errp, local_err);
1063 riscv_cpu_register_gdb_regs_for_features(cs);
1065 #ifndef CONFIG_USER_ONLY
1066 if (cpu->cfg.debug) {
1067 riscv_trigger_realize(&cpu->env);
1074 mcc->parent_realize(dev, errp);
1077 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1079 if (tcg_enabled()) {
1080 return riscv_cpu_tcg_compatible(cpu);
1086 #ifndef CONFIG_USER_ONLY
1087 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1088 void *opaque, Error **errp)
1090 RISCVSATPMap *satp_map = opaque;
1091 uint8_t satp = satp_mode_from_str(name);
1094 value = satp_map->map & (1 << satp);
1096 visit_type_bool(v, name, &value, errp);
1099 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1100 void *opaque, Error **errp)
1102 RISCVSATPMap *satp_map = opaque;
1103 uint8_t satp = satp_mode_from_str(name);
1106 if (!visit_type_bool(v, name, &value, errp)) {
1110 satp_map->map = deposit32(satp_map->map, satp, 1, value);
1111 satp_map->init |= 1 << satp;
1114 void riscv_add_satp_mode_properties(Object *obj)
1116 RISCVCPU *cpu = RISCV_CPU(obj);
1118 if (cpu->env.misa_mxl == MXL_RV32) {
1119 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1120 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1122 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1123 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1124 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1125 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1126 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1127 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1128 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1129 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
/*
 * GPIO input handler for CPU interrupt lines.  Local interrupts
 * (irq < IRQ_LOCAL_MAX) go to mip (or to KVM); guest external
 * interrupts map to bits of the HGEIP CSR and raise mip.SGEIP.
 *
 * NOTE(review): the switch/case labels and several braces of this
 * function are not visible in this chunk; comments describe only the
 * visible code.
 */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        if (kvm_enabled()) {
            /* Forward to the in-kernel irqchip */
            kvm_riscv_set_irq(cpu, irq, level);
            riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
        if (kvm_enabled()) {
            kvm_riscv_set_irq(cpu, irq, level);
            /* Track the external SEIP source separately from software */
            env->external_seip = level;
            riscv_cpu_update_mip(env, 1 << irq,
                                 BOOL_TO_MASK(level | env->software_seip));
            g_assert_not_reached();
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
            env->hgeip |= (target_ulong)1 << irq;

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
        g_assert_not_reached();
1194 #endif /* CONFIG_USER_ONLY */
1196 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1198 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1201 static void riscv_cpu_post_init(Object *obj)
1203 accel_cpu_instance_init(CPU(obj));
1206 static void riscv_cpu_init(Object *obj)
1208 #ifndef CONFIG_USER_ONLY
1209 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1210 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1211 #endif /* CONFIG_USER_ONLY */
1214 * The timer and performance counters extensions were supported
1215 * in QEMU before they were added as discrete extensions in the
1216 * ISA. To keep compatibility we'll always default them to 'true'
1217 * for all CPUs. Each accelerator will decide what to do when
1218 * users disable them.
1220 RISCV_CPU(obj)->cfg.ext_zicntr = true;
/* User-facing name and description for one MISA single-letter bit. */
typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

/*
 * Index of a MISA bit inside misa_ext_info_arr[]: the bit position.
 * Callers must never pass 0 (ctz of zero is undefined).
 */
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1234 static const MISAExtInfo misa_ext_info_arr[] = {
1235 MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1236 MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1237 MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
1238 MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
1239 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1240 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1241 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1242 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1243 MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1244 MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1245 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1246 MISA_EXT_INFO(RVV, "v", "Vector operations"),
1247 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1250 static int riscv_validate_misa_info_idx(uint32_t bit)
1255 * Our lowest valid input (RVA) is 1 and
1256 * __builtin_ctz() is UB with zero.
1259 idx = MISA_INFO_IDX(bit);
1261 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1265 const char *riscv_get_misa_ext_name(uint32_t bit)
1267 int idx = riscv_validate_misa_info_idx(bit);
1268 const char *val = misa_ext_info_arr[idx].name;
1270 g_assert(val != NULL);
1274 const char *riscv_get_misa_ext_description(uint32_t bit)
1276 int idx = riscv_validate_misa_info_idx(bit);
1277 const char *val = misa_ext_info_arr[idx].description;
1279 g_assert(val != NULL);
/* Declare one boolean multi-letter-extension config entry. */
#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}
1287 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1288 /* Defaults for standard extensions */
1289 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1290 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1291 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1292 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1293 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1294 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1295 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1296 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1297 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1298 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1299 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1300 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1301 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1303 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1304 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1305 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1306 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1307 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1308 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1310 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1312 MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1313 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1314 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1315 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1316 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1317 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1318 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1319 MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1320 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1321 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1322 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1323 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1324 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1325 MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1326 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1327 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1328 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1330 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1331 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1332 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1333 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1335 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1336 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1338 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1340 MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1341 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1342 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1343 MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1344 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1345 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1346 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1347 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1349 DEFINE_PROP_END_OF_LIST(),
1352 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1353 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1354 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1355 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1356 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1357 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1358 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1359 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1360 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1361 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1362 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1363 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1364 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1366 DEFINE_PROP_END_OF_LIST(),
1369 /* These are experimental so mark with 'x-' */
1370 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1371 MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
1372 MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),
1374 MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
1375 MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),
1377 MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
1378 MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
1379 MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),
1381 /* Vector cryptography extensions */
1382 MULTI_EXT_CFG_BOOL("x-zvbb", ext_zvbb, false),
1383 MULTI_EXT_CFG_BOOL("x-zvbc", ext_zvbc, false),
1384 MULTI_EXT_CFG_BOOL("x-zvkg", ext_zvkg, false),
1385 MULTI_EXT_CFG_BOOL("x-zvkned", ext_zvkned, false),
1386 MULTI_EXT_CFG_BOOL("x-zvknha", ext_zvknha, false),
1387 MULTI_EXT_CFG_BOOL("x-zvknhb", ext_zvknhb, false),
1388 MULTI_EXT_CFG_BOOL("x-zvksed", ext_zvksed, false),
1389 MULTI_EXT_CFG_BOOL("x-zvksh", ext_zvksh, false),
1391 DEFINE_PROP_END_OF_LIST(),
1394 /* Deprecated entries marked for future removal */
1395 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1396 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1397 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1398 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1399 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1400 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1401 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1402 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1403 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1404 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1405 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1406 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1408 DEFINE_PROP_END_OF_LIST(),
1411 Property riscv_cpu_options[] = {
1412 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
1414 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
1415 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
1417 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
1418 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
1420 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
1421 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1423 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
1424 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
1426 DEFINE_PROP_END_OF_LIST(),
1429 static Property riscv_cpu_properties[] = {
1430 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
1432 #ifndef CONFIG_USER_ONLY
1433 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
1436 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
1438 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
1439 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
1442 * write_misa() is marked as experimental for now so mark
1443 * it with -x and default to 'false'.
1445 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
1446 DEFINE_PROP_END_OF_LIST(),
1449 static const gchar *riscv_gdb_arch_name(CPUState *cs)
1451 RISCVCPU *cpu = RISCV_CPU(cs);
1452 CPURISCVState *env = &cpu->env;
1454 switch (riscv_cpu_mxl(env)) {
1456 return "riscv:rv32";
1459 return "riscv:rv64";
1461 g_assert_not_reached();
1465 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1467 RISCVCPU *cpu = RISCV_CPU(cs);
1469 if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1470 return cpu->dyn_csr_xml;
1471 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1472 return cpu->dyn_vreg_xml;
1478 #ifndef CONFIG_USER_ONLY
1479 static int64_t riscv_get_arch_id(CPUState *cs)
1481 RISCVCPU *cpu = RISCV_CPU(cs);
1483 return cpu->env.mhartid;
1486 #include "hw/core/sysemu-cpu-ops.h"
1488 static const struct SysemuCPUOps riscv_sysemu_ops = {
1489 .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
1490 .write_elf64_note = riscv_cpu_write_elf64_note,
1491 .write_elf32_note = riscv_cpu_write_elf32_note,
1492 .legacy_vmsd = &vmstate_riscv_cpu,
1496 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
1497 void *opaque, Error **errp)
1499 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1500 RISCVCPU *cpu = RISCV_CPU(obj);
1501 uint32_t prev_val = cpu->cfg.mvendorid;
1504 if (!visit_type_uint32(v, name, &value, errp)) {
1508 if (!dynamic_cpu && prev_val != value) {
1509 error_setg(errp, "Unable to change %s mvendorid (0x%x)",
1510 object_get_typename(obj), prev_val);
1514 cpu->cfg.mvendorid = value;
1517 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
1518 void *opaque, Error **errp)
1520 bool value = RISCV_CPU(obj)->cfg.mvendorid;
1522 visit_type_bool(v, name, &value, errp);
1525 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
1526 void *opaque, Error **errp)
1528 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1529 RISCVCPU *cpu = RISCV_CPU(obj);
1530 uint64_t prev_val = cpu->cfg.mimpid;
1533 if (!visit_type_uint64(v, name, &value, errp)) {
1537 if (!dynamic_cpu && prev_val != value) {
1538 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
1539 object_get_typename(obj), prev_val);
1543 cpu->cfg.mimpid = value;
1546 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
1547 void *opaque, Error **errp)
1549 bool value = RISCV_CPU(obj)->cfg.mimpid;
1551 visit_type_bool(v, name, &value, errp);
1554 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
1555 void *opaque, Error **errp)
1557 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1558 RISCVCPU *cpu = RISCV_CPU(obj);
1559 uint64_t prev_val = cpu->cfg.marchid;
1560 uint64_t value, invalid_val;
1563 if (!visit_type_uint64(v, name, &value, errp)) {
1567 if (!dynamic_cpu && prev_val != value) {
1568 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
1569 object_get_typename(obj), prev_val);
1573 switch (riscv_cpu_mxl(&cpu->env)) {
1582 g_assert_not_reached();
1585 invalid_val = 1LL << (mxlen - 1);
1587 if (value == invalid_val) {
1588 error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
1589 "and the remaining bits zero", mxlen);
1593 cpu->cfg.marchid = value;
1596 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
1597 void *opaque, Error **errp)
1599 bool value = RISCV_CPU(obj)->cfg.marchid;
1601 visit_type_bool(v, name, &value, errp);
1604 static void riscv_cpu_class_init(ObjectClass *c, void *data)
1606 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
1607 CPUClass *cc = CPU_CLASS(c);
1608 DeviceClass *dc = DEVICE_CLASS(c);
1609 ResettableClass *rc = RESETTABLE_CLASS(c);
1611 device_class_set_parent_realize(dc, riscv_cpu_realize,
1612 &mcc->parent_realize);
1614 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
1615 &mcc->parent_phases);
1617 cc->class_by_name = riscv_cpu_class_by_name;
1618 cc->has_work = riscv_cpu_has_work;
1619 cc->dump_state = riscv_cpu_dump_state;
1620 cc->set_pc = riscv_cpu_set_pc;
1621 cc->get_pc = riscv_cpu_get_pc;
1622 cc->gdb_read_register = riscv_cpu_gdb_read_register;
1623 cc->gdb_write_register = riscv_cpu_gdb_write_register;
1624 cc->gdb_num_core_regs = 33;
1625 cc->gdb_stop_before_watchpoint = true;
1626 cc->disas_set_info = riscv_cpu_disas_set_info;
1627 #ifndef CONFIG_USER_ONLY
1628 cc->sysemu_ops = &riscv_sysemu_ops;
1629 cc->get_arch_id = riscv_get_arch_id;
1631 cc->gdb_arch_name = riscv_gdb_arch_name;
1632 cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
1634 object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
1635 cpu_set_mvendorid, NULL, NULL);
1637 object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
1638 cpu_set_mimpid, NULL, NULL);
1640 object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
1641 cpu_set_marchid, NULL, NULL);
1643 device_class_set_props(dc, riscv_cpu_properties);
1646 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
1649 const RISCVIsaExtData *edata;
1650 char *old = *isa_str;
1651 char *new = *isa_str;
1653 for (edata = isa_edata_arr; edata && edata->name; edata++) {
1654 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
1655 new = g_strconcat(old, "_", edata->name, NULL);
1664 char *riscv_isa_string(RISCVCPU *cpu)
1667 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
1668 char *isa_str = g_new(char, maxlen);
1669 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
1670 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
1671 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
1672 *p++ = qemu_tolower(riscv_single_letter_exts[i]);
1676 if (!cpu->cfg.short_isa_string) {
1677 riscv_isa_string_ext(cpu, &isa_str, maxlen);
1682 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
1684 ObjectClass *class_a = (ObjectClass *)a;
1685 ObjectClass *class_b = (ObjectClass *)b;
1686 const char *name_a, *name_b;
1688 name_a = object_class_get_name(class_a);
1689 name_b = object_class_get_name(class_b);
1690 return strcmp(name_a, name_b);
1693 static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
1695 const char *typename = object_class_get_name(OBJECT_CLASS(data));
1696 int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);
1698 qemu_printf("%.*s\n", len, typename);
1701 void riscv_cpu_list(void)
1705 list = object_class_get_list(TYPE_RISCV_CPU, false);
1706 list = g_slist_sort(list, riscv_cpu_list_compare);
1707 g_slist_foreach(list, riscv_cpu_list_entry, NULL);
/* TypeInfo entry for a fixed (non-configurable) CPU model. */
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

/* TypeInfo entry for a user-configurable ("dynamic") CPU model. */
#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }
1725 static const TypeInfo riscv_cpu_type_infos[] = {
1727 .name = TYPE_RISCV_CPU,
1729 .instance_size = sizeof(RISCVCPU),
1730 .instance_align = __alignof(RISCVCPU),
1731 .instance_init = riscv_cpu_init,
1732 .instance_post_init = riscv_cpu_post_init,
1734 .class_size = sizeof(RISCVCPUClass),
1735 .class_init = riscv_cpu_class_init,
1738 .name = TYPE_RISCV_DYNAMIC_CPU,
1739 .parent = TYPE_RISCV_CPU,
1742 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
1743 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init),
1744 #if defined(TARGET_RISCV32)
1745 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
1746 DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
1747 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
1748 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
1749 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
1750 #elif defined(TARGET_RISCV64)
1751 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
1752 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
1753 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
1754 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
1755 DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
1756 DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
1757 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
1761 DEFINE_TYPES(riscv_cpu_type_infos)