4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
25 #include "cpu_vendorid.h"
27 #include "internals.h"
28 #include "time_helper.h"
29 #include "exec/exec-all.h"
30 #include "qapi/error.h"
31 #include "qapi/visitor.h"
32 #include "qemu/error-report.h"
33 #include "hw/qdev-properties.h"
34 #include "migration/vmstate.h"
35 #include "fpu/softfloat-helpers.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm_riscv.h"
/* RISC-V CPU definitions */

/*
 * Canonical ordering of the single-letter (misa) extension characters,
 * used when composing/validating ISA strings.
 */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
47 int ext_enable_offset;
/*
 * Build one isa_ext_data record: the extension name (stringized), the
 * minimum privileged-spec version it requires, and the byte offset of
 * its enable flag inside struct RISCVCPUConfig.
 */
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 *
 * NOTE(review): the XOR-with-7 form flips a byte index within a 64-bit
 * chunk; presumably this definition is selected only for big-endian
 * hosts (as in vector_helper.c) — confirm the surrounding guard.
 */
#define BYTE(x) ((x) ^ 7)
65 * Here are the ordering rules of extension naming defined by RISC-V
67 * 1. All extensions should be separated from other multi-letter extensions
69 * 2. The first letter following the 'Z' conventionally indicates the most
70 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
71 * If multiple 'Z' extensions are named, they should be ordered first
72 * by category, then alphabetically within a category.
73 * 3. Standard supervisor-level extensions (starts with 'S') should be
74 * listed after standard unprivileged extensions. If multiple
75 * supervisor-level extensions are listed, they should be ordered
77 * 4. Non-standard extensions (starts with 'X') must be listed after all
78 * standard extensions. They must be separated from other multi-letter
79 * extensions by an underscore.
81 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
/*
 * Table of multi-letter ISA extensions: name, minimum privileged-spec
 * version, and the RISCVCPUConfig enable-flag offset.  Consumed by
 * isa_ext_is_enabled()/isa_ext_update_enabled() and by the priv-spec
 * gating in riscv_cpu_disable_priv_spec_isa_exts().
 */
static const struct isa_ext_data isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
/*
 * Read the enable flag for the extension described by @edata out of
 * @cpu's config, using the offset recorded in the table entry.
 */
static bool isa_ext_is_enabled(RISCVCPU *cpu,
                               const struct isa_ext_data *edata)
    /* Offset arithmetic on the config struct selects the bool flag. */
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
/*
 * Set or clear (per @en) the enable flag for the extension described
 * by @edata in @cpu's config.
 */
static void isa_ext_update_enabled(RISCVCPU *cpu,
                                   const struct isa_ext_data *edata, bool en)
    /* Same offset-based access as isa_ext_is_enabled(). */
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
/*
 * "xN/abi-alias" names for the 32 integer registers, indexed by register
 * number; used by riscv_cpu_dump_state().
 */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
/*
 * "High-half" counterparts of the integer register names (xNh/aliash).
 * NOTE(review): no user of this table is visible in this chunk —
 * presumably used when exposing the upper halves of wide registers.
 */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
/*
 * "fN/abi-alias" names for the 32 floating-point registers, indexed by
 * register number; used by riscv_cpu_dump_state().
 */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
/* Names of the 32 vector registers; used by riscv_cpu_dump_state(). */
const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
/*
 * Exception (synchronous trap) names, indexed by cause number; consumed
 * by riscv_cpu_get_trap_name().
 * NOTE(review): most entries are elided in this view of the file.
 */
static const char * const riscv_excp_names[] = {
    "illegal_instruction",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "guest_store_page_fault",
/*
 * Interrupt cause names, indexed by cause number; consumed by
 * riscv_cpu_get_trap_name() (entries elided in this view).
 */
static const char * const riscv_intr_names[] = {
250 static void riscv_cpu_add_user_properties(Object *obj);
/*
 * Return a human-readable name for trap @cause.  @async selects the
 * interrupt table, otherwise the exception table is used; out-of-range
 * causes yield "(unknown)".  (The if/else around the two returns is
 * elided in this view.)
 */
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
/*
 * Set both the current and the maximum MXL and extension bitmaps on
 * @env in one go (current value starts equal to the maximum).
 */
static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
    env->misa_mxl_max = env->misa_mxl = mxl;
    env->misa_ext_mask = env->misa_ext = ext;
269 #ifndef CONFIG_USER_ONLY
/*
 * Parse a satp-mode name ("mbare", "sv32" ... "sv64") into its VM_1_10_*
 * encoding; an unrecognized string aborts.
 * NOTE(review): the return statements for the svXX branches are elided
 * in this view of the file.
 */
static uint8_t satp_mode_from_str(const char *satp_mode_str)
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    if (!strncmp(satp_mode_str, "sv32", 4)) {
    if (!strncmp(satp_mode_str, "sv39", 4)) {
    if (!strncmp(satp_mode_str, "sv48", 4)) {
    if (!strncmp(satp_mode_str, "sv57", 4)) {
    if (!strncmp(satp_mode_str, "sv64", 4)) {
    g_assert_not_reached();
/*
 * Return the index of the highest bit set in @map, i.e. the largest
 * satp mode present in the bitmap.  Callers guarantee that @map has at
 * least one bit set, so the empty-map case never arises.
 */
uint8_t satp_mode_max_from_map(uint32_t map)
{
    uint8_t highest = 0;

    /* Shift the map down until only the top set bit has been consumed. */
    while (map >>= 1) {
        highest++;
    }
    return highest;
}
/*
 * Inverse of satp_mode_from_str(): map a VM_1_10_* constant back to its
 * name; @is_32_bit selects the rv32 naming.  Unhandled values abort.
 * (The switch body is elided in this view of the file.)
 */
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
    g_assert_not_reached();
/*
 * Mark every satp mode up to the hart's maximum as supported in
 * cfg.satp_mode.supported, consulting the MXL-specific validity table.
 * NOTE(review): the @satp_mode parameter line and the valid_vm[] check
 * inside the loop are elided in this view.
 */
static void set_satp_mode_max_supported(RISCVCPU *cpu,
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
            cpu->cfg.satp_mode.supported |= (1 << i);
/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
    /* Default the user-visible map to everything the hart supports. */
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
/*
 * Instance init for the generic "any" CPU model: IMAFDCU base ISA at
 * the build's native XLEN, latest privileged spec, max satp mode.
 */
static void riscv_any_cpu_init(Object *obj)
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#ifndef CONFIG_USER_ONLY
    /* SV32 for rv32 harts, SV57 for rv64. */
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
377 #if defined(TARGET_RISCV64)
/*
 * rv64 base model: misa starts empty (filled in at realize from the
 * user-facing properties registered here), latest priv spec, SV57 max.
 */
static void rv64_base_cpu_init(Object *obj)
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV64, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
/* SiFive U54-style rv64 hart: RV64GC-equivalent misa, priv 1.10, SV39. */
static void rv64_sifive_u_cpu_init(Object *obj)
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
/* SiFive E-series rv64 hart: no F/D/S, priv 1.10, no MMU (mbare only). */
static void rv64_sifive_e_cpu_init(Object *obj)
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
/*
 * T-Head C906 rv64 hart: RV64GCSU, priv 1.11, Zfa/Zfh plus the vendor
 * XThead* extensions, T-Head mvendorid, SV39.
 */
static void rv64_thead_c906_cpu_init(Object *obj)
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;

    /* Vendor-specific T-Head extensions implemented by this core. */
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);

    /* inherited from parent obj via riscv_cpu_init() */
/*
 * Ventana Veyron V1 rv64 hart: RV64GCHSU, priv 1.12, cache-block ops,
 * AIA, Zb*, XVentanaCondOps, Ventana machine IDs, SV48.
 */
static void rv64_veyron_v1_cpu_init(Object *obj)
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    /* 64-byte cache blocks for the Zicbom/Zicboz operations. */
    cpu->cfg.ext_icbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_icboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
/*
 * rv128 base model: refuses MTTCG (no 128-bit aligned atomics), misa
 * filled at realize via user properties, latest priv spec, SV57 max.
 */
static void rv128_base_cpu_init(Object *obj)
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV128, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
/*
 * rv32 base model: misa filled at realize via user properties, latest
 * priv spec, SV32 max.
 */
static void rv32_base_cpu_init(Object *obj)
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV32, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
/* SiFive U-series rv32 hart: RV32GC-equivalent misa, priv 1.10, SV32. */
static void rv32_sifive_u_cpu_init(Object *obj)
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
/* SiFive E-series rv32 hart: no F/D/S, priv 1.10, no MMU (mbare only). */
static void rv32_sifive_e_cpu_init(Object *obj)
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
/* lowRISC Ibex rv32 hart: IMCU, priv 1.11, no MMU, ePMP enabled. */
static void rv32_ibex_cpu_init(Object *obj)
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
    /* Ibex implements enhanced PMP. */
    cpu->cfg.epmp = true;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
/* Generic rv32 IMAFCU hart without an MMU, priv 1.10. */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
598 #if defined(CONFIG_KVM)
/*
 * KVM "host" CPU model: misa starts at 0 for the build's XLEN.
 * NOTE(review): presumably the actual ISA is taken from the host by
 * KVM at realize time — confirm against kvm_riscv.
 */
static void riscv_host_cpu_init(Object *obj)
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, 0);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, 0);
    riscv_cpu_add_user_properties(obj);
609 #endif /* CONFIG_KVM */
/*
 * Resolve a -cpu model string to its QOM ObjectClass; rejects missing,
 * non-RISC-V, and abstract classes.
 * NOTE(review): g_strsplit() with max_tokens=1 returns the whole string
 * as a single token — confirm that feature suffixes after ',' are
 * stripped by the caller.
 */
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);

    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
/*
 * CPUClass::dump_state hook: print pc, a fixed list of CSRs (system
 * emulation only), the GPRs, and optionally FPRs (CPU_DUMP_FPU) and
 * vector state (CPU_DUMP_VPU when RVV is enabled).
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        /* Virtualization (V) mode flag, only meaningful with RVH. */
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    static const int dump_csrs[] = {
        /*
         * CSR_SSTATUS is intentionally omitted here as its value
         * can be figured out by looking at CSR_MSTATUS
         */
    for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
        int csrno = dump_csrs[i];
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
        /*
         * Rely on the smode, hmode, etc, predicates within csr.c
         * to do the filtering of the registers that are present.
         */
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[csrno].name, val);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        qemu_fprintf(f, "\n");

    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            qemu_fprintf(f, "\n");

    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
        for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
        /* VLEN is in bits; vlenb is the register width in bytes. */
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            /* Print most-significant byte first; BYTE() fixes host order. */
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            qemu_fprintf(f, "\n");
/*
 * CPUClass::set_pc hook.  On RV32 the value is sign-extended through
 * (int32_t).  (The non-RV32 else branch is elided in this view.)
 */
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
/*
 * CPUClass::get_pc hook: return pc, truncated to 32 bits when the
 * current XLEN is RV32 (mirrors cpu_get_tb_cpu_state).
 */
static vaddr riscv_cpu_get_pc(CPUState *cs)
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
/*
 * Resynchronize env->pc from a TB after execution.  Only valid for
 * non-PC-relative TBs (tb->pc is meaningless under CF_PCREL); RV32
 * sign-extends the 32-bit pc.
 */
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
/*
 * CPUClass::has_work hook (WFI wake-up): any pending interrupt counts,
 * regardless of privilege mode or delegation.
 */
static bool riscv_cpu_has_work(CPUState *cs)
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0;
/*
 * Rebuild env->pc from TCG unwind @data after an exception inside a TB.
 * PC-relative TBs store only the in-page offset in data[0], so the page
 * bits are taken from the current pc; RV32 sign-extends the result.
 */
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
/*
 * Resettable "hold" phase: bring the hart to its architectural reset
 * state (mstatus, pc, envcfg, interrupt priorities, PMU/trigger state),
 * chaining to the parent class's hold phase first.
 */
static void riscv_cpu_reset_hold(Object *obj)
#ifndef CONFIG_USER_ONLY
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
#ifndef CONFIG_USER_ONLY
    /* Reset drops any runtime MXL reduction back to the maximum. */
    env->misa_mxl = env->misa_mxl_max;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->two_stage_lookup = false;

    /* envcfg enables reflect the statically configured extensions. */
    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
            env->hviprio[irq] = env->miprio[irq];

    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;

    /* RISC-V mandates the canonical NaN on FP operations. */
    set_default_nan_mode(1, &env->fp_status);
#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_init(env);
    kvm_riscv_reset_vcpu(cpu);
/*
 * Select the 32/64/128-bit disassembler matching this CPU's MXL and
 * expose the CPU config to the disassembler via target_info.
 * (The switch statement around the assignments is elided in this view.)
 */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;
        info->print_insn = print_insn_riscv32;
        info->print_insn = print_insn_riscv64;
        info->print_insn = print_insn_riscv128;
    g_assert_not_reached();
/*
 * Validate the vector configuration: VLEN in [128, RV_VLEN_MAX] and a
 * power of 2, ELEN in [8, 64] and a power of 2, and a supported vector
 * spec string (only "v1.0"); on success store the version in
 * env->vext_ver.
 */
static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
    int vext_version = VEXT_VERSION_1_00_0;

    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
    if (cfg->elen > 64 || cfg->elen < 8) {
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            vext_version = VEXT_VERSION_1_00_0;
            error_setg(errp, "Unsupported vector spec version '%s'",
        /* No explicit version requested: default to v1.0. */
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");
    env->vext_ver = vext_version;
/*
 * Translate the user-supplied priv_spec string ("v1.10.0", "v1.11.0",
 * "v1.12.0") into a PRIV_VERSION_* constant and store it in
 * env->priv_ver; unknown strings produce an error.
 */
static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
                       "Unsupported privilege spec version '%s'",
    env->priv_ver = priv_version;
/*
 * Walk isa_edata_arr and force-disable every enabled extension whose
 * minimum privileged-spec version is newer than the hart's configured
 * env->priv_ver, warning about each one dropped.
 */
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
    CPURISCVState *env = &cpu->env;

    /* Force disable extensions if priv spec version does not match */
    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
            (env->priv_ver < isa_edata_arr[i].min_version)) {
            isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        isa_edata_arr[i].name, env->mhartid);
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        isa_edata_arr[i].name);
/*
 * Sanity-check env->misa_mxl_max, pick the matching gdb core XML file
 * for the CPU class, and require misa_mxl to equal misa_mxl_max.
 * (The switch case labels are elided in this view.)
 */
static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
    g_assert_not_reached();

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 *
 * Three kinds of work happen here, in order:
 *  - hard incompatibility checks (error_setg on conflict);
 *  - implied-extension expansion (e.g. G -> IMAFD_Zicsr_Zifencei,
 *    V -> Zve64d -> Zve64f -> Zve32f, Zk* and Zc* groupings);
 *  - finally, priv-spec gating via
 *    riscv_cpu_disable_priv_spec_isa_exts().
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Do some ISA extension error checking */
    if (riscv_has_ext(env, RVG) &&
        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
          riscv_has_ext(env, RVD) &&
          cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
        warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
        cpu->cfg.ext_icsr = true;
        cpu->cfg.ext_ifencei = true;

        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
                   "I and E extensions are incompatible");

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
                   "Either I or E extension must be set");

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
                   "Setting S extension without U extension is illegal");

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
                   "H depends on an I base integer ISA with 32 x registers");

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
        error_setg(errp, "F extension requires Zicsr");

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");

    /* Zfh implies Zfhmin. */
    if (cpu->cfg.ext_zfh) {
        cpu->cfg.ext_zfhmin = true;

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);

        /* The V vector extension depends on the Zve64d extension */
        cpu->cfg.ext_zve64d = true;

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu->cfg.ext_zve64f = true;

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu->cfg.ext_zve32f = true;

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");

    /* Zvfh implies Zvfhmin. */
    if (cpu->cfg.ext_zvfh) {
        cpu->cfg.ext_zvfhmin = true;

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extensions requires Zfhmin extension");

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
        error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu->cfg.ext_zhinxmin = true;

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_icsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
        if (riscv_has_ext(env, RVF)) {
                       "Zfinx cannot be supported together with F extension");

    /* Zce implies the whole compressed-code group (and Zcf on RV32F). */
    if (cpu->cfg.ext_zce) {
        cpu->cfg.ext_zca = true;
        cpu->cfg.ext_zcb = true;
        cpu->cfg.ext_zcmp = true;
        cpu->cfg.ext_zcmt = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;

    /* C implies Zca (plus Zcf/Zcd where F/D are present). */
    if (riscv_has_ext(env, RVC)) {
        cpu->cfg.ext_zca = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;
        if (riscv_has_ext(env, RVD)) {
            cpu->cfg.ext_zcd = true;

    if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");

    /* Zk is shorthand for Zkn + Zkr + Zkt. */
    if (cpu->cfg.ext_zk) {
        cpu->cfg.ext_zkn = true;
        cpu->cfg.ext_zkr = true;
        cpu->cfg.ext_zkt = true;

    /* Zkn expands to the NIST crypto suite. */
    if (cpu->cfg.ext_zkn) {
        cpu->cfg.ext_zbkb = true;
        cpu->cfg.ext_zbkc = true;
        cpu->cfg.ext_zbkx = true;
        cpu->cfg.ext_zkne = true;
        cpu->cfg.ext_zknd = true;
        cpu->cfg.ext_zknh = true;

    /* Zks expands to the ShangMi crypto suite. */
    if (cpu->cfg.ext_zks) {
        cpu->cfg.ext_zbkb = true;
        cpu->cfg.ext_zbkc = true;
        cpu->cfg.ext_zbkx = true;
        cpu->cfg.ext_zksed = true;
        cpu->cfg.ext_zksh = true;

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
1301 #ifndef CONFIG_USER_ONLY
1302 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1304 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
1305 uint8_t satp_mode_map_max;
1306 uint8_t satp_mode_supported_max =
1307 satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1309 if (cpu->cfg.satp_mode.map == 0) {
1310 if (cpu->cfg.satp_mode.init == 0) {
1311 /* If unset by the user, we fallback to the default satp mode. */
1312 set_satp_mode_default_map(cpu);
1315 * Find the lowest level that was disabled and then enable the
1316 * first valid level below which can be found in
1317 * valid_vm_1_10_32/64.
1319 for (int i = 1; i < 16; ++i) {
1320 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1321 (cpu->cfg.satp_mode.supported & (1 << i))) {
1322 for (int j = i - 1; j >= 0; --j) {
1323 if (cpu->cfg.satp_mode.supported & (1 << j)) {
1324 cpu->cfg.satp_mode.map |= (1 << j);
1334 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1336 /* Make sure the user asked for a supported configuration (HW and qemu) */
1337 if (satp_mode_map_max > satp_mode_supported_max) {
1338 error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1339 satp_mode_str(satp_mode_map_max, rv32),
1340 satp_mode_str(satp_mode_supported_max, rv32));
1345 * Make sure the user did not ask for an invalid configuration as per
1346 * the specification.
1349 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1350 if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1351 (cpu->cfg.satp_mode.init & (1 << i)) &&
1352 (cpu->cfg.satp_mode.supported & (1 << i))) {
1353 error_setg(errp, "cannot disable %s satp mode if %s "
1354 "is enabled", satp_mode_str(i, false),
1355 satp_mode_str(satp_mode_map_max, false));
1361 /* Finally expand the map so that all valid modes are set */
1362 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1363 if (cpu->cfg.satp_mode.supported & (1 << i)) {
1364 cpu->cfg.satp_mode.map |= (1 << i);
1370 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1372 #ifndef CONFIG_USER_ONLY
1373 Error *local_err = NULL;
1375 riscv_cpu_satp_mode_finalize(cpu, &local_err);
1376 if (local_err != NULL) {
1377 error_propagate(errp, local_err);
1383 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
1385 if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
1386 error_setg(errp, "H extension requires priv spec 1.12.0");
1391 static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
1393 RISCVCPU *cpu = RISCV_CPU(dev);
1394 CPURISCVState *env = &cpu->env;
1395 Error *local_err = NULL;
1397 riscv_cpu_validate_misa_mxl(cpu, &local_err);
1398 if (local_err != NULL) {
1399 error_propagate(errp, local_err);
1403 riscv_cpu_validate_priv_spec(cpu, &local_err);
1404 if (local_err != NULL) {
1405 error_propagate(errp, local_err);
1409 riscv_cpu_validate_misa_priv(env, &local_err);
1410 if (local_err != NULL) {
1411 error_propagate(errp, local_err);
1415 if (cpu->cfg.epmp && !cpu->cfg.pmp) {
1417 * Enhanced PMP should only be available
1418 * on harts with PMP support
1420 error_setg(errp, "Invalid configuration: EPMP requires PMP support");
1424 riscv_cpu_validate_set_extensions(cpu, &local_err);
1425 if (local_err != NULL) {
1426 error_propagate(errp, local_err);
1430 #ifndef CONFIG_USER_ONLY
1431 CPU(dev)->tcg_cflags |= CF_PCREL;
1433 if (cpu->cfg.ext_sstc) {
1434 riscv_timer_init(cpu);
1437 if (cpu->cfg.pmu_num) {
1438 if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
1439 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1440 riscv_pmu_timer_cb, cpu);
1446 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1448 CPUState *cs = CPU(dev);
1449 RISCVCPU *cpu = RISCV_CPU(dev);
1450 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1451 Error *local_err = NULL;
1453 cpu_exec_realizefn(cs, &local_err);
1454 if (local_err != NULL) {
1455 error_propagate(errp, local_err);
1459 if (tcg_enabled()) {
1460 riscv_cpu_realize_tcg(dev, &local_err);
1461 if (local_err != NULL) {
1462 error_propagate(errp, local_err);
1467 riscv_cpu_finalize_features(cpu, &local_err);
1468 if (local_err != NULL) {
1469 error_propagate(errp, local_err);
1473 riscv_cpu_register_gdb_regs_for_features(cs);
1478 mcc->parent_realize(dev, errp);
1481 #ifndef CONFIG_USER_ONLY
1482 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1483 void *opaque, Error **errp)
1485 RISCVSATPMap *satp_map = opaque;
1486 uint8_t satp = satp_mode_from_str(name);
1489 value = satp_map->map & (1 << satp);
1491 visit_type_bool(v, name, &value, errp);
1494 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1495 void *opaque, Error **errp)
1497 RISCVSATPMap *satp_map = opaque;
1498 uint8_t satp = satp_mode_from_str(name);
1501 if (!visit_type_bool(v, name, &value, errp)) {
1505 satp_map->map = deposit32(satp_map->map, satp, 1, value);
1506 satp_map->init |= 1 << satp;
1509 static void riscv_add_satp_mode_properties(Object *obj)
1511 RISCVCPU *cpu = RISCV_CPU(obj);
1513 if (cpu->env.misa_mxl == MXL_RV32) {
1514 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1515 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1517 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1518 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1519 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1520 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1521 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1522 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1523 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1524 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1528 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1530 RISCVCPU *cpu = RISCV_CPU(opaque);
1531 CPURISCVState *env = &cpu->env;
1533 if (irq < IRQ_LOCAL_MAX) {
1546 if (kvm_enabled()) {
1547 kvm_riscv_set_irq(cpu, irq, level);
1549 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1553 if (kvm_enabled()) {
1554 kvm_riscv_set_irq(cpu, irq, level);
1556 env->external_seip = level;
1557 riscv_cpu_update_mip(env, 1 << irq,
1558 BOOL_TO_MASK(level | env->software_seip));
1562 g_assert_not_reached();
1564 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1565 /* Require H-extension for handling guest local interrupts */
1566 if (!riscv_has_ext(env, RVH)) {
1567 g_assert_not_reached();
1570 /* Compute bit position in HGEIP CSR */
1571 irq = irq - IRQ_LOCAL_MAX + 1;
1572 if (env->geilen < irq) {
1573 g_assert_not_reached();
1576 /* Update HGEIP CSR */
1577 env->hgeip &= ~((target_ulong)1 << irq);
1579 env->hgeip |= (target_ulong)1 << irq;
1582 /* Update mip.SGEIP bit */
1583 riscv_cpu_update_mip(env, MIP_SGEIP,
1584 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1586 g_assert_not_reached();
1589 #endif /* CONFIG_USER_ONLY */
1591 static void riscv_cpu_init(Object *obj)
1593 RISCVCPU *cpu = RISCV_CPU(obj);
1595 cpu_set_cpustate_pointers(cpu);
1597 #ifndef CONFIG_USER_ONLY
1598 qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
1599 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1600 #endif /* CONFIG_USER_ONLY */
1603 typedef struct RISCVCPUMisaExtConfig {
1605 const char *description;
1606 target_ulong misa_bit;
1608 } RISCVCPUMisaExtConfig;
1610 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
1611 void *opaque, Error **errp)
1613 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
1614 target_ulong misa_bit = misa_ext_cfg->misa_bit;
1615 RISCVCPU *cpu = RISCV_CPU(obj);
1616 CPURISCVState *env = &cpu->env;
1619 if (!visit_type_bool(v, name, &value, errp)) {
1624 env->misa_ext |= misa_bit;
1625 env->misa_ext_mask |= misa_bit;
1627 env->misa_ext &= ~misa_bit;
1628 env->misa_ext_mask &= ~misa_bit;
1632 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
1633 void *opaque, Error **errp)
1635 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
1636 target_ulong misa_bit = misa_ext_cfg->misa_bit;
1637 RISCVCPU *cpu = RISCV_CPU(obj);
1638 CPURISCVState *env = &cpu->env;
1641 value = env->misa_ext & misa_bit;
1643 visit_type_bool(v, name, &value, errp);
/* Static name/description pair for a single-letter MISA extension. */
typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

/* Index into misa_ext_info_arr: position of the lowest set bit. */
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1657 static const MISAExtInfo misa_ext_info_arr[] = {
1658 MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1659 MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1660 MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
1661 MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
1662 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1663 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1664 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1665 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1666 MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1667 MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1668 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1669 MISA_EXT_INFO(RVV, "v", "Vector operations"),
1670 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1673 static int riscv_validate_misa_info_idx(uint32_t bit)
1678 * Our lowest valid input (RVA) is 1 and
1679 * __builtin_ctz() is UB with zero.
1682 idx = MISA_INFO_IDX(bit);
1684 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1688 const char *riscv_get_misa_ext_name(uint32_t bit)
1690 int idx = riscv_validate_misa_info_idx(bit);
1691 const char *val = misa_ext_info_arr[idx].name;
1693 g_assert(val != NULL);
1697 const char *riscv_get_misa_ext_description(uint32_t bit)
1699 int idx = riscv_validate_misa_info_idx(bit);
1700 const char *val = misa_ext_info_arr[idx].description;
1702 g_assert(val != NULL);
1706 #define MISA_CFG(_bit, _enabled) \
1707 {.misa_bit = _bit, .enabled = _enabled}
1709 static RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
1710 MISA_CFG(RVA, true),
1711 MISA_CFG(RVC, true),
1712 MISA_CFG(RVD, true),
1713 MISA_CFG(RVF, true),
1714 MISA_CFG(RVI, true),
1715 MISA_CFG(RVE, false),
1716 MISA_CFG(RVM, true),
1717 MISA_CFG(RVS, true),
1718 MISA_CFG(RVU, true),
1719 MISA_CFG(RVH, true),
1720 MISA_CFG(RVJ, false),
1721 MISA_CFG(RVV, false),
1722 MISA_CFG(RVG, false),
1725 static void riscv_cpu_add_misa_properties(Object *cpu_obj)
1729 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
1730 RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
1731 int bit = misa_cfg->misa_bit;
1733 misa_cfg->name = riscv_get_misa_ext_name(bit);
1734 misa_cfg->description = riscv_get_misa_ext_description(bit);
1736 /* Check if KVM already created the property */
1737 if (object_property_find(cpu_obj, misa_cfg->name)) {
1741 object_property_add(cpu_obj, misa_cfg->name, "bool",
1742 cpu_get_misa_ext_cfg,
1743 cpu_set_misa_ext_cfg,
1744 NULL, (void *)misa_cfg);
1745 object_property_set_description(cpu_obj, misa_cfg->name,
1746 misa_cfg->description);
1747 object_property_set_bool(cpu_obj, misa_cfg->name,
1748 misa_cfg->enabled, NULL);
1752 static Property riscv_cpu_extensions[] = {
1753 /* Defaults for standard extensions */
1754 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
1755 DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
1756 DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
1757 DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
1758 DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
1759 DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
1760 DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
1761 DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
1762 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
1763 DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
1764 DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
1765 DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
1766 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
1767 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
1768 DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),
1770 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
1771 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
1772 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
1773 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1775 DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false),
1776 DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
1777 DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
1778 DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
1779 DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),
1781 DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
1782 DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
1783 DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
1784 DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
1785 DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
1786 DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
1787 DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
1788 DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
1789 DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
1790 DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
1791 DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
1792 DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
1793 DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
1794 DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
1795 DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
1796 DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
1797 DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),
1799 DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
1800 DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
1801 DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
1802 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),
1804 DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
1805 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
1806 DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
1807 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
1809 DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),
1811 DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false),
1812 DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false),
1813 DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false),
1814 DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false),
1815 DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
1816 DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
1817 DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),
1819 /* Vendor-specific custom extensions */
1820 DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
1821 DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
1822 DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
1823 DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
1824 DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
1825 DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
1826 DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
1827 DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
1828 DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
1829 DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
1830 DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
1831 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
1833 /* These are experimental so mark with 'x-' */
1834 DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
1837 DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
1838 DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
1839 DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),
1841 DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
1842 DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),
1844 DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false),
1845 DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
1846 DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),
1848 DEFINE_PROP_END_OF_LIST(),
1852 #ifndef CONFIG_USER_ONLY
1853 static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
1855 void *opaque, Error **errp)
1857 const char *propname = opaque;
1860 if (!visit_type_bool(v, name, &value, errp)) {
1865 error_setg(errp, "extension %s is not available with KVM",
1872 * Add CPU properties with user-facing flags.
1874 * This will overwrite existing env->misa_ext values with the
1875 * defaults set via riscv_cpu_add_misa_properties().
1877 static void riscv_cpu_add_user_properties(Object *obj)
1880 DeviceState *dev = DEVICE(obj);
1882 #ifndef CONFIG_USER_ONLY
1883 riscv_add_satp_mode_properties(obj);
1885 if (kvm_enabled()) {
1886 kvm_riscv_init_user_properties(obj);
1890 riscv_cpu_add_misa_properties(obj);
1892 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
1893 #ifndef CONFIG_USER_ONLY
1894 if (kvm_enabled()) {
1895 /* Check if KVM created the property already */
1896 if (object_property_find(obj, prop->name)) {
1901 * Set the default to disabled for every extension
1902 * unknown to KVM and error out if the user attempts
1903 * to enable any of them.
1905 * We're giving a pass for non-bool properties since they're
1906 * not related to the availability of extensions and can be
1907 * safely ignored as is.
1909 if (prop->info == &qdev_prop_bool) {
1910 object_property_add(obj, prop->name, "bool",
1911 NULL, cpu_set_cfg_unavailable,
1912 NULL, (void *)prop->name);
1917 qdev_property_add_static(dev, prop);
1921 static Property riscv_cpu_properties[] = {
1922 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
1924 #ifndef CONFIG_USER_ONLY
1925 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
1928 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
1930 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
1931 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
1934 * write_misa() is marked as experimental for now so mark
1935 * it with -x and default to 'false'.
1937 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
1938 DEFINE_PROP_END_OF_LIST(),
1941 static gchar *riscv_gdb_arch_name(CPUState *cs)
1943 RISCVCPU *cpu = RISCV_CPU(cs);
1944 CPURISCVState *env = &cpu->env;
1946 switch (riscv_cpu_mxl(env)) {
1948 return g_strdup("riscv:rv32");
1951 return g_strdup("riscv:rv64");
1953 g_assert_not_reached();
1957 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1959 RISCVCPU *cpu = RISCV_CPU(cs);
1961 if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1962 return cpu->dyn_csr_xml;
1963 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1964 return cpu->dyn_vreg_xml;
1970 #ifndef CONFIG_USER_ONLY
1971 static int64_t riscv_get_arch_id(CPUState *cs)
1973 RISCVCPU *cpu = RISCV_CPU(cs);
1975 return cpu->env.mhartid;
1978 #include "hw/core/sysemu-cpu-ops.h"
1980 static const struct SysemuCPUOps riscv_sysemu_ops = {
1981 .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
1982 .write_elf64_note = riscv_cpu_write_elf64_note,
1983 .write_elf32_note = riscv_cpu_write_elf32_note,
1984 .legacy_vmsd = &vmstate_riscv_cpu,
1988 #include "hw/core/tcg-cpu-ops.h"
1990 static const struct TCGCPUOps riscv_tcg_ops = {
1991 .initialize = riscv_translate_init,
1992 .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
1993 .restore_state_to_opc = riscv_restore_state_to_opc,
1995 #ifndef CONFIG_USER_ONLY
1996 .tlb_fill = riscv_cpu_tlb_fill,
1997 .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
1998 .do_interrupt = riscv_cpu_do_interrupt,
1999 .do_transaction_failed = riscv_cpu_do_transaction_failed,
2000 .do_unaligned_access = riscv_cpu_do_unaligned_access,
2001 .debug_excp_handler = riscv_cpu_debug_excp_handler,
2002 .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
2003 .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
2004 #endif /* !CONFIG_USER_ONLY */
2007 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
2009 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
2012 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
2013 void *opaque, Error **errp)
2015 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2016 RISCVCPU *cpu = RISCV_CPU(obj);
2017 uint32_t prev_val = cpu->cfg.mvendorid;
2020 if (!visit_type_uint32(v, name, &value, errp)) {
2024 if (!dynamic_cpu && prev_val != value) {
2025 error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2026 object_get_typename(obj), prev_val);
2030 cpu->cfg.mvendorid = value;
2033 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
2034 void *opaque, Error **errp)
2036 bool value = RISCV_CPU(obj)->cfg.mvendorid;
2038 visit_type_bool(v, name, &value, errp);
2041 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
2042 void *opaque, Error **errp)
2044 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2045 RISCVCPU *cpu = RISCV_CPU(obj);
2046 uint64_t prev_val = cpu->cfg.mimpid;
2049 if (!visit_type_uint64(v, name, &value, errp)) {
2053 if (!dynamic_cpu && prev_val != value) {
2054 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
2055 object_get_typename(obj), prev_val);
2059 cpu->cfg.mimpid = value;
2062 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
2063 void *opaque, Error **errp)
2065 bool value = RISCV_CPU(obj)->cfg.mimpid;
2067 visit_type_bool(v, name, &value, errp);
2070 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
2071 void *opaque, Error **errp)
2073 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2074 RISCVCPU *cpu = RISCV_CPU(obj);
2075 uint64_t prev_val = cpu->cfg.marchid;
2076 uint64_t value, invalid_val;
2079 if (!visit_type_uint64(v, name, &value, errp)) {
2083 if (!dynamic_cpu && prev_val != value) {
2084 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
2085 object_get_typename(obj), prev_val);
2089 switch (riscv_cpu_mxl(&cpu->env)) {
2098 g_assert_not_reached();
2101 invalid_val = 1LL << (mxlen - 1);
2103 if (value == invalid_val) {
2104 error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2105 "and the remaining bits zero", mxlen);
2109 cpu->cfg.marchid = value;
2112 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
2113 void *opaque, Error **errp)
2115 bool value = RISCV_CPU(obj)->cfg.marchid;
2117 visit_type_bool(v, name, &value, errp);
2120 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2122 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2123 CPUClass *cc = CPU_CLASS(c);
2124 DeviceClass *dc = DEVICE_CLASS(c);
2125 ResettableClass *rc = RESETTABLE_CLASS(c);
2127 device_class_set_parent_realize(dc, riscv_cpu_realize,
2128 &mcc->parent_realize);
2130 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2131 &mcc->parent_phases);
2133 cc->class_by_name = riscv_cpu_class_by_name;
2134 cc->has_work = riscv_cpu_has_work;
2135 cc->dump_state = riscv_cpu_dump_state;
2136 cc->set_pc = riscv_cpu_set_pc;
2137 cc->get_pc = riscv_cpu_get_pc;
2138 cc->gdb_read_register = riscv_cpu_gdb_read_register;
2139 cc->gdb_write_register = riscv_cpu_gdb_write_register;
2140 cc->gdb_num_core_regs = 33;
2141 cc->gdb_stop_before_watchpoint = true;
2142 cc->disas_set_info = riscv_cpu_disas_set_info;
2143 #ifndef CONFIG_USER_ONLY
2144 cc->sysemu_ops = &riscv_sysemu_ops;
2145 cc->get_arch_id = riscv_get_arch_id;
2147 cc->gdb_arch_name = riscv_gdb_arch_name;
2148 cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
2149 cc->tcg_ops = &riscv_tcg_ops;
2151 object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
2152 cpu_set_mvendorid, NULL, NULL);
2154 object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
2155 cpu_set_mimpid, NULL, NULL);
2157 object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
2158 cpu_set_marchid, NULL, NULL);
2160 device_class_set_props(dc, riscv_cpu_properties);
2163 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2166 char *old = *isa_str;
2167 char *new = *isa_str;
2170 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
2171 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
2172 new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
2181 char *riscv_isa_string(RISCVCPU *cpu)
2184 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2185 char *isa_str = g_new(char, maxlen);
2186 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
2187 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2188 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2189 *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2193 if (!cpu->cfg.short_isa_string) {
2194 riscv_isa_string_ext(cpu, &isa_str, maxlen);
2199 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
2201 ObjectClass *class_a = (ObjectClass *)a;
2202 ObjectClass *class_b = (ObjectClass *)b;
2203 const char *name_a, *name_b;
2205 name_a = object_class_get_name(class_a);
2206 name_b = object_class_get_name(class_b);
2207 return strcmp(name_a, name_b);
2210 static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
2212 const char *typename = object_class_get_name(OBJECT_CLASS(data));
2213 int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);
2215 qemu_printf("%.*s\n", len, typename);
2218 void riscv_cpu_list(void)
2222 list = object_class_get_list(TYPE_RISCV_CPU, false);
2223 list = g_slist_sort(list, riscv_cpu_list_compare);
2224 g_slist_foreach(list, riscv_cpu_list_entry, NULL);
/* TypeInfo entry for a fixed (non-configurable) CPU model. */
#define DEFINE_CPU(type_name, initfn)       \
    {                                       \
        .name = type_name,                  \
        .parent = TYPE_RISCV_CPU,           \
        .instance_init = initfn             \
    }

/* TypeInfo entry for a dynamic CPU model (machine ids overridable). */
#define DEFINE_DYNAMIC_CPU(type_name, initfn)   \
    {                                           \
        .name = type_name,                      \
        .parent = TYPE_RISCV_DYNAMIC_CPU,       \
        .instance_init = initfn                 \
    }
2242 static const TypeInfo riscv_cpu_type_infos[] = {
2244 .name = TYPE_RISCV_CPU,
2246 .instance_size = sizeof(RISCVCPU),
2247 .instance_align = __alignof__(RISCVCPU),
2248 .instance_init = riscv_cpu_init,
2250 .class_size = sizeof(RISCVCPUClass),
2251 .class_init = riscv_cpu_class_init,
2254 .name = TYPE_RISCV_DYNAMIC_CPU,
2255 .parent = TYPE_RISCV_CPU,
2258 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
2259 #if defined(CONFIG_KVM)
2260 DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
2262 #if defined(TARGET_RISCV32)
2263 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
2264 DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
2265 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
2266 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
2267 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
2268 #elif defined(TARGET_RISCV64)
2269 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
2270 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
2271 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
2272 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
2273 DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
2274 DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
2275 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
2279 DEFINE_TYPES(riscv_cpu_type_infos)