2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
31 #define HELPER_H "helper.h"
32 #include "exec/helper-info.c.inc"
36 /* Since we have a distinction between register size and address size,
37 we need to redefine all of these. */
41 #undef tcg_global_mem_new
43 #if TARGET_LONG_BITS == 64
44 #define TCGv_tl TCGv_i64
45 #define tcg_temp_new_tl tcg_temp_new_i64
46 #if TARGET_REGISTER_BITS == 64
47 #define tcg_gen_extu_reg_tl tcg_gen_mov_i64
49 #define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
52 #define TCGv_tl TCGv_i32
53 #define tcg_temp_new_tl tcg_temp_new_i32
54 #define tcg_gen_extu_reg_tl tcg_gen_mov_i32
57 #if TARGET_REGISTER_BITS == 64
58 #define TCGv_reg TCGv_i64
60 #define tcg_temp_new tcg_temp_new_i64
61 #define tcg_global_mem_new tcg_global_mem_new_i64
63 #define tcg_gen_movi_reg tcg_gen_movi_i64
64 #define tcg_gen_mov_reg tcg_gen_mov_i64
65 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
66 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
67 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
68 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
69 #define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
70 #define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
71 #define tcg_gen_ld_reg tcg_gen_ld_i64
72 #define tcg_gen_st8_reg tcg_gen_st8_i64
73 #define tcg_gen_st16_reg tcg_gen_st16_i64
74 #define tcg_gen_st32_reg tcg_gen_st32_i64
75 #define tcg_gen_st_reg tcg_gen_st_i64
76 #define tcg_gen_add_reg tcg_gen_add_i64
77 #define tcg_gen_addi_reg tcg_gen_addi_i64
78 #define tcg_gen_sub_reg tcg_gen_sub_i64
79 #define tcg_gen_neg_reg tcg_gen_neg_i64
80 #define tcg_gen_subfi_reg tcg_gen_subfi_i64
81 #define tcg_gen_subi_reg tcg_gen_subi_i64
82 #define tcg_gen_and_reg tcg_gen_and_i64
83 #define tcg_gen_andi_reg tcg_gen_andi_i64
84 #define tcg_gen_or_reg tcg_gen_or_i64
85 #define tcg_gen_ori_reg tcg_gen_ori_i64
86 #define tcg_gen_xor_reg tcg_gen_xor_i64
87 #define tcg_gen_xori_reg tcg_gen_xori_i64
88 #define tcg_gen_not_reg tcg_gen_not_i64
89 #define tcg_gen_shl_reg tcg_gen_shl_i64
90 #define tcg_gen_shli_reg tcg_gen_shli_i64
91 #define tcg_gen_shr_reg tcg_gen_shr_i64
92 #define tcg_gen_shri_reg tcg_gen_shri_i64
93 #define tcg_gen_sar_reg tcg_gen_sar_i64
94 #define tcg_gen_sari_reg tcg_gen_sari_i64
95 #define tcg_gen_brcond_reg tcg_gen_brcond_i64
96 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
97 #define tcg_gen_setcond_reg tcg_gen_setcond_i64
98 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
99 #define tcg_gen_mul_reg tcg_gen_mul_i64
100 #define tcg_gen_muli_reg tcg_gen_muli_i64
101 #define tcg_gen_div_reg tcg_gen_div_i64
102 #define tcg_gen_rem_reg tcg_gen_rem_i64
103 #define tcg_gen_divu_reg tcg_gen_divu_i64
104 #define tcg_gen_remu_reg tcg_gen_remu_i64
105 #define tcg_gen_discard_reg tcg_gen_discard_i64
106 #define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
107 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
108 #define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
109 #define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
110 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
111 #define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
112 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
113 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
114 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
115 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
116 #define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
117 #define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
118 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
119 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
120 #define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
121 #define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
122 #define tcg_gen_andc_reg tcg_gen_andc_i64
123 #define tcg_gen_eqv_reg tcg_gen_eqv_i64
124 #define tcg_gen_nand_reg tcg_gen_nand_i64
125 #define tcg_gen_nor_reg tcg_gen_nor_i64
126 #define tcg_gen_orc_reg tcg_gen_orc_i64
127 #define tcg_gen_clz_reg tcg_gen_clz_i64
128 #define tcg_gen_ctz_reg tcg_gen_ctz_i64
129 #define tcg_gen_clzi_reg tcg_gen_clzi_i64
130 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
131 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
132 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
133 #define tcg_gen_rotl_reg tcg_gen_rotl_i64
134 #define tcg_gen_rotli_reg tcg_gen_rotli_i64
135 #define tcg_gen_rotr_reg tcg_gen_rotr_i64
136 #define tcg_gen_rotri_reg tcg_gen_rotri_i64
137 #define tcg_gen_deposit_reg tcg_gen_deposit_i64
138 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
139 #define tcg_gen_extract_reg tcg_gen_extract_i64
140 #define tcg_gen_sextract_reg tcg_gen_sextract_i64
141 #define tcg_gen_extract2_reg tcg_gen_extract2_i64
142 #define tcg_constant_reg tcg_constant_i64
143 #define tcg_gen_movcond_reg tcg_gen_movcond_i64
144 #define tcg_gen_add2_reg tcg_gen_add2_i64
145 #define tcg_gen_sub2_reg tcg_gen_sub2_i64
146 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
147 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
148 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
149 #define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
151 #define TCGv_reg TCGv_i32
152 #define tcg_temp_new tcg_temp_new_i32
153 #define tcg_global_mem_new tcg_global_mem_new_i32
155 #define tcg_gen_movi_reg tcg_gen_movi_i32
156 #define tcg_gen_mov_reg tcg_gen_mov_i32
157 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
158 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
159 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
160 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
161 #define tcg_gen_ld32u_reg tcg_gen_ld_i32
162 #define tcg_gen_ld32s_reg tcg_gen_ld_i32
163 #define tcg_gen_ld_reg tcg_gen_ld_i32
164 #define tcg_gen_st8_reg tcg_gen_st8_i32
165 #define tcg_gen_st16_reg tcg_gen_st16_i32
166 #define tcg_gen_st32_reg tcg_gen_st32_i32
167 #define tcg_gen_st_reg tcg_gen_st_i32
168 #define tcg_gen_add_reg tcg_gen_add_i32
169 #define tcg_gen_addi_reg tcg_gen_addi_i32
170 #define tcg_gen_sub_reg tcg_gen_sub_i32
171 #define tcg_gen_neg_reg tcg_gen_neg_i32
172 #define tcg_gen_subfi_reg tcg_gen_subfi_i32
173 #define tcg_gen_subi_reg tcg_gen_subi_i32
174 #define tcg_gen_and_reg tcg_gen_and_i32
175 #define tcg_gen_andi_reg tcg_gen_andi_i32
176 #define tcg_gen_or_reg tcg_gen_or_i32
177 #define tcg_gen_ori_reg tcg_gen_ori_i32
178 #define tcg_gen_xor_reg tcg_gen_xor_i32
179 #define tcg_gen_xori_reg tcg_gen_xori_i32
180 #define tcg_gen_not_reg tcg_gen_not_i32
181 #define tcg_gen_shl_reg tcg_gen_shl_i32
182 #define tcg_gen_shli_reg tcg_gen_shli_i32
183 #define tcg_gen_shr_reg tcg_gen_shr_i32
184 #define tcg_gen_shri_reg tcg_gen_shri_i32
185 #define tcg_gen_sar_reg tcg_gen_sar_i32
186 #define tcg_gen_sari_reg tcg_gen_sari_i32
187 #define tcg_gen_brcond_reg tcg_gen_brcond_i32
188 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
189 #define tcg_gen_setcond_reg tcg_gen_setcond_i32
190 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
191 #define tcg_gen_mul_reg tcg_gen_mul_i32
192 #define tcg_gen_muli_reg tcg_gen_muli_i32
193 #define tcg_gen_div_reg tcg_gen_div_i32
194 #define tcg_gen_rem_reg tcg_gen_rem_i32
195 #define tcg_gen_divu_reg tcg_gen_divu_i32
196 #define tcg_gen_remu_reg tcg_gen_remu_i32
197 #define tcg_gen_discard_reg tcg_gen_discard_i32
198 #define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
199 #define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
200 #define tcg_gen_extu_i32_reg tcg_gen_mov_i32
201 #define tcg_gen_ext_i32_reg tcg_gen_mov_i32
202 #define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
203 #define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
204 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
205 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
206 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
207 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
208 #define tcg_gen_ext32u_reg tcg_gen_mov_i32
209 #define tcg_gen_ext32s_reg tcg_gen_mov_i32
210 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
211 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
212 #define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
213 #define tcg_gen_andc_reg tcg_gen_andc_i32
214 #define tcg_gen_eqv_reg tcg_gen_eqv_i32
215 #define tcg_gen_nand_reg tcg_gen_nand_i32
216 #define tcg_gen_nor_reg tcg_gen_nor_i32
217 #define tcg_gen_orc_reg tcg_gen_orc_i32
218 #define tcg_gen_clz_reg tcg_gen_clz_i32
219 #define tcg_gen_ctz_reg tcg_gen_ctz_i32
220 #define tcg_gen_clzi_reg tcg_gen_clzi_i32
221 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
222 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
223 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
224 #define tcg_gen_rotl_reg tcg_gen_rotl_i32
225 #define tcg_gen_rotli_reg tcg_gen_rotli_i32
226 #define tcg_gen_rotr_reg tcg_gen_rotr_i32
227 #define tcg_gen_rotri_reg tcg_gen_rotri_i32
228 #define tcg_gen_deposit_reg tcg_gen_deposit_i32
229 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
230 #define tcg_gen_extract_reg tcg_gen_extract_i32
231 #define tcg_gen_sextract_reg tcg_gen_sextract_i32
232 #define tcg_gen_extract2_reg tcg_gen_extract2_i32
233 #define tcg_constant_reg tcg_constant_i32
234 #define tcg_gen_movcond_reg tcg_gen_movcond_i32
235 #define tcg_gen_add2_reg tcg_gen_add2_i32
236 #define tcg_gen_sub2_reg tcg_gen_sub2_i32
237 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
238 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
239 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
240 #define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
241 #endif /* TARGET_REGISTER_BITS */
243 typedef struct DisasCond {
248 typedef struct DisasContext {
249 DisasContextBase base;
266 #ifdef CONFIG_USER_ONLY
271 #ifdef CONFIG_USER_ONLY
272 #define UNALIGN(C) (C)->unalign
274 #define UNALIGN(C) MO_ALIGN
277 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
278 static int expand_sm_imm(DisasContext *ctx, int val)
280 if (val & PSW_SM_E) {
281 val = (val & ~PSW_SM_E) | PSW_E;
283 if (val & PSW_SM_W) {
284 val = (val & ~PSW_SM_W) | PSW_W;
289 /* Inverted space register indicates 0 means sr0 not inferred from base. */
290 static int expand_sr3x(DisasContext *ctx, int val)
295 /* Convert the M:A bits within a memory insn to the tri-state value
296 we use for the final M. */
297 static int ma_to_m(DisasContext *ctx, int val)
299 return val & 2 ? (val & 1 ? -1 : 1) : 0;
302 /* Convert the sign of the displacement to a pre or post-modify. */
303 static int pos_to_m(DisasContext *ctx, int val)
308 static int neg_to_m(DisasContext *ctx, int val)
313 /* Used for branch targets and fp memory ops. */
314 static int expand_shl2(DisasContext *ctx, int val)
319 /* Used for fp memory ops. */
320 static int expand_shl3(DisasContext *ctx, int val)
325 /* Used for assemble_21. */
326 static int expand_shl11(DisasContext *ctx, int val)
332 /* Include the auto-generated decoder. */
333 #include "decode-insns.c.inc"
335 /* We are not using a goto_tb (for whatever reason), but have updated
336 the iaq (for whatever reason), so don't do it again on exit. */
337 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
339 /* We are exiting the TB, but have neither emitted a goto_tb, nor
340 updated the iaq for the next instruction to be executed. */
341 #define DISAS_IAQ_N_STALE DISAS_TARGET_1
343 /* Similarly, but we want to return to the main loop immediately
344 to recognize unmasked interrupts. */
345 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
346 #define DISAS_EXIT DISAS_TARGET_3
348 /* global register indexes */
349 static TCGv_reg cpu_gr[32];
350 static TCGv_i64 cpu_sr[4];
351 static TCGv_i64 cpu_srH;
352 static TCGv_reg cpu_iaoq_f;
353 static TCGv_reg cpu_iaoq_b;
354 static TCGv_i64 cpu_iasq_f;
355 static TCGv_i64 cpu_iasq_b;
356 static TCGv_reg cpu_sar;
357 static TCGv_reg cpu_psw_n;
358 static TCGv_reg cpu_psw_v;
359 static TCGv_reg cpu_psw_cb;
360 static TCGv_reg cpu_psw_cb_msb;
362 void hppa_translate_init(void)
364 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
366 typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
367 static const GlobalVar vars[] = {
368 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
379 /* Use the symbolic register names that match the disassembler. */
380 static const char gr_names[32][4] = {
381 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
382 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
383 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
384 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
386 /* SR[4-7] are not global registers so that we can index them. */
387 static const char sr_names[5][4] = {
388 "sr0", "sr1", "sr2", "sr3", "srH"
394 for (i = 1; i < 32; i++) {
395 cpu_gr[i] = tcg_global_mem_new(tcg_env,
396 offsetof(CPUHPPAState, gr[i]),
399 for (i = 0; i < 4; i++) {
400 cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
401 offsetof(CPUHPPAState, sr[i]),
404 cpu_srH = tcg_global_mem_new_i64(tcg_env,
405 offsetof(CPUHPPAState, sr[4]),
408 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
409 const GlobalVar *v = &vars[i];
410 *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
413 cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
414 offsetof(CPUHPPAState, iasq_f),
416 cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
417 offsetof(CPUHPPAState, iasq_b),
421 static DisasCond cond_make_f(void)
430 static DisasCond cond_make_t(void)
433 .c = TCG_COND_ALWAYS,
439 static DisasCond cond_make_n(void)
444 .a1 = tcg_constant_reg(0)
448 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
450 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
452 .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
456 static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
458 TCGv_reg tmp = tcg_temp_new();
459 tcg_gen_mov_reg(tmp, a0);
460 return cond_make_0_tmp(c, tmp);
463 static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
465 DisasCond r = { .c = c };
467 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
468 r.a0 = tcg_temp_new();
469 tcg_gen_mov_reg(r.a0, a0);
470 r.a1 = tcg_temp_new();
471 tcg_gen_mov_reg(r.a1, a1);
476 static void cond_free(DisasCond *cond)
483 case TCG_COND_ALWAYS:
484 cond->c = TCG_COND_NEVER;
491 static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
494 TCGv_reg t = tcg_temp_new();
495 tcg_gen_movi_reg(t, 0);
502 static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
504 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
505 return tcg_temp_new();
511 static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
513 if (ctx->null_cond.c != TCG_COND_NEVER) {
514 tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
515 ctx->null_cond.a1, dest, t);
517 tcg_gen_mov_reg(dest, t);
521 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
524 save_or_nullify(ctx, cpu_gr[reg], t);
536 static TCGv_i32 load_frw_i32(unsigned rt)
538 TCGv_i32 ret = tcg_temp_new_i32();
539 tcg_gen_ld_i32(ret, tcg_env,
540 offsetof(CPUHPPAState, fr[rt & 31])
541 + (rt & 32 ? LO_OFS : HI_OFS));
545 static TCGv_i32 load_frw0_i32(unsigned rt)
548 TCGv_i32 ret = tcg_temp_new_i32();
549 tcg_gen_movi_i32(ret, 0);
552 return load_frw_i32(rt);
556 static TCGv_i64 load_frw0_i64(unsigned rt)
558 TCGv_i64 ret = tcg_temp_new_i64();
560 tcg_gen_movi_i64(ret, 0);
562 tcg_gen_ld32u_i64(ret, tcg_env,
563 offsetof(CPUHPPAState, fr[rt & 31])
564 + (rt & 32 ? LO_OFS : HI_OFS));
569 static void save_frw_i32(unsigned rt, TCGv_i32 val)
571 tcg_gen_st_i32(val, tcg_env,
572 offsetof(CPUHPPAState, fr[rt & 31])
573 + (rt & 32 ? LO_OFS : HI_OFS));
579 static TCGv_i64 load_frd(unsigned rt)
581 TCGv_i64 ret = tcg_temp_new_i64();
582 tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
586 static TCGv_i64 load_frd0(unsigned rt)
589 TCGv_i64 ret = tcg_temp_new_i64();
590 tcg_gen_movi_i64(ret, 0);
597 static void save_frd(unsigned rt, TCGv_i64 val)
599 tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
602 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
604 #ifdef CONFIG_USER_ONLY
605 tcg_gen_movi_i64(dest, 0);
608 tcg_gen_mov_i64(dest, cpu_sr[reg]);
609 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
610 tcg_gen_mov_i64(dest, cpu_srH);
612 tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
617 /* Skip over the implementation of an insn that has been nullified.
618 Use this when the insn is too complex for a conditional move. */
619 static void nullify_over(DisasContext *ctx)
621 if (ctx->null_cond.c != TCG_COND_NEVER) {
622 /* The always condition should have been handled in the main loop. */
623 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
625 ctx->null_lab = gen_new_label();
627 /* If we're using PSW[N], copy it to a temp because... */
628 if (ctx->null_cond.a0 == cpu_psw_n) {
629 ctx->null_cond.a0 = tcg_temp_new();
630 tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
632 /* ... we clear it before branching over the implementation,
633 so that (1) it's clear after nullifying this insn and
634 (2) if this insn nullifies the next, PSW[N] is valid. */
635 if (ctx->psw_n_nonzero) {
636 ctx->psw_n_nonzero = false;
637 tcg_gen_movi_reg(cpu_psw_n, 0);
640 tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
641 ctx->null_cond.a1, ctx->null_lab);
642 cond_free(&ctx->null_cond);
646 /* Save the current nullification state to PSW[N]. */
647 static void nullify_save(DisasContext *ctx)
649 if (ctx->null_cond.c == TCG_COND_NEVER) {
650 if (ctx->psw_n_nonzero) {
651 tcg_gen_movi_reg(cpu_psw_n, 0);
655 if (ctx->null_cond.a0 != cpu_psw_n) {
656 tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
657 ctx->null_cond.a0, ctx->null_cond.a1);
658 ctx->psw_n_nonzero = true;
660 cond_free(&ctx->null_cond);
663 /* Set a PSW[N] to X. The intention is that this is used immediately
664 before a goto_tb/exit_tb, so that there is no fallthru path to other
665 code within the TB. Therefore we do not update psw_n_nonzero. */
666 static void nullify_set(DisasContext *ctx, bool x)
668 if (ctx->psw_n_nonzero || x) {
669 tcg_gen_movi_reg(cpu_psw_n, x);
673 /* Mark the end of an instruction that may have been nullified.
674 This is the pair to nullify_over. Always returns true so that
675 it may be tail-called from a translate function. */
676 static bool nullify_end(DisasContext *ctx)
678 TCGLabel *null_lab = ctx->null_lab;
679 DisasJumpType status = ctx->base.is_jmp;
681 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
682 For UPDATED, we cannot update on the nullified path. */
683 assert(status != DISAS_IAQ_N_UPDATED);
685 if (likely(null_lab == NULL)) {
686 /* The current insn wasn't conditional or handled the condition
687 applied to it without a branch, so the (new) setting of
688 NULL_COND can be applied directly to the next insn. */
691 ctx->null_lab = NULL;
693 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
694 /* The next instruction will be unconditional,
695 and NULL_COND already reflects that. */
696 gen_set_label(null_lab);
698 /* The insn that we just executed is itself nullifying the next
699 instruction. Store the condition in the PSW[N] global.
700 We asserted PSW[N] = 0 in nullify_over, so that after the
701 label we have the proper value in place. */
703 gen_set_label(null_lab);
704 ctx->null_cond = cond_make_n();
706 if (status == DISAS_NORETURN) {
707 ctx->base.is_jmp = DISAS_NEXT;
712 static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
714 if (unlikely(ival == -1)) {
715 tcg_gen_mov_reg(dest, vval);
717 tcg_gen_movi_reg(dest, ival);
721 static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
723 return ctx->iaoq_f + disp + 8;
726 static void gen_excp_1(int exception)
728 gen_helper_excp(tcg_env, tcg_constant_i32(exception));
731 static void gen_excp(DisasContext *ctx, int exception)
733 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
734 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
736 gen_excp_1(exception);
737 ctx->base.is_jmp = DISAS_NORETURN;
740 static bool gen_excp_iir(DisasContext *ctx, int exc)
743 tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
744 tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
746 return nullify_end(ctx);
749 static bool gen_illegal(DisasContext *ctx)
751 return gen_excp_iir(ctx, EXCP_ILL);
754 #ifdef CONFIG_USER_ONLY
755 #define CHECK_MOST_PRIVILEGED(EXCP) \
756 return gen_excp_iir(ctx, EXCP)
758 #define CHECK_MOST_PRIVILEGED(EXCP) \
760 if (ctx->privilege != 0) { \
761 return gen_excp_iir(ctx, EXCP); \
766 static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
768 return translator_use_goto_tb(&ctx->base, dest);
771 /* If the next insn is to be nullified, and it's on the same page,
772 and we're not attempting to set a breakpoint on it, then we can
773 totally skip the nullified insn. This avoids creating and
774 executing a TB that merely branches to the next TB. */
775 static bool use_nullify_skip(DisasContext *ctx)
777 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
778 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
781 static void gen_goto_tb(DisasContext *ctx, int which,
782 target_ureg f, target_ureg b)
784 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
785 tcg_gen_goto_tb(which);
786 tcg_gen_movi_reg(cpu_iaoq_f, f);
787 tcg_gen_movi_reg(cpu_iaoq_b, b);
788 tcg_gen_exit_tb(ctx->base.tb, which);
790 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
791 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
792 tcg_gen_lookup_and_goto_ptr();
796 static bool cond_need_sv(int c)
798 return c == 2 || c == 3 || c == 6;
801 static bool cond_need_cb(int c)
803 return c == 4 || c == 5;
807 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
808 * the Parisc 1.1 Architecture Reference Manual for details.
811 static DisasCond do_cond(unsigned cf, TCGv_reg res,
812 TCGv_reg cb_msb, TCGv_reg sv)
818 case 0: /* Never / TR (0 / 1) */
819 cond = cond_make_f();
821 case 1: /* = / <> (Z / !Z) */
822 cond = cond_make_0(TCG_COND_EQ, res);
824 case 2: /* < / >= (N ^ V / !(N ^ V) */
825 tmp = tcg_temp_new();
826 tcg_gen_xor_reg(tmp, res, sv);
827 cond = cond_make_0_tmp(TCG_COND_LT, tmp);
829 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
833 * ((res < 0) ^ (sv < 0)) | !res
834 * ((res ^ sv) < 0) | !res
835 * (~(res ^ sv) >= 0) | !res
836 * !(~(res ^ sv) >> 31) | !res
837 * !(~(res ^ sv) >> 31 & res)
839 tmp = tcg_temp_new();
840 tcg_gen_eqv_reg(tmp, res, sv);
841 tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
842 tcg_gen_and_reg(tmp, tmp, res);
843 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
845 case 4: /* NUV / UV (!C / C) */
846 cond = cond_make_0(TCG_COND_EQ, cb_msb);
848 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
849 tmp = tcg_temp_new();
850 tcg_gen_neg_reg(tmp, cb_msb);
851 tcg_gen_and_reg(tmp, tmp, res);
852 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
854 case 6: /* SV / NSV (V / !V) */
855 cond = cond_make_0(TCG_COND_LT, sv);
857 case 7: /* OD / EV */
858 tmp = tcg_temp_new();
859 tcg_gen_andi_reg(tmp, res, 1);
860 cond = cond_make_0_tmp(TCG_COND_NE, tmp);
863 g_assert_not_reached();
866 cond.c = tcg_invert_cond(cond.c);
872 /* Similar, but for the special case of subtraction without borrow, we
873 can use the inputs directly. This can allow other computation to be
874 deleted as unused. */
876 static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
877 TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
883 cond = cond_make(TCG_COND_EQ, in1, in2);
886 cond = cond_make(TCG_COND_LT, in1, in2);
889 cond = cond_make(TCG_COND_LE, in1, in2);
891 case 4: /* << / >>= */
892 cond = cond_make(TCG_COND_LTU, in1, in2);
894 case 5: /* <<= / >> */
895 cond = cond_make(TCG_COND_LEU, in1, in2);
898 return do_cond(cf, res, NULL, sv);
901 cond.c = tcg_invert_cond(cond.c);
908 * Similar, but for logicals, where the carry and overflow bits are not
909 * computed, and use of them is undefined.
911 * Undefined or not, hardware does not trap. It seems reasonable to
912 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
913 * how cases c={2,3} are treated.
916 static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
920 case 9: /* undef, C */
921 case 11: /* undef, C & !Z */
922 case 12: /* undef, V */
923 return cond_make_f();
926 case 8: /* undef, !C */
927 case 10: /* undef, !C | Z */
928 case 13: /* undef, !V */
929 return cond_make_t();
932 return cond_make_0(TCG_COND_EQ, res);
934 return cond_make_0(TCG_COND_NE, res);
936 return cond_make_0(TCG_COND_LT, res);
938 return cond_make_0(TCG_COND_GE, res);
940 return cond_make_0(TCG_COND_LE, res);
942 return cond_make_0(TCG_COND_GT, res);
946 return do_cond(cf, res, NULL, NULL);
949 g_assert_not_reached();
953 /* Similar, but for shift/extract/deposit conditions. */
955 static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
959 /* Convert the compressed condition codes to standard.
960 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
961 4-7 are the reverse of 0-3. */
968 return do_log_cond(c * 2 + f, res);
971 /* Similar, but for unit conditions. */
973 static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
974 TCGv_reg in1, TCGv_reg in2)
977 TCGv_reg tmp, cb = NULL;
980 /* Since we want to test lots of carry-out bits all at once, do not
981 * do our normal thing and compute carry-in of bit B+1 since that
982 * leaves us with carry bits spread across two words.
985 tmp = tcg_temp_new();
986 tcg_gen_or_reg(cb, in1, in2);
987 tcg_gen_and_reg(tmp, in1, in2);
988 tcg_gen_andc_reg(cb, cb, res);
989 tcg_gen_or_reg(cb, cb, tmp);
993 case 0: /* never / TR */
994 case 1: /* undefined */
995 case 5: /* undefined */
996 cond = cond_make_f();
999 case 2: /* SBZ / NBZ */
1000 /* See hasless(v,1) from
1001 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1003 tmp = tcg_temp_new();
1004 tcg_gen_subi_reg(tmp, res, 0x01010101u);
1005 tcg_gen_andc_reg(tmp, tmp, res);
1006 tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
1007 cond = cond_make_0(TCG_COND_NE, tmp);
1010 case 3: /* SHZ / NHZ */
1011 tmp = tcg_temp_new();
1012 tcg_gen_subi_reg(tmp, res, 0x00010001u);
1013 tcg_gen_andc_reg(tmp, tmp, res);
1014 tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
1015 cond = cond_make_0(TCG_COND_NE, tmp);
1018 case 4: /* SDC / NDC */
1019 tcg_gen_andi_reg(cb, cb, 0x88888888u);
1020 cond = cond_make_0(TCG_COND_NE, cb);
1023 case 6: /* SBC / NBC */
1024 tcg_gen_andi_reg(cb, cb, 0x80808080u);
1025 cond = cond_make_0(TCG_COND_NE, cb);
1028 case 7: /* SHC / NHC */
1029 tcg_gen_andi_reg(cb, cb, 0x80008000u);
1030 cond = cond_make_0(TCG_COND_NE, cb);
1034 g_assert_not_reached();
1037 cond.c = tcg_invert_cond(cond.c);
1043 /* Compute signed overflow for addition. */
1044 static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
1045 TCGv_reg in1, TCGv_reg in2)
1047 TCGv_reg sv = tcg_temp_new();
1048 TCGv_reg tmp = tcg_temp_new();
1050 tcg_gen_xor_reg(sv, res, in1);
1051 tcg_gen_xor_reg(tmp, in1, in2);
1052 tcg_gen_andc_reg(sv, sv, tmp);
1057 /* Compute signed overflow for subtraction. */
1058 static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1059 TCGv_reg in1, TCGv_reg in2)
1061 TCGv_reg sv = tcg_temp_new();
1062 TCGv_reg tmp = tcg_temp_new();
1064 tcg_gen_xor_reg(sv, res, in1);
1065 tcg_gen_xor_reg(tmp, in1, in2);
1066 tcg_gen_and_reg(sv, sv, tmp);
1071 static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1072 TCGv_reg in2, unsigned shift, bool is_l,
1073 bool is_tsv, bool is_tc, bool is_c, unsigned cf)
1075 TCGv_reg dest, cb, cb_msb, sv, tmp;
1076 unsigned c = cf >> 1;
1079 dest = tcg_temp_new();
1084 tmp = tcg_temp_new();
1085 tcg_gen_shli_reg(tmp, in1, shift);
1089 if (!is_l || cond_need_cb(c)) {
1090 TCGv_reg zero = tcg_constant_reg(0);
1091 cb_msb = tcg_temp_new();
1092 tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
1094 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
1097 cb = tcg_temp_new();
1098 tcg_gen_xor_reg(cb, in1, in2);
1099 tcg_gen_xor_reg(cb, cb, dest);
1102 tcg_gen_add_reg(dest, in1, in2);
1104 tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
1108 /* Compute signed overflow if required. */
1110 if (is_tsv || cond_need_sv(c)) {
1111 sv = do_add_sv(ctx, dest, in1, in2);
1113 /* ??? Need to include overflow from shift. */
1114 gen_helper_tsv(tcg_env, sv);
1118 /* Emit any conditional trap before any writeback. */
1119 cond = do_cond(cf, dest, cb_msb, sv);
1121 tmp = tcg_temp_new();
1122 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1123 gen_helper_tcond(tcg_env, tmp);
1126 /* Write back the result. */
1128 save_or_nullify(ctx, cpu_psw_cb, cb);
1129 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1131 save_gpr(ctx, rt, dest);
1133 /* Install the new nullification. */
1134 cond_free(&ctx->null_cond);
1135 ctx->null_cond = cond;
1138 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
1139 bool is_l, bool is_tsv, bool is_tc, bool is_c)
1141 TCGv_reg tcg_r1, tcg_r2;
1146 tcg_r1 = load_gpr(ctx, a->r1);
1147 tcg_r2 = load_gpr(ctx, a->r2);
1148 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
1149 return nullify_end(ctx);
1152 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1153 bool is_tsv, bool is_tc)
1155 TCGv_reg tcg_im, tcg_r2;
1160 tcg_im = tcg_constant_reg(a->i);
1161 tcg_r2 = load_gpr(ctx, a->r);
1162 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
1163 return nullify_end(ctx);
1166 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1167 TCGv_reg in2, bool is_tsv, bool is_b,
1168 bool is_tc, unsigned cf)
1170 TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
1171 unsigned c = cf >> 1;
1174 dest = tcg_temp_new();
1175 cb = tcg_temp_new();
1176 cb_msb = tcg_temp_new();
1178 zero = tcg_constant_reg(0);
1180 /* DEST,C = IN1 + ~IN2 + C. */
1181 tcg_gen_not_reg(cb, in2);
1182 tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
1183 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
1184 tcg_gen_xor_reg(cb, cb, in1);
1185 tcg_gen_xor_reg(cb, cb, dest);
1187 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1188 operations by seeding the high word with 1 and subtracting. */
1189 tcg_gen_movi_reg(cb_msb, 1);
1190 tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
1191 tcg_gen_eqv_reg(cb, in1, in2);
1192 tcg_gen_xor_reg(cb, cb, dest);
1195 /* Compute signed overflow if required. */
1197 if (is_tsv || cond_need_sv(c)) {
1198 sv = do_sub_sv(ctx, dest, in1, in2);
1200 gen_helper_tsv(tcg_env, sv);
1204 /* Compute the condition. We cannot use the special case for borrow. */
1206 cond = do_sub_cond(cf, dest, in1, in2, sv);
1208 cond = do_cond(cf, dest, cb_msb, sv);
1211 /* Emit any conditional trap before any writeback. */
1213 tmp = tcg_temp_new();
1214 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1215 gen_helper_tcond(tcg_env, tmp);
1218 /* Write back the result. */
1219 save_or_nullify(ctx, cpu_psw_cb, cb);
1220 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1221 save_gpr(ctx, rt, dest);
1223 /* Install the new nullification. */
1224 cond_free(&ctx->null_cond);
1225 ctx->null_cond = cond;
1228 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
1229 bool is_tsv, bool is_b, bool is_tc)
1231 TCGv_reg tcg_r1, tcg_r2;
1236 tcg_r1 = load_gpr(ctx, a->r1);
1237 tcg_r2 = load_gpr(ctx, a->r2);
1238 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
1239 return nullify_end(ctx);
1242 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1244 TCGv_reg tcg_im, tcg_r2;
1249 tcg_im = tcg_constant_reg(a->i);
1250 tcg_r2 = load_gpr(ctx, a->r);
1251 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
1252 return nullify_end(ctx);
1255 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1256 TCGv_reg in2, unsigned cf)
1261 dest = tcg_temp_new();
1262 tcg_gen_sub_reg(dest, in1, in2);
1264 /* Compute signed overflow if required. */
1266 if (cond_need_sv(cf >> 1)) {
1267 sv = do_sub_sv(ctx, dest, in1, in2);
1270 /* Form the condition for the compare. */
1271 cond = do_sub_cond(cf, dest, in1, in2, sv);
1274 tcg_gen_movi_reg(dest, 0);
1275 save_gpr(ctx, rt, dest);
1277 /* Install the new nullification. */
1278 cond_free(&ctx->null_cond);
1279 ctx->null_cond = cond;
1282 static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1283 TCGv_reg in2, unsigned cf,
1284 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1286 TCGv_reg dest = dest_gpr(ctx, rt);
1288 /* Perform the operation, and writeback. */
1290 save_gpr(ctx, rt, dest);
1292 /* Install the new nullification. */
1293 cond_free(&ctx->null_cond);
1295 ctx->null_cond = do_log_cond(cf, dest);
1299 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
1300 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1302 TCGv_reg tcg_r1, tcg_r2;
1307 tcg_r1 = load_gpr(ctx, a->r1);
1308 tcg_r2 = load_gpr(ctx, a->r2);
1309 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
1310 return nullify_end(ctx);
1313 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1314 TCGv_reg in2, unsigned cf, bool is_tc,
1315 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1321 dest = dest_gpr(ctx, rt);
1323 save_gpr(ctx, rt, dest);
1324 cond_free(&ctx->null_cond);
1326 dest = tcg_temp_new();
1329 cond = do_unit_cond(cf, dest, in1, in2);
1332 TCGv_reg tmp = tcg_temp_new();
1333 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1334 gen_helper_tcond(tcg_env, tmp);
1336 save_gpr(ctx, rt, dest);
1338 cond_free(&ctx->null_cond);
1339 ctx->null_cond = cond;
1343 #ifndef CONFIG_USER_ONLY
1344 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1345 from the top 2 bits of the base register. There are a few system
1346 instructions that have a 3-bit space specifier, for which SR0 is
1347 not special. To handle this, pass ~SP. */
1348 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
1358 spc = tcg_temp_new_tl();
1359 load_spr(ctx, spc, sp);
1362 if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1366 ptr = tcg_temp_new_ptr();
1367 tmp = tcg_temp_new();
1368 spc = tcg_temp_new_tl();
1370 tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
1371 tcg_gen_andi_reg(tmp, tmp, 030);
1372 tcg_gen_trunc_reg_ptr(ptr, tmp);
1374 tcg_gen_add_ptr(ptr, ptr, tcg_env);
1375 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1381 static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
1382 unsigned rb, unsigned rx, int scale, target_sreg disp,
1383 unsigned sp, int modify, bool is_phys)
1385 TCGv_reg base = load_gpr(ctx, rb);
1388 /* Note that RX is mutually exclusive with DISP. */
1390 ofs = tcg_temp_new();
1391 tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
1392 tcg_gen_add_reg(ofs, ofs, base);
1393 } else if (disp || modify) {
1394 ofs = tcg_temp_new();
1395 tcg_gen_addi_reg(ofs, base, disp);
1401 #ifdef CONFIG_USER_ONLY
1402 *pgva = (modify <= 0 ? ofs : base);
1404 TCGv_tl addr = tcg_temp_new_tl();
1405 tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
1406 if (ctx->tb_flags & PSW_W) {
1407 tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
1410 tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
1416 /* Emit a memory load. The modify parameter should be
1417 * < 0 for pre-modify,
1418 * > 0 for post-modify,
1419 * = 0 for no base register update.
1421 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1422 unsigned rx, int scale, target_sreg disp,
1423 unsigned sp, int modify, MemOp mop)
1428 /* Caller uses nullify_over/nullify_end. */
1429 assert(ctx->null_cond.c == TCG_COND_NEVER);
1431 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1432 ctx->mmu_idx == MMU_PHYS_IDX);
1433 tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1435 save_gpr(ctx, rb, ofs);
1439 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1440 unsigned rx, int scale, target_sreg disp,
1441 unsigned sp, int modify, MemOp mop)
1446 /* Caller uses nullify_over/nullify_end. */
1447 assert(ctx->null_cond.c == TCG_COND_NEVER);
1449 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1450 ctx->mmu_idx == MMU_PHYS_IDX);
1451 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1453 save_gpr(ctx, rb, ofs);
1457 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1458 unsigned rx, int scale, target_sreg disp,
1459 unsigned sp, int modify, MemOp mop)
1464 /* Caller uses nullify_over/nullify_end. */
1465 assert(ctx->null_cond.c == TCG_COND_NEVER);
1467 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1468 ctx->mmu_idx == MMU_PHYS_IDX);
1469 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1471 save_gpr(ctx, rb, ofs);
1475 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1476 unsigned rx, int scale, target_sreg disp,
1477 unsigned sp, int modify, MemOp mop)
1482 /* Caller uses nullify_over/nullify_end. */
1483 assert(ctx->null_cond.c == TCG_COND_NEVER);
1485 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1486 ctx->mmu_idx == MMU_PHYS_IDX);
1487 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1489 save_gpr(ctx, rb, ofs);
1493 #if TARGET_REGISTER_BITS == 64
1494 #define do_load_reg do_load_64
1495 #define do_store_reg do_store_64
1497 #define do_load_reg do_load_32
1498 #define do_store_reg do_store_32
1501 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1502 unsigned rx, int scale, target_sreg disp,
1503 unsigned sp, int modify, MemOp mop)
1510 /* No base register update. */
1511 dest = dest_gpr(ctx, rt);
1513 /* Make sure if RT == RB, we see the result of the load. */
1514 dest = tcg_temp_new();
1516 do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1517 save_gpr(ctx, rt, dest);
1519 return nullify_end(ctx);
1522 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1523 unsigned rx, int scale, target_sreg disp,
1524 unsigned sp, int modify)
1530 tmp = tcg_temp_new_i32();
1531 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1532 save_frw_i32(rt, tmp);
1535 gen_helper_loaded_fr0(tcg_env);
1538 return nullify_end(ctx);
1541 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1543 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1544 a->disp, a->sp, a->m);
1547 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1548 unsigned rx, int scale, target_sreg disp,
1549 unsigned sp, int modify)
1555 tmp = tcg_temp_new_i64();
1556 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1560 gen_helper_loaded_fr0(tcg_env);
1563 return nullify_end(ctx);
1566 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1568 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1569 a->disp, a->sp, a->m);
1572 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1573 target_sreg disp, unsigned sp,
1574 int modify, MemOp mop)
1577 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1578 return nullify_end(ctx);
1581 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1582 unsigned rx, int scale, target_sreg disp,
1583 unsigned sp, int modify)
1589 tmp = load_frw_i32(rt);
1590 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1592 return nullify_end(ctx);
1595 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1597 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1598 a->disp, a->sp, a->m);
1601 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1602 unsigned rx, int scale, target_sreg disp,
1603 unsigned sp, int modify)
1610 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1612 return nullify_end(ctx);
1615 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1617 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1618 a->disp, a->sp, a->m);
1621 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1622 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1627 tmp = load_frw0_i32(ra);
1629 func(tmp, tcg_env, tmp);
1631 save_frw_i32(rt, tmp);
1632 return nullify_end(ctx);
1635 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1636 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1643 dst = tcg_temp_new_i32();
1645 func(dst, tcg_env, src);
1647 save_frw_i32(rt, dst);
1648 return nullify_end(ctx);
1651 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1652 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1657 tmp = load_frd0(ra);
1659 func(tmp, tcg_env, tmp);
1662 return nullify_end(ctx);
1665 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1666 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1672 src = load_frw0_i32(ra);
1673 dst = tcg_temp_new_i64();
1675 func(dst, tcg_env, src);
1678 return nullify_end(ctx);
1681 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1682 unsigned ra, unsigned rb,
1683 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1688 a = load_frw0_i32(ra);
1689 b = load_frw0_i32(rb);
1691 func(a, tcg_env, a, b);
1693 save_frw_i32(rt, a);
1694 return nullify_end(ctx);
1697 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1698 unsigned ra, unsigned rb,
1699 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1707 func(a, tcg_env, a, b);
1710 return nullify_end(ctx);
1713 /* Emit an unconditional branch to a direct target, which may or may not
1714 have already had nullification handled. */
1715 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1716 unsigned link, bool is_n)
1718 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1720 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1724 ctx->null_cond.c = TCG_COND_ALWAYS;
1730 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1733 if (is_n && use_nullify_skip(ctx)) {
1734 nullify_set(ctx, 0);
1735 gen_goto_tb(ctx, 0, dest, dest + 4);
1737 nullify_set(ctx, is_n);
1738 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1743 nullify_set(ctx, 0);
1744 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1745 ctx->base.is_jmp = DISAS_NORETURN;
1750 /* Emit a conditional branch to a direct target. If the branch itself
1751 is nullified, we should have already used nullify_over. */
1752 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1755 target_ureg dest = iaoq_dest(ctx, disp);
1756 TCGLabel *taken = NULL;
1757 TCGCond c = cond->c;
1760 assert(ctx->null_cond.c == TCG_COND_NEVER);
1762 /* Handle TRUE and NEVER as direct branches. */
1763 if (c == TCG_COND_ALWAYS) {
1764 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1766 if (c == TCG_COND_NEVER) {
1767 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1770 taken = gen_new_label();
1771 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1774 /* Not taken: Condition not satisfied; nullify on backward branches. */
1775 n = is_n && disp < 0;
1776 if (n && use_nullify_skip(ctx)) {
1777 nullify_set(ctx, 0);
1778 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1780 if (!n && ctx->null_lab) {
1781 gen_set_label(ctx->null_lab);
1782 ctx->null_lab = NULL;
1784 nullify_set(ctx, n);
1785 if (ctx->iaoq_n == -1) {
1786 /* The temporary iaoq_n_var died at the branch above.
1787 Regenerate it here instead of saving it. */
1788 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1790 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1793 gen_set_label(taken);
1795 /* Taken: Condition satisfied; nullify on forward branches. */
1796 n = is_n && disp >= 0;
1797 if (n && use_nullify_skip(ctx)) {
1798 nullify_set(ctx, 0);
1799 gen_goto_tb(ctx, 1, dest, dest + 4);
1801 nullify_set(ctx, n);
1802 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1805 /* Not taken: the branch itself was nullified. */
1806 if (ctx->null_lab) {
1807 gen_set_label(ctx->null_lab);
1808 ctx->null_lab = NULL;
1809 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1811 ctx->base.is_jmp = DISAS_NORETURN;
1816 /* Emit an unconditional branch to an indirect target. This handles
1817 nullification of the branch itself. */
1818 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1819 unsigned link, bool is_n)
1821 TCGv_reg a0, a1, next, tmp;
1824 assert(ctx->null_lab == NULL);
1826 if (ctx->null_cond.c == TCG_COND_NEVER) {
1828 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1830 next = tcg_temp_new();
1831 tcg_gen_mov_reg(next, dest);
1833 if (use_nullify_skip(ctx)) {
1834 tcg_gen_mov_reg(cpu_iaoq_f, next);
1835 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1836 nullify_set(ctx, 0);
1837 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1840 ctx->null_cond.c = TCG_COND_ALWAYS;
1843 ctx->iaoq_n_var = next;
1844 } else if (is_n && use_nullify_skip(ctx)) {
1845 /* The (conditional) branch, B, nullifies the next insn, N,
1846 and we're allowed to skip execution N (no single-step or
1847 tracepoint in effect). Since the goto_ptr that we must use
1848 for the indirect branch consumes no special resources, we
1849 can (conditionally) skip B and continue execution. */
1850 /* The use_nullify_skip test implies we have a known control path. */
1851 tcg_debug_assert(ctx->iaoq_b != -1);
1852 tcg_debug_assert(ctx->iaoq_n != -1);
1854 /* We do have to handle the non-local temporary, DEST, before
1855 branching. Since IOAQ_F is not really live at this point, we
1856 can simply store DEST optimistically. Similarly with IAOQ_B. */
1857 tcg_gen_mov_reg(cpu_iaoq_f, dest);
1858 tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1862 tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1864 tcg_gen_lookup_and_goto_ptr();
1865 return nullify_end(ctx);
1867 c = ctx->null_cond.c;
1868 a0 = ctx->null_cond.a0;
1869 a1 = ctx->null_cond.a1;
1871 tmp = tcg_temp_new();
1872 next = tcg_temp_new();
1874 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1875 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1877 ctx->iaoq_n_var = next;
1880 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1884 /* The branch nullifies the next insn, which means the state of N
1885 after the branch is the inverse of the state of N that applied
1887 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1888 cond_free(&ctx->null_cond);
1889 ctx->null_cond = cond_make_n();
1890 ctx->psw_n_nonzero = true;
1892 cond_free(&ctx->null_cond);
1899 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1900 * IAOQ_Next{30..31} ← GR[b]{30..31};
1902 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1903 * which keeps the privilege level from being increased.
1905 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1908 switch (ctx->privilege) {
1910 /* Privilege 0 is maximum and is allowed to decrease. */
1913 /* Privilege 3 is minimum and is never allowed to increase. */
1914 dest = tcg_temp_new();
1915 tcg_gen_ori_reg(dest, offset, 3);
1918 dest = tcg_temp_new();
1919 tcg_gen_andi_reg(dest, offset, -4);
1920 tcg_gen_ori_reg(dest, dest, ctx->privilege);
1921 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1927 #ifdef CONFIG_USER_ONLY
1928 /* On Linux, page zero is normally marked execute only + gateway.
1929 Therefore normal read or write is supposed to fail, but specific
1930 offsets have kernel code mapped to raise permissions to implement
1931 system calls. Handling this via an explicit check here, rather
1932 in than the "be disp(sr2,r0)" instruction that probably sent us
1933 here, is the easiest way to handle the branch delay slot on the
1934 aforementioned BE. */
1935 static void do_page_zero(DisasContext *ctx)
1937 /* If by some means we get here with PSW[N]=1, that implies that
1938 the B,GATE instruction would be skipped, and we'd fault on the
1939 next insn within the privileged page. */
1940 switch (ctx->null_cond.c) {
1941 case TCG_COND_NEVER:
1943 case TCG_COND_ALWAYS:
1944 tcg_gen_movi_reg(cpu_psw_n, 0);
1947 /* Since this is always the first (and only) insn within the
1948 TB, we should know the state of PSW[N] from TB->FLAGS. */
1949 g_assert_not_reached();
1952 /* Check that we didn't arrive here via some means that allowed
1953 non-sequential instruction execution. Normally the PSW[B] bit
1954 detects this by disallowing the B,GATE instruction to execute
1955 under such conditions. */
1956 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1960 switch (ctx->iaoq_f & -4) {
1961 case 0x00: /* Null pointer call */
1962 gen_excp_1(EXCP_IMP);
1963 ctx->base.is_jmp = DISAS_NORETURN;
1966 case 0xb0: /* LWS */
1967 gen_excp_1(EXCP_SYSCALL_LWS);
1968 ctx->base.is_jmp = DISAS_NORETURN;
1971 case 0xe0: /* SET_THREAD_POINTER */
1972 tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1973 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
1974 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
1975 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1978 case 0x100: /* SYSCALL */
1979 gen_excp_1(EXCP_SYSCALL);
1980 ctx->base.is_jmp = DISAS_NORETURN;
1985 gen_excp_1(EXCP_ILL);
1986 ctx->base.is_jmp = DISAS_NORETURN;
1992 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1994 cond_free(&ctx->null_cond);
1998 static bool trans_break(DisasContext *ctx, arg_break *a)
2000 return gen_excp_iir(ctx, EXCP_BREAK);
2003 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2005 /* No point in nullifying the memory barrier. */
2006 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2008 cond_free(&ctx->null_cond);
2012 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2015 TCGv_reg tmp = dest_gpr(ctx, rt);
2016 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2017 save_gpr(ctx, rt, tmp);
2019 cond_free(&ctx->null_cond);
2023 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2026 unsigned rs = a->sp;
2027 TCGv_i64 t0 = tcg_temp_new_i64();
2028 TCGv_reg t1 = tcg_temp_new();
2030 load_spr(ctx, t0, rs);
2031 tcg_gen_shri_i64(t0, t0, 32);
2032 tcg_gen_trunc_i64_reg(t1, t0);
2034 save_gpr(ctx, rt, t1);
2036 cond_free(&ctx->null_cond);
2040 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2043 unsigned ctl = a->r;
2048 #ifdef TARGET_HPPA64
2050 /* MFSAR without ,W masks low 5 bits. */
2051 tmp = dest_gpr(ctx, rt);
2052 tcg_gen_andi_reg(tmp, cpu_sar, 31);
2053 save_gpr(ctx, rt, tmp);
2057 save_gpr(ctx, rt, cpu_sar);
2059 case CR_IT: /* Interval Timer */
2060 /* FIXME: Respect PSW_S bit. */
2062 tmp = dest_gpr(ctx, rt);
2063 if (translator_io_start(&ctx->base)) {
2064 gen_helper_read_interval_timer(tmp);
2065 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2067 gen_helper_read_interval_timer(tmp);
2069 save_gpr(ctx, rt, tmp);
2070 return nullify_end(ctx);
2075 /* All other control registers are privileged. */
2076 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2080 tmp = tcg_temp_new();
2081 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2082 save_gpr(ctx, rt, tmp);
2085 cond_free(&ctx->null_cond);
2089 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2092 unsigned rs = a->sp;
2096 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2100 t64 = tcg_temp_new_i64();
2101 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2102 tcg_gen_shli_i64(t64, t64, 32);
2105 tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2106 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2108 tcg_gen_mov_i64(cpu_sr[rs], t64);
2111 return nullify_end(ctx);
2114 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2116 unsigned ctl = a->t;
2120 if (ctl == CR_SAR) {
2121 reg = load_gpr(ctx, a->r);
2122 tmp = tcg_temp_new();
2123 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2124 save_or_nullify(ctx, cpu_sar, tmp);
2126 cond_free(&ctx->null_cond);
2130 /* All other control registers are privileged or read-only. */
2131 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2133 #ifndef CONFIG_USER_ONLY
2135 reg = load_gpr(ctx, a->r);
2139 gen_helper_write_interval_timer(tcg_env, reg);
2142 gen_helper_write_eirr(tcg_env, reg);
2145 gen_helper_write_eiem(tcg_env, reg);
2146 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2151 /* FIXME: Respect PSW_Q bit */
2152 /* The write advances the queue and stores to the back element. */
2153 tmp = tcg_temp_new();
2154 tcg_gen_ld_reg(tmp, tcg_env,
2155 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2156 tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2157 tcg_gen_st_reg(reg, tcg_env,
2158 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2165 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2166 #ifndef CONFIG_USER_ONLY
2167 gen_helper_change_prot_id(tcg_env);
2172 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2175 return nullify_end(ctx);
2179 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2181 TCGv_reg tmp = tcg_temp_new();
2183 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2184 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2185 save_or_nullify(ctx, cpu_sar, tmp);
2187 cond_free(&ctx->null_cond);
2191 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2193 TCGv_reg dest = dest_gpr(ctx, a->t);
2195 #ifdef CONFIG_USER_ONLY
2196 /* We don't implement space registers in user mode. */
2197 tcg_gen_movi_reg(dest, 0);
2199 TCGv_i64 t0 = tcg_temp_new_i64();
2201 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2202 tcg_gen_shri_i64(t0, t0, 32);
2203 tcg_gen_trunc_i64_reg(dest, t0);
2205 save_gpr(ctx, a->t, dest);
2207 cond_free(&ctx->null_cond);
2211 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2213 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2214 #ifndef CONFIG_USER_ONLY
2219 tmp = tcg_temp_new();
2220 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2221 tcg_gen_andi_reg(tmp, tmp, ~a->i);
2222 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2223 save_gpr(ctx, a->t, tmp);
2225 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2226 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2227 return nullify_end(ctx);
2231 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2233 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2234 #ifndef CONFIG_USER_ONLY
2239 tmp = tcg_temp_new();
2240 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2241 tcg_gen_ori_reg(tmp, tmp, a->i);
2242 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2243 save_gpr(ctx, a->t, tmp);
2245 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2246 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2247 return nullify_end(ctx);
2251 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2253 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2254 #ifndef CONFIG_USER_ONLY
2258 reg = load_gpr(ctx, a->r);
2259 tmp = tcg_temp_new();
2260 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2262 /* Exit the TB to recognize new interrupts. */
2263 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2264 return nullify_end(ctx);
2268 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2270 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2271 #ifndef CONFIG_USER_ONLY
2275 gen_helper_rfi_r(tcg_env);
2277 gen_helper_rfi(tcg_env);
2279 /* Exit the TB to recognize new interrupts. */
2280 tcg_gen_exit_tb(NULL, 0);
2281 ctx->base.is_jmp = DISAS_NORETURN;
2283 return nullify_end(ctx);
2287 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2289 return do_rfi(ctx, false);
2292 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2294 return do_rfi(ctx, true);
2297 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2299 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2300 #ifndef CONFIG_USER_ONLY
2302 gen_helper_halt(tcg_env);
2303 ctx->base.is_jmp = DISAS_NORETURN;
2304 return nullify_end(ctx);
2308 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2310 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2311 #ifndef CONFIG_USER_ONLY
2313 gen_helper_reset(tcg_env);
2314 ctx->base.is_jmp = DISAS_NORETURN;
2315 return nullify_end(ctx);
2319 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2321 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2322 #ifndef CONFIG_USER_ONLY
2324 gen_helper_getshadowregs(tcg_env);
2325 return nullify_end(ctx);
2329 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2332 TCGv_reg dest = dest_gpr(ctx, a->b);
2333 TCGv_reg src1 = load_gpr(ctx, a->b);
2334 TCGv_reg src2 = load_gpr(ctx, a->x);
2336 /* The only thing we need to do is the base register modification. */
2337 tcg_gen_add_reg(dest, src1, src2);
2338 save_gpr(ctx, a->b, dest);
2340 cond_free(&ctx->null_cond);
2344 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2347 TCGv_i32 level, want;
2352 dest = dest_gpr(ctx, a->t);
2353 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2356 level = tcg_constant_i32(a->ri);
2358 level = tcg_temp_new_i32();
2359 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2360 tcg_gen_andi_i32(level, level, 3);
2362 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2364 gen_helper_probe(dest, tcg_env, addr, level, want);
2366 save_gpr(ctx, a->t, dest);
2367 return nullify_end(ctx);
2370 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2372 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2373 #ifndef CONFIG_USER_ONLY
2379 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2380 reg = load_gpr(ctx, a->r);
2382 gen_helper_itlba(tcg_env, addr, reg);
2384 gen_helper_itlbp(tcg_env, addr, reg);
2387 /* Exit TB for TLB change if mmu is enabled. */
2388 if (ctx->tb_flags & PSW_C) {
2389 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2391 return nullify_end(ctx);
2395 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2397 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2398 #ifndef CONFIG_USER_ONLY
2404 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2406 save_gpr(ctx, a->b, ofs);
2409 gen_helper_ptlbe(tcg_env);
2411 gen_helper_ptlb(tcg_env, addr);
2414 /* Exit TB for TLB change if mmu is enabled. */
2415 if (ctx->tb_flags & PSW_C) {
2416 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2418 return nullify_end(ctx);
2423 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2425 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2426 * page 13-9 (195/206)
2428 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2430 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2431 #ifndef CONFIG_USER_ONLY
2432 TCGv_tl addr, atl, stl;
2439 * if (not (pcxl or pcxl2))
2440 * return gen_illegal(ctx);
2442 * Note for future: these are 32-bit systems; no hppa64.
2445 atl = tcg_temp_new_tl();
2446 stl = tcg_temp_new_tl();
2447 addr = tcg_temp_new_tl();
2449 tcg_gen_ld32u_i64(stl, tcg_env,
2450 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2451 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2452 tcg_gen_ld32u_i64(atl, tcg_env,
2453 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2454 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2455 tcg_gen_shli_i64(stl, stl, 32);
2456 tcg_gen_or_tl(addr, atl, stl);
2458 reg = load_gpr(ctx, a->r);
2460 gen_helper_itlba(tcg_env, addr, reg);
2462 gen_helper_itlbp(tcg_env, addr, reg);
2465 /* Exit TB for TLB change if mmu is enabled. */
2466 if (ctx->tb_flags & PSW_C) {
2467 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2469 return nullify_end(ctx);
2473 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2475 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2476 #ifndef CONFIG_USER_ONLY
2478 TCGv_reg ofs, paddr;
2482 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2484 paddr = tcg_temp_new();
2485 gen_helper_lpa(paddr, tcg_env, vaddr);
2487 /* Note that physical address result overrides base modification. */
2489 save_gpr(ctx, a->b, ofs);
2491 save_gpr(ctx, a->t, paddr);
2493 return nullify_end(ctx);
2497 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2499 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2501 /* The Coherence Index is an implementation-defined function of the
2502 physical address. Two addresses with the same CI have a coherent
2503 view of the cache. Our implementation is to return 0 for all,
2504 since the entire address space is coherent. */
2505 save_gpr(ctx, a->t, tcg_constant_reg(0));
2507 cond_free(&ctx->null_cond);
2511 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2513 return do_add_reg(ctx, a, false, false, false, false);
2516 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2518 return do_add_reg(ctx, a, true, false, false, false);
2521 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2523 return do_add_reg(ctx, a, false, true, false, false);
2526 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2528 return do_add_reg(ctx, a, false, false, false, true);
2531 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2533 return do_add_reg(ctx, a, false, true, false, true);
2536 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2538 return do_sub_reg(ctx, a, false, false, false);
2541 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2543 return do_sub_reg(ctx, a, true, false, false);
2546 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2548 return do_sub_reg(ctx, a, false, false, true);
2551 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2553 return do_sub_reg(ctx, a, true, false, true);
2556 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2558 return do_sub_reg(ctx, a, false, true, false);
2561 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2563 return do_sub_reg(ctx, a, true, true, false);
2566 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2568 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2571 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2573 return do_log_reg(ctx, a, tcg_gen_and_reg);
2576 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2579 unsigned r2 = a->r2;
2580 unsigned r1 = a->r1;
2583 if (rt == 0) { /* NOP */
2584 cond_free(&ctx->null_cond);
2587 if (r2 == 0) { /* COPY */
2589 TCGv_reg dest = dest_gpr(ctx, rt);
2590 tcg_gen_movi_reg(dest, 0);
2591 save_gpr(ctx, rt, dest);
2593 save_gpr(ctx, rt, cpu_gr[r1]);
2595 cond_free(&ctx->null_cond);
2598 #ifndef CONFIG_USER_ONLY
2599 /* These are QEMU extensions and are nops in the real architecture:
2601 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2602 * or %r31,%r31,%r31 -- death loop; offline cpu
2603 * currently implemented as idle.
2605 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2606 /* No need to check for supervisor, as userland can only pause
2607 until the next timer interrupt. */
2610 /* Advance the instruction queue. */
2611 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2612 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2613 nullify_set(ctx, 0);
2615 /* Tell the qemu main loop to halt until this cpu has work. */
2616 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2617 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2618 gen_excp_1(EXCP_HALTED);
2619 ctx->base.is_jmp = DISAS_NORETURN;
2621 return nullify_end(ctx);
2625 return do_log_reg(ctx, a, tcg_gen_or_reg);
2628 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2630 return do_log_reg(ctx, a, tcg_gen_xor_reg);
2633 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2635 TCGv_reg tcg_r1, tcg_r2;
2640 tcg_r1 = load_gpr(ctx, a->r1);
2641 tcg_r2 = load_gpr(ctx, a->r2);
2642 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2643 return nullify_end(ctx);
2646 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2648 TCGv_reg tcg_r1, tcg_r2;
2653 tcg_r1 = load_gpr(ctx, a->r1);
2654 tcg_r2 = load_gpr(ctx, a->r2);
2655 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2656 return nullify_end(ctx);
2659 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2661 TCGv_reg tcg_r1, tcg_r2, tmp;
2666 tcg_r1 = load_gpr(ctx, a->r1);
2667 tcg_r2 = load_gpr(ctx, a->r2);
2668 tmp = tcg_temp_new();
2669 tcg_gen_not_reg(tmp, tcg_r2);
2670 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2671 return nullify_end(ctx);
2674 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2676 return do_uaddcm(ctx, a, false);
2679 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2681 return do_uaddcm(ctx, a, true);
2684 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2690 tmp = tcg_temp_new();
2691 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2693 tcg_gen_not_reg(tmp, tmp);
2695 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2696 tcg_gen_muli_reg(tmp, tmp, 6);
2697 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2698 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2699 return nullify_end(ctx);
2702 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2704 return do_dcor(ctx, a, false);
2707 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2709 return do_dcor(ctx, a, true);
2712 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2714 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2718 in1 = load_gpr(ctx, a->r1);
2719 in2 = load_gpr(ctx, a->r2);
2721 add1 = tcg_temp_new();
2722 add2 = tcg_temp_new();
2723 addc = tcg_temp_new();
2724 dest = tcg_temp_new();
2725 zero = tcg_constant_reg(0);
2727 /* Form R1 << 1 | PSW[CB]{8}. */
2728 tcg_gen_add_reg(add1, in1, in1);
2729 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2731 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2732 carry{8} requires that we subtract via + ~R2 + 1, as described in
2733 the manual. By extracting and masking V, we can produce the
2734 proper inputs to the addition without movcond. */
2735 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2736 tcg_gen_xor_reg(add2, in2, addc);
2737 tcg_gen_andi_reg(addc, addc, 1);
2738 /* ??? This is only correct for 32-bit. */
2739 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2740 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2742 /* Write back the result register. */
2743 save_gpr(ctx, a->t, dest);
2745 /* Write back PSW[CB]. */
2746 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2747 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2749 /* Write back PSW[V] for the division step. */
2750 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2751 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2753 /* Install the new nullification. */
2756 if (cond_need_sv(a->cf >> 1)) {
2757 /* ??? The lshift is supposed to contribute to overflow. */
2758 sv = do_add_sv(ctx, dest, add1, add2);
2760 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2763 return nullify_end(ctx);
2766 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2768 return do_add_imm(ctx, a, false, false);
2771 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2773 return do_add_imm(ctx, a, true, false);
2776 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2778 return do_add_imm(ctx, a, false, true);
2781 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2783 return do_add_imm(ctx, a, true, true);
2786 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2788 return do_sub_imm(ctx, a, false);
2791 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2793 return do_sub_imm(ctx, a, true);
2796 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2798 TCGv_reg tcg_im, tcg_r2;
2804 tcg_im = tcg_constant_reg(a->i);
2805 tcg_r2 = load_gpr(ctx, a->r);
2806 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2808 return nullify_end(ctx);
2811 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2813 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2814 return gen_illegal(ctx);
2816 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2817 a->disp, a->sp, a->m, a->size | MO_TE);
2821 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2823 assert(a->x == 0 && a->scale == 0);
2824 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2825 return gen_illegal(ctx);
2827 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2831 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2833 MemOp mop = MO_TE | MO_ALIGN | a->size;
2834 TCGv_reg zero, dest, ofs;
2840 /* Base register modification. Make sure if RT == RB,
2841 we see the result of the load. */
2842 dest = tcg_temp_new();
2844 dest = dest_gpr(ctx, a->t);
2847 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2848 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2851 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2852 * However actual hardware succeeds with aligned mod 4.
2853 * Detect this case and log a GUEST_ERROR.
2855 * TODO: HPPA64 relaxes the over-alignment requirement
2856 * with the ,co completer.
2858 gen_helper_ldc_check(addr);
2860 zero = tcg_constant_reg(0);
2861 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2864 save_gpr(ctx, a->b, ofs);
2866 save_gpr(ctx, a->t, dest);
2868 return nullify_end(ctx);
2871 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2878 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2879 ctx->mmu_idx == MMU_PHYS_IDX);
2880 val = load_gpr(ctx, a->r);
2882 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2883 gen_helper_stby_e_parallel(tcg_env, addr, val);
2885 gen_helper_stby_e(tcg_env, addr, val);
2888 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2889 gen_helper_stby_b_parallel(tcg_env, addr, val);
2891 gen_helper_stby_b(tcg_env, addr, val);
2895 tcg_gen_andi_reg(ofs, ofs, ~3);
2896 save_gpr(ctx, a->b, ofs);
2899 return nullify_end(ctx);
2902 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2904 int hold_mmu_idx = ctx->mmu_idx;
2906 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2907 ctx->mmu_idx = MMU_PHYS_IDX;
2909 ctx->mmu_idx = hold_mmu_idx;
2913 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2915 int hold_mmu_idx = ctx->mmu_idx;
2917 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2918 ctx->mmu_idx = MMU_PHYS_IDX;
2920 ctx->mmu_idx = hold_mmu_idx;
2924 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2926 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2928 tcg_gen_movi_reg(tcg_rt, a->i);
2929 save_gpr(ctx, a->t, tcg_rt);
2930 cond_free(&ctx->null_cond);
2934 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2936 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2937 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2939 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2940 save_gpr(ctx, 1, tcg_r1);
2941 cond_free(&ctx->null_cond);
2945 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2947 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2949 /* Special case rb == 0, for the LDI pseudo-op.
2950 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2952 tcg_gen_movi_reg(tcg_rt, a->i);
2954 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
2956 save_gpr(ctx, a->t, tcg_rt);
2957 cond_free(&ctx->null_cond);
2961 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2962 unsigned c, unsigned f, unsigned n, int disp)
2964 TCGv_reg dest, in2, sv;
2967 in2 = load_gpr(ctx, r);
2968 dest = tcg_temp_new();
2970 tcg_gen_sub_reg(dest, in1, in2);
2973 if (cond_need_sv(c)) {
2974 sv = do_sub_sv(ctx, dest, in1, in2);
2977 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
2978 return do_cbranch(ctx, disp, n, &cond);
2981 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
2984 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
2987 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
2990 return do_cmpb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
2993 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2994 unsigned c, unsigned f, unsigned n, int disp)
2996 TCGv_reg dest, in2, sv, cb_msb;
2999 in2 = load_gpr(ctx, r);
3000 dest = tcg_temp_new();
3004 if (cond_need_cb(c)) {
3005 cb_msb = tcg_temp_new();
3006 tcg_gen_movi_reg(cb_msb, 0);
3007 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3009 tcg_gen_add_reg(dest, in1, in2);
3011 if (cond_need_sv(c)) {
3012 sv = do_add_sv(ctx, dest, in1, in2);
3015 cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3016 save_gpr(ctx, r, dest);
3017 return do_cbranch(ctx, disp, n, &cond);
3020 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3023 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3026 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3029 return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3032 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3034 TCGv_reg tmp, tcg_r;
3039 tmp = tcg_temp_new();
3040 tcg_r = load_gpr(ctx, a->r);
3041 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3043 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3044 return do_cbranch(ctx, a->disp, a->n, &cond);
3047 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3049 TCGv_reg tmp, tcg_r;
3054 tmp = tcg_temp_new();
3055 tcg_r = load_gpr(ctx, a->r);
3056 tcg_gen_shli_reg(tmp, tcg_r, a->p);
3058 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3059 return do_cbranch(ctx, a->disp, a->n, &cond);
3062 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3069 dest = dest_gpr(ctx, a->r2);
3071 tcg_gen_movi_reg(dest, 0);
3073 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3076 cond = do_sed_cond(a->c, dest);
3077 return do_cbranch(ctx, a->disp, a->n, &cond);
3080 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3087 dest = dest_gpr(ctx, a->r);
3088 tcg_gen_movi_reg(dest, a->i);
3090 cond = do_sed_cond(a->c, dest);
3091 return do_cbranch(ctx, a->disp, a->n, &cond);
3094 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3102 dest = dest_gpr(ctx, a->t);
3104 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3105 tcg_gen_shr_reg(dest, dest, cpu_sar);
3106 } else if (a->r1 == a->r2) {
3107 TCGv_i32 t32 = tcg_temp_new_i32();
3108 TCGv_i32 s32 = tcg_temp_new_i32();
3110 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3111 tcg_gen_trunc_reg_i32(s32, cpu_sar);
3112 tcg_gen_rotr_i32(t32, t32, s32);
3113 tcg_gen_extu_i32_reg(dest, t32);
3115 TCGv_i64 t = tcg_temp_new_i64();
3116 TCGv_i64 s = tcg_temp_new_i64();
3118 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3119 tcg_gen_extu_reg_i64(s, cpu_sar);
3120 tcg_gen_shr_i64(t, t, s);
3121 tcg_gen_trunc_i64_reg(dest, t);
3123 save_gpr(ctx, a->t, dest);
3125 /* Install the new nullification. */
3126 cond_free(&ctx->null_cond);
3128 ctx->null_cond = do_sed_cond(a->c, dest);
3130 return nullify_end(ctx);
3133 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3135 unsigned sa = 31 - a->cpos;
3142 dest = dest_gpr(ctx, a->t);
3143 t2 = load_gpr(ctx, a->r2);
3145 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3146 } else if (TARGET_REGISTER_BITS == 32) {
3147 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3148 } else if (a->r1 == a->r2) {
3149 TCGv_i32 t32 = tcg_temp_new_i32();
3150 tcg_gen_trunc_reg_i32(t32, t2);
3151 tcg_gen_rotri_i32(t32, t32, sa);
3152 tcg_gen_extu_i32_reg(dest, t32);
3154 TCGv_i64 t64 = tcg_temp_new_i64();
3155 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3156 tcg_gen_shri_i64(t64, t64, sa);
3157 tcg_gen_trunc_i64_reg(dest, t64);
3159 save_gpr(ctx, a->t, dest);
3161 /* Install the new nullification. */
3162 cond_free(&ctx->null_cond);
3164 ctx->null_cond = do_sed_cond(a->c, dest);
3166 return nullify_end(ctx);
3169 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3171 unsigned len = 32 - a->clen;
3172 TCGv_reg dest, src, tmp;
3178 dest = dest_gpr(ctx, a->t);
3179 src = load_gpr(ctx, a->r);
3180 tmp = tcg_temp_new();
3182 /* Recall that SAR is using big-endian bit numbering. */
3183 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3185 tcg_gen_sar_reg(dest, src, tmp);
3186 tcg_gen_sextract_reg(dest, dest, 0, len);
3188 tcg_gen_shr_reg(dest, src, tmp);
3189 tcg_gen_extract_reg(dest, dest, 0, len);
3191 save_gpr(ctx, a->t, dest);
3193 /* Install the new nullification. */
3194 cond_free(&ctx->null_cond);
3196 ctx->null_cond = do_sed_cond(a->c, dest);
3198 return nullify_end(ctx);
3201 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3203 unsigned len = 32 - a->clen;
3204 unsigned cpos = 31 - a->pos;
3211 dest = dest_gpr(ctx, a->t);
3212 src = load_gpr(ctx, a->r);
3214 tcg_gen_sextract_reg(dest, src, cpos, len);
3216 tcg_gen_extract_reg(dest, src, cpos, len);
3218 save_gpr(ctx, a->t, dest);
3220 /* Install the new nullification. */
3221 cond_free(&ctx->null_cond);
3223 ctx->null_cond = do_sed_cond(a->c, dest);
3225 return nullify_end(ctx);
3228 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3230 unsigned len = 32 - a->clen;
3231 target_sreg mask0, mask1;
3237 if (a->cpos + len > 32) {
3241 dest = dest_gpr(ctx, a->t);
3242 mask0 = deposit64(0, a->cpos, len, a->i);
3243 mask1 = deposit64(-1, a->cpos, len, a->i);
3246 TCGv_reg src = load_gpr(ctx, a->t);
3248 tcg_gen_andi_reg(dest, src, mask1);
3251 tcg_gen_ori_reg(dest, src, mask0);
3253 tcg_gen_movi_reg(dest, mask0);
3255 save_gpr(ctx, a->t, dest);
3257 /* Install the new nullification. */
3258 cond_free(&ctx->null_cond);
3260 ctx->null_cond = do_sed_cond(a->c, dest);
3262 return nullify_end(ctx);
3265 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3267 unsigned rs = a->nz ? a->t : 0;
3268 unsigned len = 32 - a->clen;
3274 if (a->cpos + len > 32) {
3278 dest = dest_gpr(ctx, a->t);
3279 val = load_gpr(ctx, a->r);
3281 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3283 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3285 save_gpr(ctx, a->t, dest);
3287 /* Install the new nullification. */
3288 cond_free(&ctx->null_cond);
3290 ctx->null_cond = do_sed_cond(a->c, dest);
3292 return nullify_end(ctx);
3295 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3296 unsigned nz, unsigned clen, TCGv_reg val)
3298 unsigned rs = nz ? rt : 0;
3299 unsigned len = 32 - clen;
3300 TCGv_reg mask, tmp, shift, dest;
3301 unsigned msb = 1U << (len - 1);
3303 dest = dest_gpr(ctx, rt);
3304 shift = tcg_temp_new();
3305 tmp = tcg_temp_new();
3307 /* Convert big-endian bit numbering in SAR to left-shift. */
3308 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3310 mask = tcg_temp_new();
3311 tcg_gen_movi_reg(mask, msb + (msb - 1));
3312 tcg_gen_and_reg(tmp, val, mask);
3314 tcg_gen_shl_reg(mask, mask, shift);
3315 tcg_gen_shl_reg(tmp, tmp, shift);
3316 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3317 tcg_gen_or_reg(dest, dest, tmp);
3319 tcg_gen_shl_reg(dest, tmp, shift);
3321 save_gpr(ctx, rt, dest);
3323 /* Install the new nullification. */
3324 cond_free(&ctx->null_cond);
3326 ctx->null_cond = do_sed_cond(c, dest);
3328 return nullify_end(ctx);
3331 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3336 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3339 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3344 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, tcg_constant_reg(a->i));
3347 static bool trans_be(DisasContext *ctx, arg_be *a)
3351 #ifdef CONFIG_USER_ONLY
3352 /* ??? It seems like there should be a good way of using
3353 "be disp(sr2, r0)", the canonical gateway entry mechanism
3354 to our advantage. But that appears to be inconvenient to
3355 manage along side branch delay slots. Therefore we handle
3356 entry into the gateway page via absolute address. */
3357 /* Since we don't implement spaces, just branch. Do notice the special
3358 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3359 goto_tb to the TB containing the syscall. */
3361 return do_dbranch(ctx, a->disp, a->l, a->n);
3367 tmp = tcg_temp_new();
3368 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3369 tmp = do_ibranch_priv(ctx, tmp);
3371 #ifdef CONFIG_USER_ONLY
3372 return do_ibranch(ctx, tmp, a->l, a->n);
3374 TCGv_i64 new_spc = tcg_temp_new_i64();
3376 load_spr(ctx, new_spc, a->sp);
3378 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3379 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3381 if (a->n && use_nullify_skip(ctx)) {
3382 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3383 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3384 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3385 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3387 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3388 if (ctx->iaoq_b == -1) {
3389 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3391 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3392 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3393 nullify_set(ctx, a->n);
3395 tcg_gen_lookup_and_goto_ptr();
3396 ctx->base.is_jmp = DISAS_NORETURN;
3397 return nullify_end(ctx);
3401 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3403 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3406 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3408 target_ureg dest = iaoq_dest(ctx, a->disp);
3412 /* Make sure the caller hasn't done something weird with the queue.
3413 * ??? This is not quite the same as the PSW[B] bit, which would be
3414 * expensive to track. Real hardware will trap for
3416 * b gateway+4 (in delay slot of first branch)
3417 * However, checking for a non-sequential instruction queue *will*
3418 * diagnose the security hole
3421 * in which instructions at evil would run with increased privs.
3423 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3424 return gen_illegal(ctx);
3427 #ifndef CONFIG_USER_ONLY
3428 if (ctx->tb_flags & PSW_C) {
3429 CPUHPPAState *env = cpu_env(ctx->cs);
3430 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3431 /* If we could not find a TLB entry, then we need to generate an
3432 ITLB miss exception so the kernel will provide it.
3433 The resulting TLB fill operation will invalidate this TB and
3434 we will re-translate, at which point we *will* be able to find
3435 the TLB entry and determine if this is in fact a gateway page. */
3437 gen_excp(ctx, EXCP_ITLB_MISS);
3440 /* No change for non-gateway pages or for priv decrease. */
3441 if (type >= 4 && type - 4 < ctx->privilege) {
3442 dest = deposit32(dest, 0, 2, type - 4);
3445 dest &= -4; /* priv = 0 */
3450 TCGv_reg tmp = dest_gpr(ctx, a->l);
3451 if (ctx->privilege < 3) {
3452 tcg_gen_andi_reg(tmp, tmp, -4);
3454 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3455 save_gpr(ctx, a->l, tmp);
3458 return do_dbranch(ctx, dest, 0, a->n);
3461 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3464 TCGv_reg tmp = tcg_temp_new();
3465 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3466 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3467 /* The computation here never changes privilege level. */
3468 return do_ibranch(ctx, tmp, a->l, a->n);
3470 /* BLR R0,RX is a good way to load PC+8 into RX. */
3471 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3475 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3480 dest = load_gpr(ctx, a->b);
3482 dest = tcg_temp_new();
3483 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3484 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3486 dest = do_ibranch_priv(ctx, dest);
3487 return do_ibranch(ctx, dest, 0, a->n);
3490 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3494 #ifdef CONFIG_USER_ONLY
3495 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3496 return do_ibranch(ctx, dest, a->l, a->n);
3499 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3501 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3502 if (ctx->iaoq_b == -1) {
3503 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3505 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3506 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3508 copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3510 nullify_set(ctx, a->n);
3511 tcg_gen_lookup_and_goto_ptr();
3512 ctx->base.is_jmp = DISAS_NORETURN;
3513 return nullify_end(ctx);
3521 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3523 tcg_gen_mov_i32(dst, src);
3526 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3530 if (TARGET_REGISTER_BITS == 64) {
3531 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3533 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3537 save_frd(0, tcg_constant_i64(ret));
3538 return nullify_end(ctx);
3541 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3543 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3546 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3548 tcg_gen_mov_i64(dst, src);
3551 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3553 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3556 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3558 tcg_gen_andi_i32(dst, src, INT32_MAX);
3561 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3563 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3566 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3568 tcg_gen_andi_i64(dst, src, INT64_MAX);
3571 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3573 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3576 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3578 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3581 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3583 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3586 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3588 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3591 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3593 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3596 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3598 tcg_gen_xori_i32(dst, src, INT32_MIN);
3601 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3603 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3606 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3608 tcg_gen_xori_i64(dst, src, INT64_MIN);
3611 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3613 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3616 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3618 tcg_gen_ori_i32(dst, src, INT32_MIN);
3621 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3623 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3626 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3628 tcg_gen_ori_i64(dst, src, INT64_MIN);
3631 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3633 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3640 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3642 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3645 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3647 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3650 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3652 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3655 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3657 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3660 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3662 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3665 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3667 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3670 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3672 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3675 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3677 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3680 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3682 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3685 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3687 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3690 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3692 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3695 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3697 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3700 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3702 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3705 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3707 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3710 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3712 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3715 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3717 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3720 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3722 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3725 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3727 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3730 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3732 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3735 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3737 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3740 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3742 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3745 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3747 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3750 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3752 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3755 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3757 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3760 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3762 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3765 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3767 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3774 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3776 TCGv_i32 ta, tb, tc, ty;
3780 ta = load_frw0_i32(a->r1);
3781 tb = load_frw0_i32(a->r2);
3782 ty = tcg_constant_i32(a->y);
3783 tc = tcg_constant_i32(a->c);
3785 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
3787 return nullify_end(ctx);
3790 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3797 ta = load_frd0(a->r1);
3798 tb = load_frd0(a->r2);
3799 ty = tcg_constant_i32(a->y);
3800 tc = tcg_constant_i32(a->c);
3802 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3804 return nullify_end(ctx);
3807 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3814 tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
3821 case 0: /* simple */
3822 tcg_gen_andi_reg(t, t, 0x4000000);
3823 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3851 TCGv_reg c = tcg_constant_reg(mask);
3852 tcg_gen_or_reg(t, t, c);
3853 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3855 tcg_gen_andi_reg(t, t, mask);
3856 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3859 unsigned cbit = (a->y ^ 1) - 1;
3861 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3862 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3866 return nullify_end(ctx);
3873 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3875 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3878 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3880 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3883 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3885 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3888 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3890 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3893 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3895 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3898 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3900 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3903 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3905 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3908 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3910 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3913 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3919 x = load_frw0_i64(a->r1);
3920 y = load_frw0_i64(a->r2);
3921 tcg_gen_mul_i64(x, x, y);
3924 return nullify_end(ctx);
3927 /* Convert the fmpyadd single-precision register encodings to standard. */
3928 static inline int fmpyadd_s_reg(unsigned r)
3930 return (r & 16) * 2 + 16 + (r & 15);
3933 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3935 int tm = fmpyadd_s_reg(a->tm);
3936 int ra = fmpyadd_s_reg(a->ra);
3937 int ta = fmpyadd_s_reg(a->ta);
3938 int rm2 = fmpyadd_s_reg(a->rm2);
3939 int rm1 = fmpyadd_s_reg(a->rm1);
3943 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3944 do_fop_weww(ctx, ta, ta, ra,
3945 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3947 return nullify_end(ctx);
3950 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
3952 return do_fmpyadd_s(ctx, a, false);
3955 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
3957 return do_fmpyadd_s(ctx, a, true);
3960 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3964 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
3965 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
3966 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3968 return nullify_end(ctx);
3971 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
3973 return do_fmpyadd_d(ctx, a, false);
3976 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
3978 return do_fmpyadd_d(ctx, a, true);
3981 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
3986 x = load_frw0_i32(a->rm1);
3987 y = load_frw0_i32(a->rm2);
3988 z = load_frw0_i32(a->ra3);
3991 gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
3993 gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
3996 save_frw_i32(a->t, x);
3997 return nullify_end(ctx);
4000 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4005 x = load_frd0(a->rm1);
4006 y = load_frd0(a->rm2);
4007 z = load_frd0(a->ra3);
4010 gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4012 gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4016 return nullify_end(ctx);
4019 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4021 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4022 #ifndef CONFIG_USER_ONLY
4023 if (a->i == 0x100) {
4024 /* emulate PDC BTLB, called by SeaBIOS-hppa */
4026 gen_helper_diag_btlb(tcg_env);
4027 return nullify_end(ctx);
4030 qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4034 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4036 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4040 ctx->tb_flags = ctx->base.tb->flags;
4042 #ifdef CONFIG_USER_ONLY
4043 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4044 ctx->mmu_idx = MMU_USER_IDX;
4045 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4046 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4047 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4049 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4050 ctx->mmu_idx = (ctx->tb_flags & PSW_D
4051 ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4054 /* Recover the IAOQ values from the GVA + PRIV. */
4055 uint64_t cs_base = ctx->base.tb->cs_base;
4056 uint64_t iasq_f = cs_base & ~0xffffffffull;
4057 int32_t diff = cs_base;
4059 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4060 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4063 ctx->iaoq_n_var = NULL;
4065 /* Bound the number of instructions by those left on the page. */
4066 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4067 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4070 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4072 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4074 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4075 ctx->null_cond = cond_make_f();
4076 ctx->psw_n_nonzero = false;
4077 if (ctx->tb_flags & PSW_N) {
4078 ctx->null_cond.c = TCG_COND_ALWAYS;
4079 ctx->psw_n_nonzero = true;
4081 ctx->null_lab = NULL;
4084 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4086 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4088 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4091 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4093 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4094 CPUHPPAState *env = cpu_env(cs);
4097 /* Execute one insn. */
4098 #ifdef CONFIG_USER_ONLY
4099 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4101 ret = ctx->base.is_jmp;
4102 assert(ret != DISAS_NEXT);
4106 /* Always fetch the insn, even if nullified, so that we check
4107 the page permissions for execute. */
4108 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4110 /* Set up the IA queue for the next insn.
4111 This will be overwritten by a branch. */
4112 if (ctx->iaoq_b == -1) {
4114 ctx->iaoq_n_var = tcg_temp_new();
4115 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4117 ctx->iaoq_n = ctx->iaoq_b + 4;
4118 ctx->iaoq_n_var = NULL;
4121 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4122 ctx->null_cond.c = TCG_COND_NEVER;
4126 if (!decode(ctx, insn)) {
4129 ret = ctx->base.is_jmp;
4130 assert(ctx->null_lab == NULL);
4134 /* Advance the insn queue. Note that this check also detects
4135 a priority change within the instruction queue. */
4136 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4137 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4138 && use_goto_tb(ctx, ctx->iaoq_b)
4139 && (ctx->null_cond.c == TCG_COND_NEVER
4140 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4141 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4142 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4143 ctx->base.is_jmp = ret = DISAS_NORETURN;
4145 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4148 ctx->iaoq_f = ctx->iaoq_b;
4149 ctx->iaoq_b = ctx->iaoq_n;
4150 ctx->base.pc_next += 4;
4153 case DISAS_NORETURN:
4154 case DISAS_IAQ_N_UPDATED:
4158 case DISAS_IAQ_N_STALE:
4159 case DISAS_IAQ_N_STALE_EXIT:
4160 if (ctx->iaoq_f == -1) {
4161 tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4162 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4163 #ifndef CONFIG_USER_ONLY
4164 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4167 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4169 : DISAS_IAQ_N_UPDATED);
4170 } else if (ctx->iaoq_b == -1) {
4171 tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4176 g_assert_not_reached();
4180 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4182 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4183 DisasJumpType is_jmp = ctx->base.is_jmp;
4186 case DISAS_NORETURN:
4188 case DISAS_TOO_MANY:
4189 case DISAS_IAQ_N_STALE:
4190 case DISAS_IAQ_N_STALE_EXIT:
4191 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4192 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4195 case DISAS_IAQ_N_UPDATED:
4196 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4197 tcg_gen_lookup_and_goto_ptr();
4202 tcg_gen_exit_tb(NULL, 0);
4205 g_assert_not_reached();
4209 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4210 CPUState *cs, FILE *logfile)
4212 target_ulong pc = dcbase->pc_first;
4214 #ifdef CONFIG_USER_ONLY
4217 fprintf(logfile, "IN:\n0x00000000: (null)\n");
4220 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
4223 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
4226 fprintf(logfile, "IN:\n0x00000100: syscall\n");
4231 fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4232 target_disas(logfile, cs, pc, dcbase->tb->size);
4235 static const TranslatorOps hppa_tr_ops = {
4236 .init_disas_context = hppa_tr_init_disas_context,
4237 .tb_start = hppa_tr_tb_start,
4238 .insn_start = hppa_tr_insn_start,
4239 .translate_insn = hppa_tr_translate_insn,
4240 .tb_stop = hppa_tr_tb_stop,
4241 .disas_log = hppa_tr_disas_log,
4244 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4245 target_ulong pc, void *host_pc)
4248 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);