2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
31 #define HELPER_H "helper.h"
32 #include "exec/helper-info.c.inc"
36 /* Since we have a distinction between register size and address size,
37 we need to redefine all of these. */
41 #undef tcg_global_mem_new
43 #if TARGET_LONG_BITS == 64
44 #define TCGv_tl TCGv_i64
45 #define tcg_temp_new_tl tcg_temp_new_i64
46 #if TARGET_REGISTER_BITS == 64
47 #define tcg_gen_extu_reg_tl tcg_gen_mov_i64
49 #define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
52 #define TCGv_tl TCGv_i32
53 #define tcg_temp_new_tl tcg_temp_new_i32
54 #define tcg_gen_extu_reg_tl tcg_gen_mov_i32
57 #if TARGET_REGISTER_BITS == 64
58 #define TCGv_reg TCGv_i64
60 #define tcg_temp_new tcg_temp_new_i64
61 #define tcg_global_mem_new tcg_global_mem_new_i64
63 #define tcg_gen_movi_reg tcg_gen_movi_i64
64 #define tcg_gen_mov_reg tcg_gen_mov_i64
65 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
66 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
67 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
68 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
69 #define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
70 #define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
71 #define tcg_gen_ld_reg tcg_gen_ld_i64
72 #define tcg_gen_st8_reg tcg_gen_st8_i64
73 #define tcg_gen_st16_reg tcg_gen_st16_i64
74 #define tcg_gen_st32_reg tcg_gen_st32_i64
75 #define tcg_gen_st_reg tcg_gen_st_i64
76 #define tcg_gen_add_reg tcg_gen_add_i64
77 #define tcg_gen_addi_reg tcg_gen_addi_i64
78 #define tcg_gen_sub_reg tcg_gen_sub_i64
79 #define tcg_gen_neg_reg tcg_gen_neg_i64
80 #define tcg_gen_subfi_reg tcg_gen_subfi_i64
81 #define tcg_gen_subi_reg tcg_gen_subi_i64
82 #define tcg_gen_and_reg tcg_gen_and_i64
83 #define tcg_gen_andi_reg tcg_gen_andi_i64
84 #define tcg_gen_or_reg tcg_gen_or_i64
85 #define tcg_gen_ori_reg tcg_gen_ori_i64
86 #define tcg_gen_xor_reg tcg_gen_xor_i64
87 #define tcg_gen_xori_reg tcg_gen_xori_i64
88 #define tcg_gen_not_reg tcg_gen_not_i64
89 #define tcg_gen_shl_reg tcg_gen_shl_i64
90 #define tcg_gen_shli_reg tcg_gen_shli_i64
91 #define tcg_gen_shr_reg tcg_gen_shr_i64
92 #define tcg_gen_shri_reg tcg_gen_shri_i64
93 #define tcg_gen_sar_reg tcg_gen_sar_i64
94 #define tcg_gen_sari_reg tcg_gen_sari_i64
95 #define tcg_gen_brcond_reg tcg_gen_brcond_i64
96 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
97 #define tcg_gen_setcond_reg tcg_gen_setcond_i64
98 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
99 #define tcg_gen_mul_reg tcg_gen_mul_i64
100 #define tcg_gen_muli_reg tcg_gen_muli_i64
101 #define tcg_gen_div_reg tcg_gen_div_i64
102 #define tcg_gen_rem_reg tcg_gen_rem_i64
103 #define tcg_gen_divu_reg tcg_gen_divu_i64
104 #define tcg_gen_remu_reg tcg_gen_remu_i64
105 #define tcg_gen_discard_reg tcg_gen_discard_i64
106 #define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
107 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
108 #define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
109 #define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
110 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
111 #define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
112 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
113 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
114 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
115 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
116 #define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
117 #define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
118 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
119 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
120 #define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
121 #define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
122 #define tcg_gen_andc_reg tcg_gen_andc_i64
123 #define tcg_gen_eqv_reg tcg_gen_eqv_i64
124 #define tcg_gen_nand_reg tcg_gen_nand_i64
125 #define tcg_gen_nor_reg tcg_gen_nor_i64
126 #define tcg_gen_orc_reg tcg_gen_orc_i64
127 #define tcg_gen_clz_reg tcg_gen_clz_i64
128 #define tcg_gen_ctz_reg tcg_gen_ctz_i64
129 #define tcg_gen_clzi_reg tcg_gen_clzi_i64
130 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
131 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
132 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
133 #define tcg_gen_rotl_reg tcg_gen_rotl_i64
134 #define tcg_gen_rotli_reg tcg_gen_rotli_i64
135 #define tcg_gen_rotr_reg tcg_gen_rotr_i64
136 #define tcg_gen_rotri_reg tcg_gen_rotri_i64
137 #define tcg_gen_deposit_reg tcg_gen_deposit_i64
138 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
139 #define tcg_gen_extract_reg tcg_gen_extract_i64
140 #define tcg_gen_sextract_reg tcg_gen_sextract_i64
141 #define tcg_gen_extract2_reg tcg_gen_extract2_i64
142 #define tcg_constant_reg tcg_constant_i64
143 #define tcg_gen_movcond_reg tcg_gen_movcond_i64
144 #define tcg_gen_add2_reg tcg_gen_add2_i64
145 #define tcg_gen_sub2_reg tcg_gen_sub2_i64
146 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
147 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
148 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
149 #define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
151 #define TCGv_reg TCGv_i32
152 #define tcg_temp_new tcg_temp_new_i32
153 #define tcg_global_mem_new tcg_global_mem_new_i32
155 #define tcg_gen_movi_reg tcg_gen_movi_i32
156 #define tcg_gen_mov_reg tcg_gen_mov_i32
157 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
158 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
159 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
160 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
161 #define tcg_gen_ld32u_reg tcg_gen_ld_i32
162 #define tcg_gen_ld32s_reg tcg_gen_ld_i32
163 #define tcg_gen_ld_reg tcg_gen_ld_i32
164 #define tcg_gen_st8_reg tcg_gen_st8_i32
165 #define tcg_gen_st16_reg tcg_gen_st16_i32
166 #define tcg_gen_st32_reg tcg_gen_st32_i32
167 #define tcg_gen_st_reg tcg_gen_st_i32
168 #define tcg_gen_add_reg tcg_gen_add_i32
169 #define tcg_gen_addi_reg tcg_gen_addi_i32
170 #define tcg_gen_sub_reg tcg_gen_sub_i32
171 #define tcg_gen_neg_reg tcg_gen_neg_i32
172 #define tcg_gen_subfi_reg tcg_gen_subfi_i32
173 #define tcg_gen_subi_reg tcg_gen_subi_i32
174 #define tcg_gen_and_reg tcg_gen_and_i32
175 #define tcg_gen_andi_reg tcg_gen_andi_i32
176 #define tcg_gen_or_reg tcg_gen_or_i32
177 #define tcg_gen_ori_reg tcg_gen_ori_i32
178 #define tcg_gen_xor_reg tcg_gen_xor_i32
179 #define tcg_gen_xori_reg tcg_gen_xori_i32
180 #define tcg_gen_not_reg tcg_gen_not_i32
181 #define tcg_gen_shl_reg tcg_gen_shl_i32
182 #define tcg_gen_shli_reg tcg_gen_shli_i32
183 #define tcg_gen_shr_reg tcg_gen_shr_i32
184 #define tcg_gen_shri_reg tcg_gen_shri_i32
185 #define tcg_gen_sar_reg tcg_gen_sar_i32
186 #define tcg_gen_sari_reg tcg_gen_sari_i32
187 #define tcg_gen_brcond_reg tcg_gen_brcond_i32
188 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
189 #define tcg_gen_setcond_reg tcg_gen_setcond_i32
190 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
191 #define tcg_gen_mul_reg tcg_gen_mul_i32
192 #define tcg_gen_muli_reg tcg_gen_muli_i32
193 #define tcg_gen_div_reg tcg_gen_div_i32
194 #define tcg_gen_rem_reg tcg_gen_rem_i32
195 #define tcg_gen_divu_reg tcg_gen_divu_i32
196 #define tcg_gen_remu_reg tcg_gen_remu_i32
197 #define tcg_gen_discard_reg tcg_gen_discard_i32
198 #define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
199 #define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
200 #define tcg_gen_extu_i32_reg tcg_gen_mov_i32
201 #define tcg_gen_ext_i32_reg tcg_gen_mov_i32
202 #define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
203 #define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
204 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
205 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
206 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
207 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
208 #define tcg_gen_ext32u_reg tcg_gen_mov_i32
209 #define tcg_gen_ext32s_reg tcg_gen_mov_i32
210 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
211 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
212 #define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
213 #define tcg_gen_andc_reg tcg_gen_andc_i32
214 #define tcg_gen_eqv_reg tcg_gen_eqv_i32
215 #define tcg_gen_nand_reg tcg_gen_nand_i32
216 #define tcg_gen_nor_reg tcg_gen_nor_i32
217 #define tcg_gen_orc_reg tcg_gen_orc_i32
218 #define tcg_gen_clz_reg tcg_gen_clz_i32
219 #define tcg_gen_ctz_reg tcg_gen_ctz_i32
220 #define tcg_gen_clzi_reg tcg_gen_clzi_i32
221 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
222 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
223 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
224 #define tcg_gen_rotl_reg tcg_gen_rotl_i32
225 #define tcg_gen_rotli_reg tcg_gen_rotli_i32
226 #define tcg_gen_rotr_reg tcg_gen_rotr_i32
227 #define tcg_gen_rotri_reg tcg_gen_rotri_i32
228 #define tcg_gen_deposit_reg tcg_gen_deposit_i32
229 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
230 #define tcg_gen_extract_reg tcg_gen_extract_i32
231 #define tcg_gen_sextract_reg tcg_gen_sextract_i32
232 #define tcg_gen_extract2_reg tcg_gen_extract2_i32
233 #define tcg_constant_reg tcg_constant_i32
234 #define tcg_gen_movcond_reg tcg_gen_movcond_i32
235 #define tcg_gen_add2_reg tcg_gen_add2_i32
236 #define tcg_gen_sub2_reg tcg_gen_sub2_i32
237 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
238 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
239 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
240 #define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
241 #endif /* TARGET_REGISTER_BITS */
243 typedef struct DisasCond {
248 typedef struct DisasContext {
249 DisasContextBase base;
266 #ifdef CONFIG_USER_ONLY
271 #ifdef CONFIG_USER_ONLY
272 #define UNALIGN(C) (C)->unalign
274 #define UNALIGN(C) MO_ALIGN
277 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
278 static int expand_sm_imm(DisasContext *ctx, int val)
280 if (val & PSW_SM_E) {
281 val = (val & ~PSW_SM_E) | PSW_E;
283 if (val & PSW_SM_W) {
284 val = (val & ~PSW_SM_W) | PSW_W;
289 /* Inverted space register indicates 0 means sr0 not inferred from base. */
290 static int expand_sr3x(DisasContext *ctx, int val)
295 /* Convert the M:A bits within a memory insn to the tri-state value
296 we use for the final M. */
297 static int ma_to_m(DisasContext *ctx, int val)
299 return val & 2 ? (val & 1 ? -1 : 1) : 0;
302 /* Convert the sign of the displacement to a pre or post-modify. */
303 static int pos_to_m(DisasContext *ctx, int val)
308 static int neg_to_m(DisasContext *ctx, int val)
313 /* Used for branch targets and fp memory ops. */
314 static int expand_shl2(DisasContext *ctx, int val)
319 /* Used for fp memory ops. */
320 static int expand_shl3(DisasContext *ctx, int val)
325 /* Used for assemble_21. */
326 static int expand_shl11(DisasContext *ctx, int val)
332 /* Include the auto-generated decoder. */
333 #include "decode-insns.c.inc"
335 /* We are not using a goto_tb (for whatever reason), but have updated
336 the iaq (for whatever reason), so don't do it again on exit. */
337 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
339 /* We are exiting the TB, but have neither emitted a goto_tb, nor
340 updated the iaq for the next instruction to be executed. */
341 #define DISAS_IAQ_N_STALE DISAS_TARGET_1
343 /* Similarly, but we want to return to the main loop immediately
344 to recognize unmasked interrupts. */
345 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
346 #define DISAS_EXIT DISAS_TARGET_3
348 /* global register indexes */
349 static TCGv_reg cpu_gr[32];
350 static TCGv_i64 cpu_sr[4];
351 static TCGv_i64 cpu_srH;
352 static TCGv_reg cpu_iaoq_f;
353 static TCGv_reg cpu_iaoq_b;
354 static TCGv_i64 cpu_iasq_f;
355 static TCGv_i64 cpu_iasq_b;
356 static TCGv_reg cpu_sar;
357 static TCGv_reg cpu_psw_n;
358 static TCGv_reg cpu_psw_v;
359 static TCGv_reg cpu_psw_cb;
360 static TCGv_reg cpu_psw_cb_msb;
362 void hppa_translate_init(void)
364 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
366 typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
367 static const GlobalVar vars[] = {
368 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
379 /* Use the symbolic register names that match the disassembler. */
380 static const char gr_names[32][4] = {
381 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
382 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
383 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
384 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
386 /* SR[4-7] are not global registers so that we can index them. */
387 static const char sr_names[5][4] = {
388 "sr0", "sr1", "sr2", "sr3", "srH"
394 for (i = 1; i < 32; i++) {
395 cpu_gr[i] = tcg_global_mem_new(tcg_env,
396 offsetof(CPUHPPAState, gr[i]),
399 for (i = 0; i < 4; i++) {
400 cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
401 offsetof(CPUHPPAState, sr[i]),
404 cpu_srH = tcg_global_mem_new_i64(tcg_env,
405 offsetof(CPUHPPAState, sr[4]),
408 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
409 const GlobalVar *v = &vars[i];
410 *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
413 cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
414 offsetof(CPUHPPAState, iasq_f),
416 cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
417 offsetof(CPUHPPAState, iasq_b),
421 static DisasCond cond_make_f(void)
430 static DisasCond cond_make_t(void)
433 .c = TCG_COND_ALWAYS,
439 static DisasCond cond_make_n(void)
444 .a1 = tcg_constant_reg(0)
448 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
450 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
452 .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
456 static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
458 TCGv_reg tmp = tcg_temp_new();
459 tcg_gen_mov_reg(tmp, a0);
460 return cond_make_0_tmp(c, tmp);
463 static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
465 DisasCond r = { .c = c };
467 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
468 r.a0 = tcg_temp_new();
469 tcg_gen_mov_reg(r.a0, a0);
470 r.a1 = tcg_temp_new();
471 tcg_gen_mov_reg(r.a1, a1);
476 static void cond_free(DisasCond *cond)
483 case TCG_COND_ALWAYS:
484 cond->c = TCG_COND_NEVER;
491 static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
493 TCGv_reg t = tcg_temp_new();
494 tcg_gen_movi_reg(t, v);
498 static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
501 TCGv_reg t = tcg_temp_new();
502 tcg_gen_movi_reg(t, 0);
509 static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
511 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
512 return tcg_temp_new();
518 static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
520 if (ctx->null_cond.c != TCG_COND_NEVER) {
521 tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
522 ctx->null_cond.a1, dest, t);
524 tcg_gen_mov_reg(dest, t);
528 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
531 save_or_nullify(ctx, cpu_gr[reg], t);
543 static TCGv_i32 load_frw_i32(unsigned rt)
545 TCGv_i32 ret = tcg_temp_new_i32();
546 tcg_gen_ld_i32(ret, tcg_env,
547 offsetof(CPUHPPAState, fr[rt & 31])
548 + (rt & 32 ? LO_OFS : HI_OFS));
552 static TCGv_i32 load_frw0_i32(unsigned rt)
555 TCGv_i32 ret = tcg_temp_new_i32();
556 tcg_gen_movi_i32(ret, 0);
559 return load_frw_i32(rt);
563 static TCGv_i64 load_frw0_i64(unsigned rt)
565 TCGv_i64 ret = tcg_temp_new_i64();
567 tcg_gen_movi_i64(ret, 0);
569 tcg_gen_ld32u_i64(ret, tcg_env,
570 offsetof(CPUHPPAState, fr[rt & 31])
571 + (rt & 32 ? LO_OFS : HI_OFS));
576 static void save_frw_i32(unsigned rt, TCGv_i32 val)
578 tcg_gen_st_i32(val, tcg_env,
579 offsetof(CPUHPPAState, fr[rt & 31])
580 + (rt & 32 ? LO_OFS : HI_OFS));
586 static TCGv_i64 load_frd(unsigned rt)
588 TCGv_i64 ret = tcg_temp_new_i64();
589 tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
593 static TCGv_i64 load_frd0(unsigned rt)
596 TCGv_i64 ret = tcg_temp_new_i64();
597 tcg_gen_movi_i64(ret, 0);
604 static void save_frd(unsigned rt, TCGv_i64 val)
606 tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
609 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
611 #ifdef CONFIG_USER_ONLY
612 tcg_gen_movi_i64(dest, 0);
615 tcg_gen_mov_i64(dest, cpu_sr[reg]);
616 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
617 tcg_gen_mov_i64(dest, cpu_srH);
619 tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
624 /* Skip over the implementation of an insn that has been nullified.
625 Use this when the insn is too complex for a conditional move. */
626 static void nullify_over(DisasContext *ctx)
628 if (ctx->null_cond.c != TCG_COND_NEVER) {
629 /* The always condition should have been handled in the main loop. */
630 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
632 ctx->null_lab = gen_new_label();
634 /* If we're using PSW[N], copy it to a temp because... */
635 if (ctx->null_cond.a0 == cpu_psw_n) {
636 ctx->null_cond.a0 = tcg_temp_new();
637 tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
639 /* ... we clear it before branching over the implementation,
640 so that (1) it's clear after nullifying this insn and
641 (2) if this insn nullifies the next, PSW[N] is valid. */
642 if (ctx->psw_n_nonzero) {
643 ctx->psw_n_nonzero = false;
644 tcg_gen_movi_reg(cpu_psw_n, 0);
647 tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
648 ctx->null_cond.a1, ctx->null_lab);
649 cond_free(&ctx->null_cond);
653 /* Save the current nullification state to PSW[N]. */
654 static void nullify_save(DisasContext *ctx)
656 if (ctx->null_cond.c == TCG_COND_NEVER) {
657 if (ctx->psw_n_nonzero) {
658 tcg_gen_movi_reg(cpu_psw_n, 0);
662 if (ctx->null_cond.a0 != cpu_psw_n) {
663 tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
664 ctx->null_cond.a0, ctx->null_cond.a1);
665 ctx->psw_n_nonzero = true;
667 cond_free(&ctx->null_cond);
670 /* Set a PSW[N] to X. The intention is that this is used immediately
671 before a goto_tb/exit_tb, so that there is no fallthru path to other
672 code within the TB. Therefore we do not update psw_n_nonzero. */
673 static void nullify_set(DisasContext *ctx, bool x)
675 if (ctx->psw_n_nonzero || x) {
676 tcg_gen_movi_reg(cpu_psw_n, x);
680 /* Mark the end of an instruction that may have been nullified.
681 This is the pair to nullify_over. Always returns true so that
682 it may be tail-called from a translate function. */
683 static bool nullify_end(DisasContext *ctx)
685 TCGLabel *null_lab = ctx->null_lab;
686 DisasJumpType status = ctx->base.is_jmp;
688 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
689 For UPDATED, we cannot update on the nullified path. */
690 assert(status != DISAS_IAQ_N_UPDATED);
692 if (likely(null_lab == NULL)) {
693 /* The current insn wasn't conditional or handled the condition
694 applied to it without a branch, so the (new) setting of
695 NULL_COND can be applied directly to the next insn. */
698 ctx->null_lab = NULL;
700 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
701 /* The next instruction will be unconditional,
702 and NULL_COND already reflects that. */
703 gen_set_label(null_lab);
705 /* The insn that we just executed is itself nullifying the next
706 instruction. Store the condition in the PSW[N] global.
707 We asserted PSW[N] = 0 in nullify_over, so that after the
708 label we have the proper value in place. */
710 gen_set_label(null_lab);
711 ctx->null_cond = cond_make_n();
713 if (status == DISAS_NORETURN) {
714 ctx->base.is_jmp = DISAS_NEXT;
719 static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
721 if (unlikely(ival == -1)) {
722 tcg_gen_mov_reg(dest, vval);
724 tcg_gen_movi_reg(dest, ival);
728 static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
730 return ctx->iaoq_f + disp + 8;
733 static void gen_excp_1(int exception)
735 gen_helper_excp(tcg_env, tcg_constant_i32(exception));
738 static void gen_excp(DisasContext *ctx, int exception)
740 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
741 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
743 gen_excp_1(exception);
744 ctx->base.is_jmp = DISAS_NORETURN;
747 static bool gen_excp_iir(DisasContext *ctx, int exc)
750 tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
751 tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
753 return nullify_end(ctx);
756 static bool gen_illegal(DisasContext *ctx)
758 return gen_excp_iir(ctx, EXCP_ILL);
761 #ifdef CONFIG_USER_ONLY
762 #define CHECK_MOST_PRIVILEGED(EXCP) \
763 return gen_excp_iir(ctx, EXCP)
765 #define CHECK_MOST_PRIVILEGED(EXCP) \
767 if (ctx->privilege != 0) { \
768 return gen_excp_iir(ctx, EXCP); \
773 static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
775 return translator_use_goto_tb(&ctx->base, dest);
778 /* If the next insn is to be nullified, and it's on the same page,
779 and we're not attempting to set a breakpoint on it, then we can
780 totally skip the nullified insn. This avoids creating and
781 executing a TB that merely branches to the next TB. */
782 static bool use_nullify_skip(DisasContext *ctx)
784 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
785 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
788 static void gen_goto_tb(DisasContext *ctx, int which,
789 target_ureg f, target_ureg b)
791 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
792 tcg_gen_goto_tb(which);
793 tcg_gen_movi_reg(cpu_iaoq_f, f);
794 tcg_gen_movi_reg(cpu_iaoq_b, b);
795 tcg_gen_exit_tb(ctx->base.tb, which);
797 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
798 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
799 tcg_gen_lookup_and_goto_ptr();
/* Whether arithmetic condition C (cf >> 1) requires the signed-overflow
   value: conditions < (2), <= (3) and SV (6).  */
static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}
/* Whether arithmetic condition C (cf >> 1) requires the carry/borrow
   bit: conditions NUV (4) and ZNV (5).  */
static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}
814 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
815 * the Parisc 1.1 Architecture Reference Manual for details.
818 static DisasCond do_cond(unsigned cf, TCGv_reg res,
819 TCGv_reg cb_msb, TCGv_reg sv)
825 case 0: /* Never / TR (0 / 1) */
826 cond = cond_make_f();
828 case 1: /* = / <> (Z / !Z) */
829 cond = cond_make_0(TCG_COND_EQ, res);
831 case 2: /* < / >= (N ^ V / !(N ^ V) */
832 tmp = tcg_temp_new();
833 tcg_gen_xor_reg(tmp, res, sv);
834 cond = cond_make_0_tmp(TCG_COND_LT, tmp);
836 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
840 * ((res < 0) ^ (sv < 0)) | !res
841 * ((res ^ sv) < 0) | !res
842 * (~(res ^ sv) >= 0) | !res
843 * !(~(res ^ sv) >> 31) | !res
844 * !(~(res ^ sv) >> 31 & res)
846 tmp = tcg_temp_new();
847 tcg_gen_eqv_reg(tmp, res, sv);
848 tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
849 tcg_gen_and_reg(tmp, tmp, res);
850 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
852 case 4: /* NUV / UV (!C / C) */
853 cond = cond_make_0(TCG_COND_EQ, cb_msb);
855 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
856 tmp = tcg_temp_new();
857 tcg_gen_neg_reg(tmp, cb_msb);
858 tcg_gen_and_reg(tmp, tmp, res);
859 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
861 case 6: /* SV / NSV (V / !V) */
862 cond = cond_make_0(TCG_COND_LT, sv);
864 case 7: /* OD / EV */
865 tmp = tcg_temp_new();
866 tcg_gen_andi_reg(tmp, res, 1);
867 cond = cond_make_0_tmp(TCG_COND_NE, tmp);
870 g_assert_not_reached();
873 cond.c = tcg_invert_cond(cond.c);
879 /* Similar, but for the special case of subtraction without borrow, we
880 can use the inputs directly. This can allow other computation to be
881 deleted as unused. */
883 static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
884 TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
890 cond = cond_make(TCG_COND_EQ, in1, in2);
893 cond = cond_make(TCG_COND_LT, in1, in2);
896 cond = cond_make(TCG_COND_LE, in1, in2);
898 case 4: /* << / >>= */
899 cond = cond_make(TCG_COND_LTU, in1, in2);
901 case 5: /* <<= / >> */
902 cond = cond_make(TCG_COND_LEU, in1, in2);
905 return do_cond(cf, res, NULL, sv);
908 cond.c = tcg_invert_cond(cond.c);
915 * Similar, but for logicals, where the carry and overflow bits are not
916 * computed, and use of them is undefined.
918 * Undefined or not, hardware does not trap. It seems reasonable to
919 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
920 * how cases c={2,3} are treated.
923 static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
927 case 9: /* undef, C */
928 case 11: /* undef, C & !Z */
929 case 12: /* undef, V */
930 return cond_make_f();
933 case 8: /* undef, !C */
934 case 10: /* undef, !C | Z */
935 case 13: /* undef, !V */
936 return cond_make_t();
939 return cond_make_0(TCG_COND_EQ, res);
941 return cond_make_0(TCG_COND_NE, res);
943 return cond_make_0(TCG_COND_LT, res);
945 return cond_make_0(TCG_COND_GE, res);
947 return cond_make_0(TCG_COND_LE, res);
949 return cond_make_0(TCG_COND_GT, res);
953 return do_cond(cf, res, NULL, NULL);
956 g_assert_not_reached();
960 /* Similar, but for shift/extract/deposit conditions. */
962 static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
966 /* Convert the compressed condition codes to standard.
967 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
968 4-7 are the reverse of 0-3. */
975 return do_log_cond(c * 2 + f, res);
978 /* Similar, but for unit conditions. */
980 static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
981 TCGv_reg in1, TCGv_reg in2)
984 TCGv_reg tmp, cb = NULL;
987 /* Since we want to test lots of carry-out bits all at once, do not
988 * do our normal thing and compute carry-in of bit B+1 since that
989 * leaves us with carry bits spread across two words.
992 tmp = tcg_temp_new();
993 tcg_gen_or_reg(cb, in1, in2);
994 tcg_gen_and_reg(tmp, in1, in2);
995 tcg_gen_andc_reg(cb, cb, res);
996 tcg_gen_or_reg(cb, cb, tmp);
1000 case 0: /* never / TR */
1001 case 1: /* undefined */
1002 case 5: /* undefined */
1003 cond = cond_make_f();
1006 case 2: /* SBZ / NBZ */
1007 /* See hasless(v,1) from
1008 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1010 tmp = tcg_temp_new();
1011 tcg_gen_subi_reg(tmp, res, 0x01010101u);
1012 tcg_gen_andc_reg(tmp, tmp, res);
1013 tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
1014 cond = cond_make_0(TCG_COND_NE, tmp);
1017 case 3: /* SHZ / NHZ */
1018 tmp = tcg_temp_new();
1019 tcg_gen_subi_reg(tmp, res, 0x00010001u);
1020 tcg_gen_andc_reg(tmp, tmp, res);
1021 tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
1022 cond = cond_make_0(TCG_COND_NE, tmp);
1025 case 4: /* SDC / NDC */
1026 tcg_gen_andi_reg(cb, cb, 0x88888888u);
1027 cond = cond_make_0(TCG_COND_NE, cb);
1030 case 6: /* SBC / NBC */
1031 tcg_gen_andi_reg(cb, cb, 0x80808080u);
1032 cond = cond_make_0(TCG_COND_NE, cb);
1035 case 7: /* SHC / NHC */
1036 tcg_gen_andi_reg(cb, cb, 0x80008000u);
1037 cond = cond_make_0(TCG_COND_NE, cb);
1041 g_assert_not_reached();
1044 cond.c = tcg_invert_cond(cond.c);
1050 /* Compute signed overflow for addition. */
1051 static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
1052 TCGv_reg in1, TCGv_reg in2)
1054 TCGv_reg sv = tcg_temp_new();
1055 TCGv_reg tmp = tcg_temp_new();
1057 tcg_gen_xor_reg(sv, res, in1);
1058 tcg_gen_xor_reg(tmp, in1, in2);
1059 tcg_gen_andc_reg(sv, sv, tmp);
1064 /* Compute signed overflow for subtraction. */
1065 static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1066 TCGv_reg in1, TCGv_reg in2)
1068 TCGv_reg sv = tcg_temp_new();
1069 TCGv_reg tmp = tcg_temp_new();
1071 tcg_gen_xor_reg(sv, res, in1);
1072 tcg_gen_xor_reg(tmp, in1, in2);
1073 tcg_gen_and_reg(sv, sv, tmp);
1078 static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1079 TCGv_reg in2, unsigned shift, bool is_l,
1080 bool is_tsv, bool is_tc, bool is_c, unsigned cf)
1082 TCGv_reg dest, cb, cb_msb, sv, tmp;
1083 unsigned c = cf >> 1;
1086 dest = tcg_temp_new();
1091 tmp = tcg_temp_new();
1092 tcg_gen_shli_reg(tmp, in1, shift);
1096 if (!is_l || cond_need_cb(c)) {
1097 TCGv_reg zero = tcg_constant_reg(0);
1098 cb_msb = tcg_temp_new();
1099 tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
1101 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
1104 cb = tcg_temp_new();
1105 tcg_gen_xor_reg(cb, in1, in2);
1106 tcg_gen_xor_reg(cb, cb, dest);
1109 tcg_gen_add_reg(dest, in1, in2);
1111 tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
1115 /* Compute signed overflow if required. */
1117 if (is_tsv || cond_need_sv(c)) {
1118 sv = do_add_sv(ctx, dest, in1, in2);
1120 /* ??? Need to include overflow from shift. */
1121 gen_helper_tsv(tcg_env, sv);
1125 /* Emit any conditional trap before any writeback. */
1126 cond = do_cond(cf, dest, cb_msb, sv);
1128 tmp = tcg_temp_new();
1129 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1130 gen_helper_tcond(tcg_env, tmp);
1133 /* Write back the result. */
1135 save_or_nullify(ctx, cpu_psw_cb, cb);
1136 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1138 save_gpr(ctx, rt, dest);
1140 /* Install the new nullification. */
1141 cond_free(&ctx->null_cond);
1142 ctx->null_cond = cond;
1145 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
1146 bool is_l, bool is_tsv, bool is_tc, bool is_c)
1148 TCGv_reg tcg_r1, tcg_r2;
1153 tcg_r1 = load_gpr(ctx, a->r1);
1154 tcg_r2 = load_gpr(ctx, a->r2);
1155 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
1156 return nullify_end(ctx);
1159 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1160 bool is_tsv, bool is_tc)
1162 TCGv_reg tcg_im, tcg_r2;
1167 tcg_im = load_const(ctx, a->i);
1168 tcg_r2 = load_gpr(ctx, a->r);
1169 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
1170 return nullify_end(ctx);
1173 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1174 TCGv_reg in2, bool is_tsv, bool is_b,
1175 bool is_tc, unsigned cf)
1177 TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
1178 unsigned c = cf >> 1;
1181 dest = tcg_temp_new();
1182 cb = tcg_temp_new();
1183 cb_msb = tcg_temp_new();
1185 zero = tcg_constant_reg(0);
1187 /* DEST,C = IN1 + ~IN2 + C. */
1188 tcg_gen_not_reg(cb, in2);
1189 tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
1190 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
1191 tcg_gen_xor_reg(cb, cb, in1);
1192 tcg_gen_xor_reg(cb, cb, dest);
1194 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1195 operations by seeding the high word with 1 and subtracting. */
1196 tcg_gen_movi_reg(cb_msb, 1);
1197 tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
1198 tcg_gen_eqv_reg(cb, in1, in2);
1199 tcg_gen_xor_reg(cb, cb, dest);
1202 /* Compute signed overflow if required. */
1204 if (is_tsv || cond_need_sv(c)) {
1205 sv = do_sub_sv(ctx, dest, in1, in2);
1207 gen_helper_tsv(tcg_env, sv);
1211 /* Compute the condition. We cannot use the special case for borrow. */
1213 cond = do_sub_cond(cf, dest, in1, in2, sv);
1215 cond = do_cond(cf, dest, cb_msb, sv);
1218 /* Emit any conditional trap before any writeback. */
1220 tmp = tcg_temp_new();
1221 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1222 gen_helper_tcond(tcg_env, tmp);
1225 /* Write back the result. */
1226 save_or_nullify(ctx, cpu_psw_cb, cb);
1227 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1228 save_gpr(ctx, rt, dest);
1230 /* Install the new nullification. */
1231 cond_free(&ctx->null_cond);
1232 ctx->null_cond = cond;
1235 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
1236 bool is_tsv, bool is_b, bool is_tc)
1238 TCGv_reg tcg_r1, tcg_r2;
1243 tcg_r1 = load_gpr(ctx, a->r1);
1244 tcg_r2 = load_gpr(ctx, a->r2);
1245 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
1246 return nullify_end(ctx);
/* Common body for immediate-register SUB variants (SUBI and friends):
   the immediate becomes IN1; borrow/trap-on-condition are never used here. */
1249 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1251     TCGv_reg tcg_im, tcg_r2;
1256     tcg_im = load_const(ctx, a->i);
1257     tcg_r2 = load_gpr(ctx, a->r);
1258     do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
1259     return nullify_end(ctx);
/* COMPARE AND CLEAR: compute IN1 - IN2 only to derive the condition,
   then write 0 to RT and install the condition as the new nullification.
   NOTE(review): extract elides some declarations/braces from the original. */
1262 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1263                       TCGv_reg in2, unsigned cf)
1268     dest = tcg_temp_new();
1269     tcg_gen_sub_reg(dest, in1, in2);
1271     /* Compute signed overflow if required. */
1273     if (cond_need_sv(cf >> 1)) {
1274         sv = do_sub_sv(ctx, dest, in1, in2);
1277     /* Form the condition for the compare. */
1278     cond = do_sub_cond(cf, dest, in1, in2, sv);
     /* RT is cleared regardless of the comparison result. */
1281     tcg_gen_movi_reg(dest, 0);
1282     save_gpr(ctx, rt, dest);
1284     /* Install the new nullification. */
1285     cond_free(&ctx->null_cond);
1286     ctx->null_cond = cond;
/* Generic logical operation (AND/OR/XOR/ANDCM): apply FN to the inputs,
   write RT, and install the logical-condition nullification from CF. */
1289 static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1290                    TCGv_reg in2, unsigned cf,
1291                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1293     TCGv_reg dest = dest_gpr(ctx, rt);
1295     /* Perform the operation, and writeback. */
1297     save_gpr(ctx, rt, dest)
1299     /* Install the new nullification. */
1300     cond_free(&ctx->null_cond);
1302     ctx->null_cond = do_log_cond(cf, dest);
/* Decode wrapper for register-register logicals: load operands and
   dispatch to do_log() with the generator callback FN. */
1306 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
1307                        void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1309     TCGv_reg tcg_r1, tcg_r2;
1314     tcg_r1 = load_gpr(ctx, a->r1);
1315     tcg_r2 = load_gpr(ctx, a->r2);
1316     do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
1317     return nullify_end(ctx);
/* Generic "unit" operation (UXOR/UADDCM/DCOR family): apply FN, derive the
   unit condition from CF, optionally trap on it (is_tc), then write back.
   NOTE(review): extract elides the cf==0 fast path's surrounding control flow. */
1320 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1321                     TCGv_reg in2, unsigned cf, bool is_tc,
1322                     void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1328         dest = dest_gpr(ctx, rt);
1330         save_gpr(ctx, rt, dest);
1331         cond_free(&ctx->null_cond);
1333         dest = tcg_temp_new();
1336         cond = do_unit_cond(cf, dest, in1, in2);
     /* Conditional trap must be emitted before any writeback. */
1339             TCGv_reg tmp = tcg_temp_new();
1340             tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1341             gen_helper_tcond(tcg_env, tmp);
1343         save_gpr(ctx, rt, dest);
1345         cond_free(&ctx->null_cond);
1346         ctx->null_cond = cond;
1350 #ifndef CONFIG_USER_ONLY
1351 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1352 from the top 2 bits of the base register. There are a few system
1353 instructions that have a 3-bit space specifier, for which SR0 is
1354 not special. To handle this, pass ~SP. */
1355 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
     /* Explicit (or complemented) space register: just load it. */
1365         spc = tcg_temp_new_tl();
1366         load_spr(ctx, spc, sp);
     /* All SRs known equal in this TB: any one will do. */
1369     if (ctx->tb_flags & TB_FLAG_SR_SAME) {
     /* SP == 0: index sr[4 + (base >> 30)] using the base's top 2 bits.
        The 030 mask scales the 2-bit index by sizeof(uint64_t)*2 octal. */
1373     ptr = tcg_temp_new_ptr();
1374     tmp = tcg_temp_new();
1375     spc = tcg_temp_new_tl();
1377     tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
1378     tcg_gen_andi_reg(tmp, tmp, 030);
1379     tcg_gen_trunc_reg_ptr(ptr, tmp);
1381     tcg_gen_add_ptr(ptr, ptr, tcg_env);
1382     tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
/* Form a global virtual address from base register RB, optional scaled
   index RX or displacement DISP, and space SP.  Returns the GVA in *pgva
   and, when base modification applies, the new base value in *pofs.
   NOTE(review): extract elides several branches of the original. */
1388 static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
1389                      unsigned rb, unsigned rx, int scale, target_sreg disp,
1390                      unsigned sp, int modify, bool is_phys)
1392     TCGv_reg base = load_gpr(ctx, rb);
1395     /* Note that RX is mutually exclusive with DISP. */
1397         ofs = tcg_temp_new();
1398         tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
1399         tcg_gen_add_reg(ofs, ofs, base);
1400     } else if (disp || modify) {
1401         ofs = tcg_temp_new();
1402         tcg_gen_addi_reg(ofs, base, disp);
     /* Pre-modify (<0) uses the updated address; post-modify (>0) the old base. */
1408 #ifdef CONFIG_USER_ONLY
1409     *pgva = (modify <= 0 ? ofs : base);
1411     TCGv_tl addr = tcg_temp_new_tl();
1412     tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
1413     if (ctx->tb_flags & PSW_W) {
1414         tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
     /* Merge in the space bits unless addressing physically. */
1417         tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
1423 /* Emit a memory load. The modify parameter should be
1424 * < 0 for pre-modify,
1425 * > 0 for post-modify,
1426 * = 0 for no base register update.
/* 32-bit flavor: loads into a TCGv_i32/TCGv_reg destination and writes the
   modified base back to RB when base modification is in effect. */
1428 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1429                        unsigned rx, int scale, target_sreg disp,
1430                        unsigned sp, int modify, MemOp mop)
1435     /* Caller uses nullify_over/nullify_end. */
1436     assert(ctx->null_cond.c == TCG_COND_NEVER);
1438     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1439              ctx->mmu_idx == MMU_PHYS_IDX);
1440     tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1442         save_gpr(ctx, rb, ofs);
/* 64-bit variant of do_load_32: same GVA formation and base-modify
   handling, loading into a TCGv_i64 destination. */
1446 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1447                        unsigned rx, int scale, target_sreg disp,
1448                        unsigned sp, int modify, MemOp mop)
1453     /* Caller uses nullify_over/nullify_end. */
1454     assert(ctx->null_cond.c == TCG_COND_NEVER);
1456     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1457              ctx->mmu_idx == MMU_PHYS_IDX);
1458     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1460         save_gpr(ctx, rb, ofs);
/* Emit a 32-bit memory store; modify semantics as documented at do_load_32
   (<0 pre-modify, >0 post-modify, 0 none). */
1464 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1465                         unsigned rx, int scale, target_sreg disp,
1466                         unsigned sp, int modify, MemOp mop)
1471     /* Caller uses nullify_over/nullify_end. */
1472     assert(ctx->null_cond.c == TCG_COND_NEVER);
1474     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1475              ctx->mmu_idx == MMU_PHYS_IDX);
1476     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1478         save_gpr(ctx, rb, ofs);
/* 64-bit variant of do_store_32. */
1482 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1483                         unsigned rx, int scale, target_sreg disp,
1484                         unsigned sp, int modify, MemOp mop)
1489     /* Caller uses nullify_over/nullify_end. */
1490     assert(ctx->null_cond.c == TCG_COND_NEVER);
1492     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1493              ctx->mmu_idx == MMU_PHYS_IDX);
1494     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1496         save_gpr(ctx, rb, ofs);
1500 #if TARGET_REGISTER_BITS == 64
1501 #define do_load_reg do_load_64
1502 #define do_store_reg do_store_64
1504 #define do_load_reg do_load_32
1505 #define do_store_reg do_store_32
/* Integer load into GPR RT.  When RT == RB with base modification we must
   load into a fresh temp so the updated base doesn't clobber the result. */
1508 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1509                     unsigned rx, int scale, target_sreg disp,
1510                     unsigned sp, int modify, MemOp mop)
1517         /* No base register update. */
1518         dest = dest_gpr(ctx, rt);
1520         /* Make sure if RT == RB, we see the result of the load. */
1521         dest = tcg_temp_new();
1523     do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1524     save_gpr(ctx, rt, dest);
1526     return nullify_end(ctx);
/* Single-word FP load into FR[rt] (32-bit half); FLDW decode wrapper below.
   Loading fr0 signals the FPU status helper. */
1529 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1530                       unsigned rx, int scale, target_sreg disp,
1531                       unsigned sp, int modify)
1537     tmp = tcg_temp_new_i32();
1538     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1539     save_frw_i32(rt, tmp);
     /* A load to fr0 reloads the FP status registers. */
1542         gen_helper_loaded_fr0(tcg_env);
1545     return nullify_end(ctx);
/* FLDW: scale is 2 (word) when the short-displacement scale bit is set. */
1548 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1550     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1551                      a->disp, a->sp, a->m);
/* Doubleword FP load into FR[rt]; FLDD decode wrapper below. */
1554 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1555                       unsigned rx, int scale, target_sreg disp,
1556                       unsigned sp, int modify)
1562     tmp = tcg_temp_new_i64();
1563     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
     /* A load to fr0 reloads the FP status registers. */
1567         gen_helper_loaded_fr0(tcg_env);
1570     return nullify_end(ctx);
/* FLDD: scale is 3 (doubleword) when the scale bit is set. */
1573 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1575     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1576                      a->disp, a->sp, a->m);
/* Integer store of GPR RT; no index register, displacement addressing only. */
1579 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1580                      target_sreg disp, unsigned sp,
1581                      int modify, MemOp mop)
1584     do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1585     return nullify_end(ctx);
/* Single-word FP store from FR[rt]; FSTW decode wrapper below. */
1588 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1589                        unsigned rx, int scale, target_sreg disp,
1590                        unsigned sp, int modify)
1596     tmp = load_frw_i32(rt);
1597     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1599     return nullify_end(ctx);
/* FSTW: word-scaled index when the scale bit is set. */
1602 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1604     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1605                       a->disp, a->sp, a->m);
/* Doubleword FP store from FR[rt]; FSTD decode wrapper below. */
1608 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1609                        unsigned rx, int scale, target_sreg disp,
1610                        unsigned sp, int modify)
1617     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1619     return nullify_end(ctx);
/* FSTD: doubleword-scaled index when the scale bit is set. */
1622 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1624     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1625                       a->disp, a->sp, a->m);
/* Unary FP op, word -> word: FUNC(dst, env, src) with src/dst in FR[ra]/FR[rt]. */
1628 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1629                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1634     tmp = load_frw0_i32(ra);
1636     func(tmp, tcg_env, tmp);
1638     save_frw_i32(rt, tmp);
1639     return nullify_end(ctx);
/* Unary FP op, doubleword source -> word destination (e.g. conversions). */
1642 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1643                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1650     dst = tcg_temp_new_i32();
1652     func(dst, tcg_env, src);
1654     save_frw_i32(rt, dst);
1655     return nullify_end(ctx);
/* Unary FP op, doubleword -> doubleword. */
1658 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1659                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1664     tmp = load_frd0(ra);
1666     func(tmp, tcg_env, tmp);
1669     return nullify_end(ctx);
/* Unary FP op, word source -> doubleword destination (e.g. widening convert). */
1672 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1673                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1679     src = load_frw0_i32(ra);
1680     dst = tcg_temp_new_i64();
1682     func(dst, tcg_env, src);
1685     return nullify_end(ctx);
/* Binary FP op on words: FUNC(a, env, a, b), result into FR[rt]. */
1688 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1689                         unsigned ra, unsigned rb,
1690                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1695     a = load_frw0_i32(ra);
1696     b = load_frw0_i32(rb);
1698     func(a, tcg_env, a, b);
1700     save_frw_i32(rt, a);
1701     return nullify_end(ctx);
/* Binary FP op on doublewords, analogous to do_fop_weww. */
1704 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1705                         unsigned ra, unsigned rb,
1706                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1714     func(a, tcg_env, a, b);
1717     return nullify_end(ctx);
1720 /* Emit an unconditional branch to a direct target, which may or may not
1721 have already had nullification handled. */
/* LINK != 0 stores the return address (iaoq_n) into GR[link].
   NOTE(review): extract elides the branches distinguishing the
   nullified/non-nullified paths; comments describe only visible code. */
1722 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1723                        unsigned link, bool is_n)
1725     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1727             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
     /* Mark the following insn as (statically) nullified. */
1731             ctx->null_cond.c = TCG_COND_ALWAYS;
1737             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
     /* If we can skip the nullified delay insn entirely, jump straight
        to DEST; otherwise chain through the current back address. */
1740         if (is_n && use_nullify_skip(ctx)) {
1741             nullify_set(ctx, 0);
1742             gen_goto_tb(ctx, 0, dest, dest + 4);
1744             nullify_set(ctx, is_n);
1745             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
     /* Fall-through path for a pending null_lab. */
1750             nullify_set(ctx, 0);
1751             gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1752             ctx->base.is_jmp = DISAS_NORETURN;
1757 /* Emit a conditional branch to a direct target. If the branch itself
1758 is nullified, we should have already used nullify_over. */
/* PA-RISC conditional branches nullify the delay slot on the "static
   mispredict" direction: backward branches nullify when not taken,
   forward branches nullify when taken. */
1759 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1762     target_ureg dest = iaoq_dest(ctx, disp);
1763     TCGLabel *taken = NULL;
1764     TCGCond c = cond->c;
1767     assert(ctx->null_cond.c == TCG_COND_NEVER);
1769     /* Handle TRUE and NEVER as direct branches. */
1770     if (c == TCG_COND_ALWAYS) {
1771         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1773     if (c == TCG_COND_NEVER) {
1774         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1777     taken = gen_new_label();
1778     tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1781     /* Not taken: Condition not satisfied; nullify on backward branches. */
1782     n = is_n && disp < 0;
1783     if (n && use_nullify_skip(ctx)) {
1784         nullify_set(ctx, 0);
1785         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1787         if (!n && ctx->null_lab) {
1788             gen_set_label(ctx->null_lab);
1789             ctx->null_lab = NULL;
1791         nullify_set(ctx, n);
1792         if (ctx->iaoq_n == -1) {
1793             /* The temporary iaoq_n_var died at the branch above.
1794                Regenerate it here instead of saving it. */
1795             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1797         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1800     gen_set_label(taken);
1802     /* Taken: Condition satisfied; nullify on forward branches. */
1803     n = is_n && disp >= 0;
1804     if (n && use_nullify_skip(ctx)) {
1805         nullify_set(ctx, 0);
1806         gen_goto_tb(ctx, 1, dest, dest + 4);
1808         nullify_set(ctx, n);
1809         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1812     /* Not taken: the branch itself was nullified. */
1813     if (ctx->null_lab) {
1814         gen_set_label(ctx->null_lab);
1815         ctx->null_lab = NULL;
1816         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1818         ctx->base.is_jmp = DISAS_NORETURN;
1823 /* Emit an unconditional branch to an indirect target. This handles
1824 nullification of the branch itself. */
/* Three cases: (1) branch not conditional on nullification; (2) branch
   nullifies the next insn and we may skip it via goto_ptr; (3) fully
   dynamic: movcond between fall-through and branch targets.
   NOTE(review): extract elides several lines; comments cover visible code. */
1825 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1826                        unsigned link, bool is_n)
1828     TCGv_reg a0, a1, next, tmp;
1831     assert(ctx->null_lab == NULL);
1833     if (ctx->null_cond.c == TCG_COND_NEVER) {
1835             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1837         next = tcg_temp_new();
1838         tcg_gen_mov_reg(next, dest);
1840         if (use_nullify_skip(ctx)) {
1841             tcg_gen_mov_reg(cpu_iaoq_f, next);
1842             tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1843             nullify_set(ctx, 0);
1844             ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1847             ctx->null_cond.c = TCG_COND_ALWAYS;
1850         ctx->iaoq_n_var = next;
1851     } else if (is_n && use_nullify_skip(ctx)) {
1852         /* The (conditional) branch, B, nullifies the next insn, N,
1853            and we're allowed to skip execution N (no single-step or
1854            tracepoint in effect). Since the goto_ptr that we must use
1855            for the indirect branch consumes no special resources, we
1856            can (conditionally) skip B and continue execution. */
1857         /* The use_nullify_skip test implies we have a known control path. */
1858         tcg_debug_assert(ctx->iaoq_b != -1);
1859         tcg_debug_assert(ctx->iaoq_n != -1);
1861         /* We do have to handle the non-local temporary, DEST, before
1862            branching. Since IOAQ_F is not really live at this point, we
1863            can simply store DEST optimistically. Similarly with IAOQ_B. */
1864         tcg_gen_mov_reg(cpu_iaoq_f, dest);
1865         tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1869             tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1871         tcg_gen_lookup_and_goto_ptr();
1872         return nullify_end(ctx);
     /* Fully dynamic case: select between fall-through and DEST. */
1874         c = ctx->null_cond.c;
1875         a0 = ctx->null_cond.a0;
1876         a1 = ctx->null_cond.a1;
1878         tmp = tcg_temp_new();
1879         next = tcg_temp_new();
1881         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1882         tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1884         ctx->iaoq_n_var = next;
1887             tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1891             /* The branch nullifies the next insn, which means the state of N
1892                after the branch is the inverse of the state of N that applied
     to the branch. */
1894             tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1895             cond_free(&ctx->null_cond);
1896             ctx->null_cond = cond_make_n();
1897             ctx->psw_n_nonzero = true;
1899             cond_free(&ctx->null_cond);
1906 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1907 * IAOQ_Next{30..31} ← GR[b]{30..31};
1909 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1910 * which keeps the privilege level from being increased.
/* Clamp the privilege field (low 2 bits) of an indirect-branch target so
   that privilege can only decrease (numerically increase). */
1912 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1915     switch (ctx->privilege) {
1917         /* Privilege 0 is maximum and is allowed to decrease. */
1920         /* Privilege 3 is minimum and is never allowed to increase. */
1921         dest = tcg_temp_new();
1922         tcg_gen_ori_reg(dest, offset, 3);
     /* Intermediate levels: take max(offset priv bits, current priv). */
1925         dest = tcg_temp_new();
1926         tcg_gen_andi_reg(dest, offset, -4);
1927         tcg_gen_ori_reg(dest, dest, ctx->privilege);
1928         tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1934 #ifdef CONFIG_USER_ONLY
1935 /* On Linux, page zero is normally marked execute only + gateway.
1936 Therefore normal read or write is supposed to fail, but specific
1937 offsets have kernel code mapped to raise permissions to implement
1938 system calls. Handling this via an explicit check here, rather
1939 in than the "be disp(sr2,r0)" instruction that probably sent us
1940 here, is the easiest way to handle the branch delay slot on the
1941 aforementioned BE. */
1942 static void do_page_zero(DisasContext *ctx)
1944     /* If by some means we get here with PSW[N]=1, that implies that
1945        the B,GATE instruction would be skipped, and we'd fault on the
1946        next insn within the privileged page. */
1947     switch (ctx->null_cond.c) {
1948     case TCG_COND_NEVER:
1950     case TCG_COND_ALWAYS:
1951         tcg_gen_movi_reg(cpu_psw_n, 0);
1954         /* Since this is always the first (and only) insn within the
1955            TB, we should know the state of PSW[N] from TB->FLAGS. */
1956         g_assert_not_reached();
1959     /* Check that we didn't arrive here via some means that allowed
1960        non-sequential instruction execution. Normally the PSW[B] bit
1961        detects this by disallowing the B,GATE instruction to execute
1962        under such conditions. */
1963     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
     /* Dispatch on the gateway-page entry offset. */
1967     switch (ctx->iaoq_f & -4) {
1968     case 0x00: /* Null pointer call */
1969         gen_excp_1(EXCP_IMP);
1970         ctx->base.is_jmp = DISAS_NORETURN;
1973     case 0xb0: /* LWS */
1974         gen_excp_1(EXCP_SYSCALL_LWS);
1975         ctx->base.is_jmp = DISAS_NORETURN;
1978     case 0xe0: /* SET_THREAD_POINTER */
1979         tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1980         tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
1981         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
1982         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1985     case 0x100: /* SYSCALL */
1986         gen_excp_1(EXCP_SYSCALL);
1987         ctx->base.is_jmp = DISAS_NORETURN;
     /* Anything else on the gateway page is illegal. */
1992         gen_excp_1(EXCP_ILL);
1993         ctx->base.is_jmp = DISAS_NORETURN;
/* NOP: nothing to emit; just drop any pending nullification condition. */
1999 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2001     cond_free(&ctx->null_cond);
/* BREAK: raise the break exception, recording the insn in IIR. */
2005 static bool trans_break(DisasContext *ctx, arg_break *a)
2007     return gen_excp_iir(ctx, EXCP_BREAK);
/* SYNC/SYNCDMA: emit a full TCG memory barrier unconditionally. */
2010 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2012     /* No point in nullifying the memory barrier. */
2013     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2015     cond_free(&ctx->null_cond);
/* MFIA: move the current (front) instruction address into GR[rt]. */
2019 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2022     TCGv_reg tmp = dest_gpr(ctx, rt);
2023     tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2024     save_gpr(ctx, rt, tmp);
2026     cond_free(&ctx->null_cond);
/* MFSP: read space register RS; the architectural value lives in the
   high 32 bits of the 64-bit sr[] slot, hence the shift. */
2030 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2033     unsigned rs = a->sp;
2034     TCGv_i64 t0 = tcg_temp_new_i64();
2035     TCGv_reg t1 = tcg_temp_new();
2037     load_spr(ctx, t0, rs);
2038     tcg_gen_shri_i64(t0, t0, 32);
2039     tcg_gen_trunc_i64_reg(t1, t0);
2041     save_gpr(ctx, rt, t1);
2043     cond_free(&ctx->null_cond);
/* MFCTL: read a control register.  SAR and the interval timer are
   unprivileged; everything else requires most-privileged mode.
   NOTE(review): extract elides the switch skeleton around the cases. */
2047 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2050     unsigned ctl = a->r;
2055 #ifdef TARGET_HPPA64
     /* MFSAR without ,W masks low 5 bits (32-bit shift amounts). */
2057             /* MFSAR without ,W masks low 5 bits. */
2058             tmp = dest_gpr(ctx, rt);
2059             tcg_gen_andi_reg(tmp, cpu_sar, 31);
2060             save_gpr(ctx, rt, tmp);
2064         save_gpr(ctx, rt, cpu_sar);
2066     case CR_IT: /* Interval Timer */
2067         /* FIXME: Respect PSW_S bit. */
     /* Timer reads are I/O: end the TB if icount requires it. */
2069         tmp = dest_gpr(ctx, rt);
2070         if (translator_io_start(&ctx->base)) {
2071             gen_helper_read_interval_timer(tmp);
2072             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2074             gen_helper_read_interval_timer(tmp);
2076         save_gpr(ctx, rt, tmp);
2077         return nullify_end(ctx);
2082     /* All other control registers are privileged. */
2083     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2087     tmp = tcg_temp_new();
2088     tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2089     save_gpr(ctx, rt, tmp);
2092     cond_free(&ctx->null_cond);
/* MTSP: write GR[rr] into space register RS (privileged).  The space id
   is stored in the high half of the 64-bit sr[] slot.  SR0-3 may also be
   cached in cpu_sr[]; writing other SRs invalidates the SR_SAME hint. */
2096 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2099     unsigned rs = a->sp;
2103     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2107     t64 = tcg_temp_new_i64();
2108     tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2109     tcg_gen_shli_i64(t64, t64, 32);
2112         tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2113         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2115         tcg_gen_mov_i64(cpu_sr[rs], t64);
2118     return nullify_end(ctx);
/* MTCTL: write a control register.  SAR is unprivileged; the rest need
   most-privileged mode.  Timer/EIRR/EIEM go through helpers; IIASQ/IIAOQ
   writes rotate the two-deep exception queue.
   NOTE(review): extract elides the switch skeleton around the cases. */
2121 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2123     unsigned ctl = a->t;
2127     if (ctl == CR_SAR) {
2128         reg = load_gpr(ctx, a->r);
2129         tmp = tcg_temp_new();
     /* SAR holds a shift amount: mask to the register width. */
2130         tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2131         save_or_nullify(ctx, cpu_sar, tmp);
2133         cond_free(&ctx->null_cond);
2137     /* All other control registers are privileged or read-only. */
2138     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2140 #ifndef CONFIG_USER_ONLY
2142     reg = load_gpr(ctx, a->r);
2146         gen_helper_write_interval_timer(tcg_env, reg);
2149         gen_helper_write_eirr(tcg_env, reg);
     /* EIEM changes interrupt masking: force a TB exit. */
2152         gen_helper_write_eiem(tcg_env, reg);
2153         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2158         /* FIXME: Respect PSW_Q bit */
2159         /* The write advances the queue and stores to the back element. */
2160         tmp = tcg_temp_new();
2161         tcg_gen_ld_reg(tmp, tcg_env,
2162                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2163         tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2164         tcg_gen_st_reg(reg, tcg_env,
2165                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
     /* PID registers: the helper refreshes protection-id state. */
2172         tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2173 #ifndef CONFIG_USER_ONLY
2174         gen_helper_change_prot_id(tcg_env);
2179         tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2182     return nullify_end(ctx);
/* MTSARCM: write the one's complement of GR[r], masked to the shift width,
   into SAR. */
2186 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2188     TCGv_reg tmp = tcg_temp_new();
2190     tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2191     tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2192     save_or_nullify(ctx, cpu_sar, tmp);
2194     cond_free(&ctx->null_cond);
/* LDSID: load the space identifier selected by (sp, base) into GR[t].
   User mode has no space registers, so the result is 0 there. */
2198 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2200     TCGv_reg dest = dest_gpr(ctx, a->t);
2202 #ifdef CONFIG_USER_ONLY
2203     /* We don't implement space registers in user mode. */
2204     tcg_gen_movi_reg(dest, 0);
2206     TCGv_i64 t0 = tcg_temp_new_i64();
2208     tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
     /* Space id is kept in the high 32 bits of the sr[] slot. */
2209     tcg_gen_shri_i64(t0, t0, 32);
2210     tcg_gen_trunc_i64_reg(dest, t0);
2212     save_gpr(ctx, a->t, dest);
2214     cond_free(&ctx->null_cond);
/* RSM: reset (clear) system-mask bits given by the immediate; the old PSW
   is returned in GR[t].  Privileged; exits the TB so interrupt state is
   re-evaluated. */
2218 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2220     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2221 #ifndef CONFIG_USER_ONLY
2226     tmp = tcg_temp_new();
2227     tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2228     tcg_gen_andi_reg(tmp, tmp, ~a->i);
2229     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2230     save_gpr(ctx, a->t, tmp);
2232     /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2233     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2234     return nullify_end(ctx);
/* SSM: set system-mask bits given by the immediate; old PSW into GR[t].
   Privileged; exits the TB so newly enabled interrupts are recognized. */
2238 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2240     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2241 #ifndef CONFIG_USER_ONLY
2246     tmp = tcg_temp_new();
2247     tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2248     tcg_gen_ori_reg(tmp, tmp, a->i);
2249     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2250     save_gpr(ctx, a->t, tmp);
2252     /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2253     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2254     return nullify_end(ctx);
/* MTSM: replace the system mask wholesale from GR[r] (privileged). */
2258 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2260     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2261 #ifndef CONFIG_USER_ONLY
2265     reg = load_gpr(ctx, a->r);
2266     tmp = tcg_temp_new();
2267     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2269     /* Exit the TB to recognize new interrupts. */
2270     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2271     return nullify_end(ctx);
/* RFI / RFI,R: return from interruption, optionally restoring the shadow
   registers (rfi_r).  Privileged; always ends the TB. */
2275 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2277     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2278 #ifndef CONFIG_USER_ONLY
2282         gen_helper_rfi_r(tcg_env);
2284         gen_helper_rfi(tcg_env);
2286     /* Exit the TB to recognize new interrupts. */
2287     tcg_gen_exit_tb(NULL, 0);
2288     ctx->base.is_jmp = DISAS_NORETURN;
2290     return nullify_end(ctx);
/* Decode wrappers for the two RFI forms. */
2294 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2296     return do_rfi(ctx, false);
2299 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2301     return do_rfi(ctx, true);
/* Implementation-specific HALT (privileged): stop the machine via helper. */
2304 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2306     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2307 #ifndef CONFIG_USER_ONLY
2309     gen_helper_halt(tcg_env);
2310     ctx->base.is_jmp = DISAS_NORETURN;
2311     return nullify_end(ctx);
/* Implementation-specific RESET (privileged): reboot the machine via helper. */
2315 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2317     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2318 #ifndef CONFIG_USER_ONLY
2320     gen_helper_reset(tcg_env);
2321     ctx->base.is_jmp = DISAS_NORETURN;
2322     return nullify_end(ctx);
/* GETSHADOWREGS (privileged): copy the shadow registers back into the GPRs. */
2326 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2328     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2329 #ifndef CONFIG_USER_ONLY
2331     gen_helper_getshadowregs(tcg_env);
2332     return nullify_end(ctx);
/* Cache/TLB hint forms that we treat as nops except for the base-register
   update (e.g. PDC with ,m): add the scaled index into the base. */
2336 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2339         TCGv_reg dest = dest_gpr(ctx, a->b);
2340         TCGv_reg src1 = load_gpr(ctx, a->b);
2341         TCGv_reg src2 = load_gpr(ctx, a->x);
2343         /* The only thing we need to do is the base register modification. */
2344         tcg_gen_add_reg(dest, src1, src2);
2345         save_gpr(ctx, a->b, dest);
2347     cond_free(&ctx->null_cond);
/* PROBE / PROBEI: test read or write permission at (sp, b) for a privilege
   level given by immediate (ri as constant) or register (low 2 bits). */
2351 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2354     TCGv_i32 level, want;
2359     dest = dest_gpr(ctx, a->t);
2360     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2363         level = tcg_constant_i32(a->ri);
2365         level = tcg_temp_new_i32();
2366         tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2367         tcg_gen_andi_i32(level, level, 3);
2369     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2371     gen_helper_probe(dest, tcg_env, addr, level, want);
2373     save_gpr(ctx, a->t, dest);
2374     return nullify_end(ctx);
/* IITLBA/IITLBP/IDTLBA/IDTLBP: insert a TLB address or protection entry
   (privileged).  Exits the TB when translation is enabled so the new
   entry takes effect. */
2377 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2379     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2380 #ifndef CONFIG_USER_ONLY
2386     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2387     reg = load_gpr(ctx, a->r);
2389         gen_helper_itlba(tcg_env, addr, reg);
2391         gen_helper_itlbp(tcg_env, addr, reg);
2394     /* Exit TB for TLB change if mmu is enabled. */
2395     if (ctx->tb_flags & PSW_C) {
2396         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2398     return nullify_end(ctx);
/* PITLB/PDTLB/PITLBE/PDTLBE: purge one TLB entry, or the entire TLB for
   the "end" forms (privileged).  Honors base modification via ofs. */
2402 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2404     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2405 #ifndef CONFIG_USER_ONLY
2411     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2413         save_gpr(ctx, a->b, ofs);
2416         gen_helper_ptlbe(tcg_env);
2418         gen_helper_ptlb(tcg_env, addr);
2421     /* Exit TB for TLB change if mmu is enabled. */
2422     if (ctx->tb_flags & PSW_C) {
2423         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2425     return nullify_end(ctx);
2430 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2432 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2433 * page 13-9 (195/206)
/* The virtual address is reconstructed from the interruption registers
   (ISR/IOR for data, IIASQ/IIAOQ for instruction). */
2435 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2437     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2438 #ifndef CONFIG_USER_ONLY
2439     TCGv_tl addr, atl, stl;
2446      * if (not (pcxl or pcxl2))
2447      * return gen_illegal(ctx);
2449      * Note for future: these are 32-bit systems; no hppa64.
2452     atl = tcg_temp_new_tl();
2453     stl = tcg_temp_new_tl();
2454     addr = tcg_temp_new_tl();
2456     tcg_gen_ld32u_i64(stl, tcg_env,
2457                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2458                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2459     tcg_gen_ld32u_i64(atl, tcg_env,
2460                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2461                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
     /* Combine space (high 32) and offset (low 32) into one address. */
2462     tcg_gen_shli_i64(stl, stl, 32);
2463     tcg_gen_or_tl(addr, atl, stl);
2465     reg = load_gpr(ctx, a->r);
2467         gen_helper_itlba(tcg_env, addr, reg);
2469         gen_helper_itlbp(tcg_env, addr, reg);
2472     /* Exit TB for TLB change if mmu is enabled. */
2473     if (ctx->tb_flags & PSW_C) {
2474         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2476     return nullify_end(ctx);
/* LPA: translate a virtual address to physical via helper (privileged).
   The translated result overrides any base-modify writeback to the
   same register. */
2480 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2482     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2483 #ifndef CONFIG_USER_ONLY
2485     TCGv_reg ofs, paddr;
2489     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2491     paddr = tcg_temp_new();
2492     gen_helper_lpa(paddr, tcg_env, vaddr);
2494     /* Note that physical address result overrides base modification. */
2496         save_gpr(ctx, a->b, ofs);
2498     save_gpr(ctx, a->t, paddr);
2500     return nullify_end(ctx);
/* LCI: load coherence index (privileged).  Always 0 in this model. */
2504 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2506     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2508     /* The Coherence Index is an implementation-defined function of the
2509        physical address. Two addresses with the same CI have a coherent
2510        view of the cache. Our implementation is to return 0 for all,
2511        since the entire address space is coherent. */
2512     save_gpr(ctx, a->t, tcg_constant_reg(0));
2514     cond_free(&ctx->null_cond);
/* Decode wrappers for the ADD/SUB register families.  The boolean flags
   select the variant: ADD(is_l, is_tsv, is_tc, is_c) and
   SUB(is_tsv, is_b, is_tc) — logical/no-carry, trap-on-signed-overflow,
   trap-on-condition, with-carry/with-borrow. */
2518 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2520     return do_add_reg(ctx, a, false, false, false, false);
2523 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2525     return do_add_reg(ctx, a, true, false, false, false);
2528 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2530     return do_add_reg(ctx, a, false, true, false, false);
2533 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2535     return do_add_reg(ctx, a, false, false, false, true);
2538 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2540     return do_add_reg(ctx, a, false, true, false, true);
2543 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2545     return do_sub_reg(ctx, a, false, false, false);
2548 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2550     return do_sub_reg(ctx, a, true, false, false);
2553 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2555     return do_sub_reg(ctx, a, false, false, true);
2558 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2560     return do_sub_reg(ctx, a, true, true, true);
2563 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2565     return do_sub_reg(ctx, a, false, true, false);
2568 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2570     return do_sub_reg(ctx, a, true, true, false);
/* Logical register ops dispatched through do_log_reg with the
   corresponding TCG generator. */
2573 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2575     return do_log_reg(ctx, a, tcg_gen_andc_reg);
2578 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2580     return do_log_reg(ctx, a, tcg_gen_and_reg);
/* OR, with special-cased encodings: rt==0 is NOP, r2==0 is COPY (or clear
   when r1==0 too), and the QEMU idle-loop extension; otherwise a plain
   logical OR.  NOTE(review): extract elides some of the original's braces. */
2583 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2586     unsigned r2 = a->r2;
2587     unsigned r1 = a->r1;
2590     if (rt == 0) { /* NOP */
2591         cond_free(&ctx->null_cond);
2594     if (r2 == 0) { /* COPY */
     /* COPY of r0 is an explicit clear; otherwise forward GR[r1]. */
2596             TCGv_reg dest = dest_gpr(ctx, rt);
2597             tcg_gen_movi_reg(dest, 0);
2598             save_gpr(ctx, rt, dest);
2600             save_gpr(ctx, rt, cpu_gr[r1]);
2602         cond_free(&ctx->null_cond);
2605 #ifndef CONFIG_USER_ONLY
2606     /* These are QEMU extensions and are nops in the real architecture:
2608      * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2609      * or %r31,%r31,%r31 -- death loop; offline cpu
2610      * currently implemented as idle.
2612     if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2613         /* No need to check for supervisor, as userland can only pause
2614            until the next timer interrupt. */
2617         /* Advance the instruction queue. */
2618         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2619         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2620         nullify_set(ctx, 0);
2622         /* Tell the qemu main loop to halt until this cpu has work. */
2623         tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2624                        offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2625         gen_excp_1(EXCP_HALTED);
2626         ctx->base.is_jmp = DISAS_NORETURN;
2628         return nullify_end(ctx);
     /* Ordinary OR. */
2632     return do_log_reg(ctx, a, tcg_gen_or_reg);
/* XOR via the common logical path. */
2635 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2637     return do_log_reg(ctx, a, tcg_gen_xor_reg);
/* CMPCLR: compare registers, clear RT, set nullification from CF. */
2640 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2642     TCGv_reg tcg_r1, tcg_r2;
2647     tcg_r1 = load_gpr(ctx, a->r1);
2648     tcg_r2 = load_gpr(ctx, a->r2);
2649     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2650     return nullify_end(ctx);
/* UXOR: unit XOR — XOR with unit-condition nullification, never trapping. */
2653 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2655     TCGv_reg tcg_r1, tcg_r2;
2660     tcg_r1 = load_gpr(ctx, a->r1);
2661     tcg_r2 = load_gpr(ctx, a->r2);
2662     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2663     return nullify_end(ctx);
/* UADDCM / UADDCM,TC: unit add complement — R1 + ~R2 through the unit-op
   path; is_tc selects trap-on-condition.  Wrappers below. */
2666 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2668     TCGv_reg tcg_r1, tcg_r2, tmp;
2673     tcg_r1 = load_gpr(ctx, a->r1);
2674     tcg_r2 = load_gpr(ctx, a->r2);
2675     tmp = tcg_temp_new();
2676     tcg_gen_not_reg(tmp, tcg_r2);
2677     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2678     return nullify_end(ctx);
2681 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2683     return do_uaddcm(ctx, a, false);
2686 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2688     return do_uaddcm(ctx, a, true);
/* DCOR / IDCOR: decimal correct.  Builds a per-nibble correction value
   (6 per nibble without a carry) from the saved PSW carry bits, then adds
   (intermediate form, is_i) or subtracts it via the unit-op path.
   Wrappers below.  NOTE(review): the is_i branch structure is elided here. */
2691 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2697     tmp = tcg_temp_new();
2698     tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2700         tcg_gen_not_reg(tmp, tmp);
2702     tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2703     tcg_gen_muli_reg(tmp, tmp, 6);
2704     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2705             is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2706     return nullify_end(ctx);
2709 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2711     return do_dcor(ctx, a, false);
2714 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2716     return do_dcor(ctx, a, true);
/* DS: divide step.  Computes (r1 << 1 | PSW[CB]{8}) +/- r2 depending on
   PSW[V], updating PSW[CB] and PSW[V] for the following step.  See the
   PA-RISC manual's description of the divide-step primitive. */
2719 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2721 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2725 in1 = load_gpr(ctx, a->r1);
2726 in2 = load_gpr(ctx, a->r2);
2728 add1 = tcg_temp_new();
2729 add2 = tcg_temp_new();
2730 addc = tcg_temp_new();
2731 dest = tcg_temp_new();
2732 zero = tcg_constant_reg(0);
2734 /* Form R1 << 1 | PSW[CB]{8}. */
2735 tcg_gen_add_reg(add1, in1, in1);
2736 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2738 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2739 carry{8} requires that we subtract via + ~R2 + 1, as described in
2740 the manual. By extracting and masking V, we can produce the
2741 proper inputs to the addition without movcond. */
2742 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1)
2743 tcg_gen_xor_reg(add2, in2, addc);
2744 tcg_gen_andi_reg(addc, addc, 1);
2745 /* ??? This is only correct for 32-bit. */
2746 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2747 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2749 /* Write back the result register. */
2750 save_gpr(ctx, a->t, dest);
2752 /* Write back PSW[CB]. */
2753 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2754 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2756 /* Write back PSW[V] for the division step. */
2757 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2758 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2760 /* Install the new nullification. */
2763 if (cond_need_sv(a->cf >> 1)) {
2764 /* ??? The lshift is supposed to contribute to overflow. */
2765 sv = do_add_sv(ctx, dest, add1, add2);
2767 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2770 return nullify_end(ctx);
/* ADDI and variants: immediate add; the two bools select trap-on-signed-
   overflow (tsv) and trap-on-condition (tc) respectively. */
2773 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2775 return do_add_imm(ctx, a, false, false);
2778 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2780 return do_add_imm(ctx, a, true, false);
2783 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2785 return do_add_imm(ctx, a, false, true);
2788 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2790 return do_add_imm(ctx, a, true, true);
/* SUBI and trap-on-overflow variant. */
2793 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2795 return do_sub_imm(ctx, a, false);
2798 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2800 return do_sub_imm(ctx, a, true);
/* CMPICLR: compare immediate and clear, same shape as trans_cmpclr
   but with an immediate first operand. */
2803 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2805 TCGv_reg tcg_im, tcg_r2;
2811 tcg_im = load_const(ctx, a->i);
2812 tcg_r2 = load_gpr(ctx, a->r);
2813 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2815 return nullify_end(ctx);
/* LD*: general load.  64-bit accesses are illegal on a 32-bit target;
   scale applies the access-size shift to the index register. */
2818 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2820 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2821 return gen_illegal(ctx);
2823 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2824 a->disp, a->sp, a->m, a->size | MO_TE);
/* ST*: general store.  Stores have no index/scale forms. */
2828 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2830 assert(a->x == 0 && a->scale == 0);
2831 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2832 return gen_illegal(ctx);
2834 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
/* LDCW/LDCD: load and clear (atomic exchange with zero), the HPPA
   semaphore primitive.  Always aligned per the MemOp below. */
2838 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2840 MemOp mop = MO_TE | MO_ALIGN | a->size;
2841 TCGv_reg zero, dest, ofs;
2847 /* Base register modification. Make sure if RT == RB,
2848 we see the result of the load. */
2849 dest = tcg_temp_new();
2851 dest = dest_gpr(ctx, a->t);
2854 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2855 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2858 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2859 * However actual hardware succeeds with aligned mod 4.
2860 * Detect this case and log a GUEST_ERROR.
2862 * TODO: HPPA64 relaxes the over-alignment requirement
2863 * with the ,co completer.
/* Helper only diagnoses under-alignment; the access itself proceeds. */
2865 gen_helper_ldc_check(addr);
2867 zero = tcg_constant_reg(0);
2868 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
/* Write back the modified base, then the loaded value. */
2871 save_gpr(ctx, a->b, ofs);
2873 save_gpr(ctx, a->t, dest);
2875 return nullify_end(ctx);
/* STBY: store bytes, with separate helpers for the "begin" and "end"
   cases and parallel (atomic-aware) variants when the TB runs with
   CF_PARALLEL.  The base is rounded down to a word for modification. */
2878 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2885 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2886 ctx->mmu_idx == MMU_PHYS_IDX);
2887 val = load_gpr(ctx, a->r);
2889 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2890 gen_helper_stby_e_parallel(tcg_env, addr, val);
2892 gen_helper_stby_e(tcg_env, addr, val);
2895 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2896 gen_helper_stby_b_parallel(tcg_env, addr, val);
2898 gen_helper_stby_b(tcg_env, addr, val);
/* Base modification writes back the word-aligned address. */
2902 tcg_gen_andi_reg(ofs, ofs, ~3);
2903 save_gpr(ctx, a->b, ofs);
2906 return nullify_end(ctx);
/* LDWA/LDDA: load absolute — privileged; temporarily switch the
   translation to the physical MMU index around the load. */
2909 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2911 int hold_mmu_idx = ctx->mmu_idx;
2913 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2914 ctx->mmu_idx = MMU_PHYS_IDX;
2916 ctx->mmu_idx = hold_mmu_idx;
/* STWA/STDA: store absolute — privileged, same MMU-index swap. */
2920 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2922 int hold_mmu_idx = ctx->mmu_idx;
2924 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2925 ctx->mmu_idx = MMU_PHYS_IDX;
2927 ctx->mmu_idx = hold_mmu_idx;
/* LDIL: load immediate (left/high bits) into rt. */
2931 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2933 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2935 tcg_gen_movi_reg(tcg_rt, a->i);
2936 save_gpr(ctx, a->t, tcg_rt);
2937 cond_free(&ctx->null_cond);
/* ADDIL: add immediate (left) to r, result always into GR1. */
2941 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2943 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2944 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2946 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2947 save_gpr(ctx, 1, tcg_r1);
2948 cond_free(&ctx->null_cond);
/* LDO: load offset (rb + imm); also implements the LDI pseudo-op. */
2952 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2954 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2956 /* Special case rb == 0, for the LDI pseudo-op.
2957 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2959 tcg_gen_movi_reg(tcg_rt, a->i);
2961 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
2963 save_gpr(ctx, a->t, tcg_rt);
2964 cond_free(&ctx->null_cond);
/* Shared body for COMB/COMIB: subtract in1 - r, derive the branch
   condition from c/f, and emit a conditional branch to disp. */
2968 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2969 unsigned c, unsigned f, unsigned n, int disp)
2971 TCGv_reg dest, in2, sv;
2974 in2 = load_gpr(ctx, r);
2975 dest = tcg_temp_new();
2977 tcg_gen_sub_reg(dest, in1, in2);
/* Signed-overflow value only needed for some conditions. */
2980 if (cond_need_sv(c)) {
2981 sv = do_sub_sv(ctx, dest, in1, in2);
2984 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
2985 return do_cbranch(ctx, disp, n, &cond);
2988 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
2991 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
2994 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
2997 return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
/* Shared body for ADDB/ADDIB: add in1 + r, write the sum back to r,
   then conditionally branch on the result. */
3000 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3001 unsigned c, unsigned f, unsigned n, int disp)
3003 TCGv_reg dest, in2, sv, cb_msb;
3006 in2 = load_gpr(ctx, r);
3007 dest = tcg_temp_new();
/* Compute carry-out only when the condition needs it. */
3011 if (cond_need_cb(c)) {
3012 cb_msb = tcg_temp_new();
3013 tcg_gen_movi_reg(cb_msb, 0);
3014 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3016 tcg_gen_add_reg(dest, in1, in2);
3018 if (cond_need_sv(c)) {
3019 sv = do_add_sv(ctx, dest, in1, in2);
3022 cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3023 save_gpr(ctx, r, dest);
3024 return do_cbranch(ctx, disp, n, &cond);
3027 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3030 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3033 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3036 return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
/* BB,SAR: branch on the bit selected by SAR.  Shift the bit to the
   sign position and test the sign; a->c inverts the sense. */
3039 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3041 TCGv_reg tmp, tcg_r;
3046 tmp = tcg_temp_new();
3047 tcg_r = load_gpr(ctx, a->r);
3048 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3050 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3051 return do_cbranch(ctx, a->disp, a->n, &cond);
/* BB with immediate bit position, same sign-bit trick. */
3054 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3056 TCGv_reg tmp, tcg_r;
3061 tmp = tcg_temp_new();
3062 tcg_r = load_gpr(ctx, a->r);
3063 tcg_gen_shli_reg(tmp, tcg_r, a->p);
3065 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3066 return do_cbranch(ctx, a->disp, a->n, &cond);
/* MOVB: copy r1 to r2 and branch on the moved value's condition. */
3069 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3076 dest = dest_gpr(ctx, a->r2);
3078 tcg_gen_movi_reg(dest, 0);
3080 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3083 cond = do_sed_cond(a->c, dest);
3084 return do_cbranch(ctx, a->disp, a->n, &cond);
/* MOVIB: immediate form of MOVB. */
3087 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3094 dest = dest_gpr(ctx, a->r);
3095 tcg_gen_movi_reg(dest, a->i);
3097 cond = do_sed_cond(a->c, dest);
3098 return do_cbranch(ctx, a->disp, a->n, &cond);
/* SHRPW (variable): shift the 64-bit pair r1:r2 right by SAR.  The
   r1 == r2 case is a 32-bit rotate; the missing-r1 case is a plain
   zero-extended shift. */
3101 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3109 dest = dest_gpr(ctx, a->t);
3111 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3112 tcg_gen_shr_reg(dest, dest, cpu_sar);
3113 } else if (a->r1 == a->r2) {
3114 TCGv_i32 t32 = tcg_temp_new_i32();
3115 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3116 tcg_gen_rotr_i32(t32, t32, cpu_sar);
3117 tcg_gen_extu_i32_reg(dest, t32);
3119 TCGv_i64 t = tcg_temp_new_i64();
3120 TCGv_i64 s = tcg_temp_new_i64();
/* General case: concatenate to 64 bits and shift. */
3122 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3123 tcg_gen_extu_reg_i64(s, cpu_sar);
3124 tcg_gen_shr_i64(t, t, s);
3125 tcg_gen_trunc_i64_reg(dest, t);
3127 save_gpr(ctx, a->t, dest);
3129 /* Install the new nullification. */
3130 cond_free(&ctx->null_cond);
3132 ctx->null_cond = do_sed_cond(a->c, dest);
3134 return nullify_end(ctx);
/* SHRPW (immediate): same operation with a fixed shift amount,
   computed from cpos using big-endian bit numbering. */
3137 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3139 unsigned sa = 31 - a->cpos;
3146 dest = dest_gpr(ctx, a->t);
3147 t2 = load_gpr(ctx, a->r2);
3149 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3150 } else if (TARGET_REGISTER_BITS == 32) {
3151 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3152 } else if (a->r1 == a->r2) {
3153 TCGv_i32 t32 = tcg_temp_new_i32();
3154 tcg_gen_trunc_reg_i32(t32, t2);
3155 tcg_gen_rotri_i32(t32, t32, sa);
3156 tcg_gen_extu_i32_reg(dest, t32);
3158 TCGv_i64 t64 = tcg_temp_new_i64();
3159 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3160 tcg_gen_shri_i64(t64, t64, sa);
3161 tcg_gen_trunc_i64_reg(dest, t64);
3163 save_gpr(ctx, a->t, dest);
3165 /* Install the new nullification. */
3166 cond_free(&ctx->null_cond);
3168 ctx->null_cond = do_sed_cond(a->c, dest);
3170 return nullify_end(ctx);
/* EXTRW (variable): extract a len-bit field positioned by SAR,
   sign- or zero-extending per the elided se flag. */
3173 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3175 unsigned len = 32 - a->clen;
3176 TCGv_reg dest, src, tmp;
3182 dest = dest_gpr(ctx, a->t);
3183 src = load_gpr(ctx, a->r);
3184 tmp = tcg_temp_new();
3186 /* Recall that SAR is using big-endian bit numbering. */
3187 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3189 tcg_gen_sar_reg(dest, src, tmp);
3190 tcg_gen_sextract_reg(dest, dest, 0, len);
3192 tcg_gen_shr_reg(dest, src, tmp);
3193 tcg_gen_extract_reg(dest, dest, 0, len);
3195 save_gpr(ctx, a->t, dest);
3197 /* Install the new nullification. */
3198 cond_free(&ctx->null_cond);
3200 ctx->null_cond = do_sed_cond(a->c, dest);
3202 return nullify_end(ctx);
/* EXTRW (immediate): fixed-position field extract. */
3205 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3207 unsigned len = 32 - a->clen;
3208 unsigned cpos = 31 - a->pos;
3215 dest = dest_gpr(ctx, a->t);
3216 src = load_gpr(ctx, a->r);
3218 tcg_gen_sextract_reg(dest, src, cpos, len);
3220 tcg_gen_extract_reg(dest, src, cpos, len);
3222 save_gpr(ctx, a->t, dest);
3224 /* Install the new nullification. */
3225 cond_free(&ctx->null_cond);
3227 ctx->null_cond = do_sed_cond(a->c, dest);
3229 return nullify_end(ctx);
/* DEPWI (immediate position): deposit an immediate into a field of rt.
   mask0/mask1 precompute the OR/AND masks so the deposit reduces to at
   most one logical op on the old value. */
3232 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3234 unsigned len = 32 - a->clen;
3235 target_sreg mask0, mask1;
/* Field must fit within the 32-bit word. */
3241 if (a->cpos + len > 32) {
3245 dest = dest_gpr(ctx, a->t);
3246 mask0 = deposit64(0, a->cpos, len, a->i);
3247 mask1 = deposit64(-1, a->cpos, len, a->i);
3250 TCGv_reg src = load_gpr(ctx, a->t);
3252 tcg_gen_andi_reg(dest, src, mask1);
3255 tcg_gen_ori_reg(dest, src, mask0);
3257 tcg_gen_movi_reg(dest, mask0);
3259 save_gpr(ctx, a->t, dest);
3261 /* Install the new nullification. */
3262 cond_free(&ctx->null_cond);
3264 ctx->null_cond = do_sed_cond(a->c, dest);
3266 return nullify_end(ctx);
/* DEPW (immediate position): deposit register val into rt; nz selects
   merging with the old rt versus depositing into zero. */
3269 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3271 unsigned rs = a->nz ? a->t : 0;
3272 unsigned len = 32 - a->clen;
3278 if (a->cpos + len > 32) {
3282 dest = dest_gpr(ctx, a->t);
3283 val = load_gpr(ctx, a->r);
3285 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3287 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3289 save_gpr(ctx, a->t, dest);
3291 /* Install the new nullification. */
3292 cond_free(&ctx->null_cond);
3294 ctx->null_cond = do_sed_cond(a->c, dest);
3296 return nullify_end(ctx);
/* Shared body for DEPW,SAR / DEPWI,SAR: deposit val at the variable
   position given by SAR, by building a shifted mask at runtime. */
3299 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3300 unsigned nz, unsigned clen, TCGv_reg val)
3302 unsigned rs = nz ? rt : 0;
3303 unsigned len = 32 - clen;
3304 TCGv_reg mask, tmp, shift, dest;
3305 unsigned msb = 1U << (len - 1);
3307 dest = dest_gpr(ctx, rt);
3308 shift = tcg_temp_new();
3309 tmp = tcg_temp_new();
3311 /* Convert big-endian bit numbering in SAR to left-shift. */
3312 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3314 mask = tcg_temp_new();
3315 tcg_gen_movi_reg(mask, msb + (msb - 1));
3316 tcg_gen_and_reg(tmp, val, mask);
/* Merge path: clear the target field in rs, then OR in the value. */
3318 tcg_gen_shl_reg(mask, mask, shift);
3319 tcg_gen_shl_reg(tmp, tmp, shift);
3320 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3321 tcg_gen_or_reg(dest, dest, tmp);
3323 tcg_gen_shl_reg(dest, tmp, shift);
3325 save_gpr(ctx, rt, dest);
3327 /* Install the new nullification. */
3328 cond_free(&ctx->null_cond);
3330 ctx->null_cond = do_sed_cond(c, dest);
3332 return nullify_end(ctx);
3335 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3340 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3343 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3348 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
/* BE/BLE: branch external — inter-space branch.  User-only builds have
   no spaces, so it degenerates to an ordinary (possibly direct) branch;
   system builds also update the space registers IASQ/SR0. */
3351 static bool trans_be(DisasContext *ctx, arg_be *a)
3355 #ifdef CONFIG_USER_ONLY
3356 /* ??? It seems like there should be a good way of using
3357 "be disp(sr2, r0)", the canonical gateway entry mechanism
3358 to our advantage. But that appears to be inconvenient to
3359 manage along side branch delay slots. Therefore we handle
3360 entry into the gateway page via absolute address. */
3361 /* Since we don't implement spaces, just branch. Do notice the special
3362 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3363 goto_tb to the TB containing the syscall. */
3365 return do_dbranch(ctx, a->disp, a->l, a->n);
3371 tmp = tcg_temp_new();
3372 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3373 tmp = do_ibranch_priv(ctx, tmp);
3375 #ifdef CONFIG_USER_ONLY
3376 return do_ibranch(ctx, tmp, a->l, a->n);
3378 TCGv_i64 new_spc = tcg_temp_new_i64();
3380 load_spr(ctx, new_spc, a->sp);
/* BLE form: link the return address into GR31 / SR0. */
3382 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3383 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3385 if (a->n && use_nullify_skip(ctx)) {
3386 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3387 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3388 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3389 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3391 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3392 if (ctx->iaoq_b == -1) {
3393 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3395 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3396 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3397 nullify_set(ctx, a->n);
3399 tcg_gen_lookup_and_goto_ptr();
3400 ctx->base.is_jmp = DISAS_NORETURN;
3401 return nullify_end(ctx);
/* B,L: direct branch with optional link. */
3405 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3407 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
/* B,GATE: gateway branch — may raise the privilege level according to
   the access rights of the gateway page being branched through. */
3410 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3412 target_ureg dest = iaoq_dest(ctx, a->disp);
3416 /* Make sure the caller hasn't done something weird with the queue.
3417 * ??? This is not quite the same as the PSW[B] bit, which would be
3418 * expensive to track. Real hardware will trap for
3420 * b gateway+4 (in delay slot of first branch)
3421 * However, checking for a non-sequential instruction queue *will*
3422 * diagnose the security hole
3425 * in which instructions at evil would run with increased privs.
3427 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3428 return gen_illegal(ctx);
3431 #ifndef CONFIG_USER_ONLY
3432 if (ctx->tb_flags & PSW_C) {
3433 CPUHPPAState *env = cpu_env(ctx->cs);
3434 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3435 /* If we could not find a TLB entry, then we need to generate an
3436 ITLB miss exception so the kernel will provide it.
3437 The resulting TLB fill operation will invalidate this TB and
3438 we will re-translate, at which point we *will* be able to find
3439 the TLB entry and determine if this is in fact a gateway page. */
3441 gen_excp(ctx, EXCP_ITLB_MISS);
3444 /* No change for non-gateway pages or for priv decrease. */
3445 if (type >= 4 && type - 4 < ctx->privilege) {
3446 dest = deposit32(dest, 0, 2, type - 4);
/* Code translation disabled: everything runs at priv 0. */
3449 dest &= -4; /* priv = 0 */
/* Adjust the link value's low bits to match the caller's privilege. */
3454 TCGv_reg tmp = dest_gpr(ctx, a->l);
3455 if (ctx->privilege < 3) {
3456 tcg_gen_andi_reg(tmp, tmp, -4);
3458 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3459 save_gpr(ctx, a->l, tmp);
3462 return do_dbranch(ctx, dest, 0, a->n);
/* BLR: branch and link register — target is PC+8 + (rx << 3). */
3465 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3468 TCGv_reg tmp = tcg_temp_new();
3469 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3470 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3471 /* The computation here never changes privilege level. */
3472 return do_ibranch(ctx, tmp, a->l, a->n);
3474 /* BLR R0,RX is a good way to load PC+8 into RX. */
3475 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
/* BV: branch vectored — target rb + (rx << 3), with privilege check. */
3479 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3484 dest = load_gpr(ctx, a->b);
3486 dest = tcg_temp_new();
3487 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3488 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3490 dest = do_ibranch_priv(ctx, dest);
3491 return do_ibranch(ctx, dest, 0, a->n);
/* BVE: branch vectored external — on system builds also selects the
   target space from the upper address bits via space_select(). */
3494 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3498 #ifdef CONFIG_USER_ONLY
3499 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3500 return do_ibranch(ctx, dest, a->l, a->n);
3503 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3505 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3506 if (ctx->iaoq_b == -1) {
3507 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3509 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3510 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3512 copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3514 nullify_set(ctx, a->n);
3515 tcg_gen_lookup_and_goto_ptr();
3516 ctx->base.is_jmp = DISAS_NORETURN;
3517 return nullify_end(ctx);
/*
 * Floating-point class 0/1 operations.  Each trans_* below is a thin
 * wrapper that routes through do_fop_{wew,ded,wed,dew} with the
 * appropriate generator: wew = word->word, ded = double->double,
 * wed = double->word, dew = word->double.
 */
/* FCPY single: plain register move, env unused. */
3525 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3527 tcg_gen_mov_i32(dst, src);
/* FID: identify FPU; writes a CPU-model-specific constant to FR0. */
3530 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3534 if (TARGET_REGISTER_BITS == 64) {
3535 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3537 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3541 save_frd(0, tcg_constant_i64(ret));
3542 return nullify_end(ctx);
3545 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3547 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3550 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3552 tcg_gen_mov_i64(dst, src);
3555 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3557 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
/* FABS: clear the IEEE sign bit. */
3560 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3562 tcg_gen_andi_i32(dst, src, INT32_MAX);
3565 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3567 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3570 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3572 tcg_gen_andi_i64(dst, src, INT64_MAX);
3575 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3577 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3580 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3582 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3585 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3587 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3590 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3592 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3595 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3597 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
/* FNEG: flip the IEEE sign bit. */
3600 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3602 tcg_gen_xori_i32(dst, src, INT32_MIN);
3605 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3607 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3610 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3612 tcg_gen_xori_i64(dst, src, INT64_MIN);
3615 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3617 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
/* FNEGABS: force the IEEE sign bit set (negative absolute value). */
3620 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3622 tcg_gen_ori_i32(dst, src, INT32_MIN);
3625 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3627 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3630 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3632 tcg_gen_ori_i64(dst, src, INT64_MIN);
3635 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3637 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
/* Conversions: float<->float, int(w/dw)<->float, signed and unsigned,
   with the _t_ variants using truncation (round toward zero). */
3644 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3646 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3649 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3651 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3654 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3656 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3659 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3661 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3664 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3666 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3669 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3671 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3674 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3676 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3679 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3681 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3684 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3686 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3689 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3691 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3694 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3696 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3699 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3701 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3704 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3706 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3709 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3711 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3714 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3716 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3719 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3721 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3724 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3726 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3729 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3731 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3734 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3736 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3739 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3741 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3744 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3746 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3749 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3751 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3754 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3756 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3759 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3761 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3764 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3766 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3769 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3771 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
/* FCMP single: compare via helper; result lands in the FPSR shadow,
   to be examined by a subsequent FTEST. */
3778 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3780 TCGv_i32 ta, tb, tc, ty;
3784 ta = load_frw0_i32(a->r1);
3785 tb = load_frw0_i32(a->r2);
3786 ty = tcg_constant_i32(a->y);
3787 tc = tcg_constant_i32(a->c);
3789 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
3791 return nullify_end(ctx);
/* FCMP double: same, with 64-bit operands. */
3794 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3801 ta = load_frd0(a->r1);
3802 tb = load_frd0(a->r2);
3803 ty = tcg_constant_i32(a->y);
3804 tc = tcg_constant_i32(a->c);
3806 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3808 return nullify_end(ctx);
/* FTEST: set the nullification condition from the shadowed FR0
   (FP status) according to the test encoded in a->y. */
3811 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3818 tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow))
3825 case 0: /* simple */
/* Bit 26 of the shadow is the C (compare) flag. */
3826 tcg_gen_andi_reg(t, t, 0x4000000);
3827 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3855 TCGv_reg c = load_const(ctx, mask);
3856 tcg_gen_or_reg(t, t, c);
3857 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3859 tcg_gen_andi_reg(t, t, mask);
3860 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
/* Queue-bit tests: extract a single CA bit from the shadow. */
3863 unsigned cbit = (a->y ^ 1) - 1;
3865 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3866 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3870 return nullify_end(ctx);
/* FP class 3 (three-operand) arithmetic: each routes to the softfloat
   helper via do_fop_weww (single) or do_fop_dedd (double). */
3877 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3879 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3882 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3884 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3887 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3889 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3892 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3894 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3897 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3899 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3902 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3904 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3907 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3909 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3912 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3914 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
/* XMPYU: 32x32 -> 64 unsigned integer multiply in the FP registers. */
3917 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3923 x = load_frw0_i64(a->r1);
3924 y = load_frw0_i64(a->r2);
3925 tcg_gen_mul_i64(x, x, y);
3928 return nullify_end(ctx);
3931 /* Convert the fmpyadd single-precision register encodings to standard. */
3932 static inline int fmpyadd_s_reg(unsigned r)
3934 return (r & 16) * 2 + 16 + (r & 15);
/* FMPYADD/FMPYSUB single: two independent operations in one insn —
   a multiply into tm and an add/sub into ta. */
3937 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3939 int tm = fmpyadd_s_reg(a->tm);
3940 int ra = fmpyadd_s_reg(a->ra);
3941 int ta = fmpyadd_s_reg(a->ta);
3942 int rm2 = fmpyadd_s_reg(a->rm2);
3943 int rm1 = fmpyadd_s_reg(a->rm1);
3947 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3948 do_fop_weww(ctx, ta, ta, ra,
3949 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3951 return nullify_end(ctx);
3954 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
3956 return do_fmpyadd_s(ctx, a, false);
3959 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
3961 return do_fmpyadd_s(ctx, a, true);
/* Double-precision form: register numbers are used directly. */
3964 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3968 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
3969 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
3970 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3972 return nullify_end(ctx);
3975 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
3977 return do_fmpyadd_d(ctx, a, false);
3980 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
3982 return do_fmpyadd_d(ctx, a, true);
/* FMPYFADD single: fused multiply-add rm1*rm2 + ra3 (negated-multiply
   variant selected by the elided a->neg test). */
3985 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
3990 x = load_frw0_i32(a->rm1);
3991 y = load_frw0_i32(a->rm2);
3992 z = load_frw0_i32(a->ra3);
3995 gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
3997 gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4000 save_frw_i32(a->t, x);
4001 return nullify_end(ctx);
/* FMPYFADD double: same shape with 64-bit operands. */
4004 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4009 x = load_frd0(a->rm1);
4010 y = load_frd0(a->rm2);
4011 z = load_frd0(a->ra3);
4014 gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4016 gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4020 return nullify_end(ctx);
/* DIAG: privileged diagnose.  Only the SeaBIOS-hppa BTLB call (0x100)
   is emulated; all other codes are logged and ignored. */
4023 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4025 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4026 #ifndef CONFIG_USER_ONLY
4027 if (a->i == 0x100) {
4028 /* emulate PDC BTLB, called by SeaBIOS-hppa */
4030 gen_helper_diag_btlb(tcg_env);
4031 return nullify_end(ctx);
4034 qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
/* TranslatorOps hook: set up the DisasContext from TB flags — privilege,
   MMU index, and the front/back instruction-queue addresses. */
4038 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4040 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4044 ctx->tb_flags = ctx->base.tb->flags;
4046 #ifdef CONFIG_USER_ONLY
4047 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4048 ctx->mmu_idx = MMU_USER_IDX;
4049 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4050 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4051 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4053 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4054 ctx->mmu_idx = (ctx->tb_flags & PSW_D
4055 ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4058 /* Recover the IAOQ values from the GVA + PRIV. */
4059 uint64_t cs_base = ctx->base.tb->cs_base;
4060 uint64_t iasq_f = cs_base & ~0xffffffffull;
/* Low 32 bits of cs_base hold the signed F->B queue offset. */
4061 int32_t diff = cs_base;
4063 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4064 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4067 ctx->iaoq_n_var = NULL;
4069 /* Bound the number of instructions by those left on the page. */
4070 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4071 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
/* TranslatorOps hook: seed nullification state at TB entry. */
4074 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4076 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4078 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4079 ctx->null_cond = cond_make_f();
4080 ctx->psw_n_nonzero = false;
4081 if (ctx->tb_flags & PSW_N) {
4082 ctx->null_cond.c = TCG_COND_ALWAYS;
4083 ctx->psw_n_nonzero = true;
4085 ctx->null_lab = NULL;
/* TranslatorOps hook: record both queue addresses for this insn. */
4088 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4090 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4092 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
/* TranslatorOps hook: translate one guest instruction, then advance the
   two-entry instruction-address queue (front/back), possibly ending the
   TB when the queue becomes non-sequential or unknown. */
4095 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4097 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4098 CPUHPPAState *env = cpu_env(cs);
4101 /* Execute one insn. */
4102 #ifdef CONFIG_USER_ONLY
/* The first page is the syscall gateway region in user mode. */
4103 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4105 ret = ctx->base.is_jmp;
4106 assert(ret != DISAS_NEXT);
4110 /* Always fetch the insn, even if nullified, so that we check
4111 the page permissions for execute. */
4112 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4114 /* Set up the IA queue for the next insn.
4115 This will be overwritten by a branch. */
4116 if (ctx->iaoq_b == -1) {
/* Back of queue unknown at translate time: track it dynamically. */
4118 ctx->iaoq_n_var = tcg_temp_new();
4119 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4121 ctx->iaoq_n = ctx->iaoq_b + 4;
4122 ctx->iaoq_n_var = NULL;
4125 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4126 ctx->null_cond.c = TCG_COND_NEVER;
4130 if (!decode(ctx, insn)) {
4133 ret = ctx->base.is_jmp;
4134 assert(ctx->null_lab == NULL);
4138 /* Advance the insn queue. Note that this check also detects
4139 a priority change within the instruction queue. */
4140 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4141 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4142 && use_goto_tb(ctx, ctx->iaoq_b)
4143 && (ctx->null_cond.c == TCG_COND_NEVER
4144 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4145 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4146 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4147 ctx->base.is_jmp = ret = DISAS_NORETURN;
4149 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4152 ctx->iaoq_f = ctx->iaoq_b;
4153 ctx->iaoq_b = ctx->iaoq_n;
4154 ctx->base.pc_next += 4;
4157 case DISAS_NORETURN:
4158 case DISAS_IAQ_N_UPDATED:
/* Queue went dynamic: flush the known values back to the CPU regs. */
4162 case DISAS_IAQ_N_STALE:
4163 case DISAS_IAQ_N_STALE_EXIT:
4164 if (ctx->iaoq_f == -1) {
4165 tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4166 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4167 #ifndef CONFIG_USER_ONLY
4168 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4171 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4173 : DISAS_IAQ_N_UPDATED);
4174 } else if (ctx->iaoq_b == -1) {
4175 tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4180 g_assert_not_reached();
/*
 * Translator hook: finish the TB according to how translation ended.
 * NOTE(review): the switch statement scaffolding and break statements are
 * elided in this extract.
 */
4184 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4186     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4187     DisasJumpType is_jmp = ctx->base.is_jmp;
/* NORETURN: the TB already ended (e.g. via gen_goto_tb); nothing to emit. */
4190     case DISAS_NORETURN:
/* Queue registers are stale: write back the translate-time IAOQ values
   (or keep the runtime register when the value is unknown, -1). */
4192     case DISAS_TOO_MANY:
4193     case DISAS_IAQ_N_STALE:
4194     case DISAS_IAQ_N_STALE_EXIT:
4195         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4196         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
/* Registers are up to date: chain to the next TB via the jump cache
   unless an explicit exit was requested. */
4199     case DISAS_IAQ_N_UPDATED:
4200         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4201             tcg_gen_lookup_and_goto_ptr();
/* Fall back to returning to the main loop (elided branch joins here). */
4206         tcg_gen_exit_tb(NULL, 0);
/* Any other DisasJumpType is a translator bug. */
4209         g_assert_not_reached();
/*
 * Translator hook: log the guest assembly for this TB (-d in_asm).
 * In user-only mode, the magic gateway-page entry points have no real
 * code behind them, so print a symbolic name instead of disassembling.
 * NOTE(review): the if/else-if scaffolding selecting between the special
 * PCs is elided in this extract.
 */
4213 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4214                               CPUState *cs, FILE *logfile)
4216     target_ulong pc = dcbase->pc_first;
4218 #ifdef CONFIG_USER_ONLY
4221         fprintf(logfile, "IN:\n0x00000000: (null)\n");
4224         fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
4227         fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
4230         fprintf(logfile, "IN:\n0x00000100: syscall\n");
/* Normal case: symbolize and disassemble the translated bytes. */
4235     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4236     target_disas(logfile, cs, pc, dcbase->tb->size);
/* Callback table wiring the HPPA hooks into the generic translator loop. */
4239 static const TranslatorOps hppa_tr_ops = {
4240     .init_disas_context = hppa_tr_init_disas_context,
4241     .tb_start           = hppa_tr_tb_start,
4242     .insn_start         = hppa_tr_insn_start,
4243     .translate_insn     = hppa_tr_translate_insn,
4244     .tb_stop            = hppa_tr_tb_stop,
4245     .disas_log          = hppa_tr_disas_log,
/*
 * Public entry point: translate one guest TB by running the generic
 * translator loop with the HPPA-specific ops.
 * NOTE(review): the DisasContext local declaration is elided in this
 * extract; &ctx.base refers to it.
 */
4248 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4249                            target_ulong pc, void *host_pc)
4252     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);