/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H
/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new
#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif
#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_constant_reg tcg_constant_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_constant_reg tcg_constant_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */
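
/*
 * With the aliases above in place, the rest of this file manipulates
 * "target register" sized values through the tcg_gen_*_reg names
 * without caring whether TARGET_REGISTER_BITS is 32 or 64.
 */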

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
#define UNALIGN(C) MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* Inverted space register field: a value of 0 means sr0 was specified,
   not inferred from the base register.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

/* Translate CMPI doubleword conditions to standard.  */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}

/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"
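
/*
 * The expand_* and *_to_m helpers above are referenced by the generated
 * decoder through "!function" field specifiers in insns.decode; each one
 * rewrites a raw instruction field into the form the trans_* callbacks
 * below expect.
 */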

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
#define DISAS_EXIT DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    return cond_make_tmp(c, a0, tcg_constant_reg(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    TCGv_reg t0 = tcg_temp_new();
    TCGv_reg t1 = tcg_temp_new();

    tcg_gen_mov_reg(t0, a0);
    tcg_gen_mov_reg(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}
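
/*
 * DisasCond lifecycle: the cond_make* constructors copy their operands
 * into fresh temporaries (except cond_make_n, which deliberately refers
 * to the cpu_psw_n global); the condition is then consumed by a
 * brcond/setcond/movcond; cond_free resets it to TCG_COND_NEVER.
 */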

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}
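
/*
 * The *0 load variants above special-case %fr0 as a source, yielding
 * zero; architecturally that register holds the floating-point status,
 * and stores to it are given special treatment in the fp load paths
 * below via gen_helper_loaded_fr0.
 */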

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
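
/*
 * Putting the nullification helpers together, a typical nullifiable
 * instruction is translated as:
 *
 *     nullify_over(ctx);          // branch over the body if PSW[N] set
 *     ...emit the operation...
 *     ctx->null_cond = cond;      // how this insn nullifies the next
 *     return nullify_end(ctx);    // close the label, fix up PSW[N]
 */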

static target_ureg gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_reg dest,
                            target_ureg ival, TCGv_reg vval)
{
    target_ureg mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_reg(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_andi_reg(dest, vval, mask);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_reg. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return TARGET_REGISTER_BITS == 64 && !(ctx->is_pa20 && d);
}
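
/*
 * For example, on a 64-bit build a 32-bit (d == false) comparison must
 * ignore the high word of the register, so the condition builders below
 * first zero- or sign-extend the value of interest into a temporary.
 */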

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_reg res, TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new();
            tcg_gen_ext32u_reg(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_reg(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        ((N ^ V) | Z / !((N ^ V) | Z)) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_reg(tmp, tmp, 31, 1);
            tcg_gen_and_reg(tmp, tmp, res);
            tcg_gen_ext32u_reg(tmp, tmp);
        } else {
            tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
            tcg_gen_and_reg(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_reg(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new();
            tcg_gen_ext32s_reg(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_reg res, TCGv_reg in1,
                             TCGv_reg in2, TCGv_reg sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_reg t1 = tcg_temp_new();
        TCGv_reg t2 = tcg_temp_new();

        if (ext_uns) {
            tcg_gen_ext32u_reg(t1, in1);
            tcg_gen_ext32u_reg(t2, in2);
        } else {
            tcg_gen_ext32s_reg(t1, in1);
            tcg_gen_ext32s_reg(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_reg res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_reg tmp = tcg_temp_new();

        if (ext_uns) {
            tcg_gen_ext32u_reg(tmp, res);
        } else {
            tcg_gen_ext32s_reg(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}
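
/*
 * Example: orig = 5 gives c = 1, f = 1, hence do_log_cond with cf = 3,
 * i.e. "<>", the inverse of orig = 1 ("="), matching the rule that
 * conditions 4-7 are the reverse of 0-3.
 */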

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;
    target_ureg d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
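
/*
 * In the (cf & 8) case above, cb collects the per-bit carry-outs of
 * res = in1 + in2: a bit position generates a carry when both inputs
 * are set (in1 & in2), or when either input is set but the sum bit
 * came out clear ((in1 | in2) & ~res).
 */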

static TCGv_reg get_carry(DisasContext *ctx, bool d,
                          TCGv_reg cb, TCGv_reg cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_extract_reg(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_reg get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}
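
/*
 * A note on the carry representation used throughout: cb holds the
 * bitwise carry-in vector of the addition (in1 ^ in2 ^ dest), while
 * cb_msb holds the final carry-out in bit 0.  For 32-bit (!d)
 * conditions, get_carry extracts bit 32 of cb instead, which is the
 * carry out of bit 31.
 */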

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);

    return sv;
}
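
/*
 * Both follow the usual two's-complement identities: addition overflows
 * when the operands have the same sign but the result does not,
 * sv = (res ^ in1) & ~(in1 ^ in2); subtraction overflows when the
 * operands differ in sign and the result's sign differs from in1,
 * sv = (res ^ in1) & (in1 ^ in2).  The sign bit of sv is the V flag.
 */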

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_reg dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new();
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = tcg_temp_new();
        cb = tcg_temp_new();

        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), zero);
        }
        tcg_gen_xor_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_reg one = tcg_constant_reg(1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, one, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
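
/*
 * PA-RISC represents a subtraction borrow as a carry: C = 1 means
 * "no borrow".  That is why the is_b path above computes
 * IN1 + ~IN2 + C instead of subtracting a separate borrow bit.
 */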

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf, bool d)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf, bool d,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_tl();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = tcg_temp_new_tl();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_reg(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif
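
/*
 * For example, with PSW_W clear a base address of 0x40000000 has top
 * two bits 01, so the load above fetches sr[4 + 1], i.e. %sr5, from
 * the contiguous sr[] array starting at the offset of sr[4].
 */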

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new();
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new();
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_tl();
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    tcg_gen_andi_tl(addr, addr, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
#endif
}
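
/*
 * Note the modify convention used here and below: modify > 0
 * (post-modify) uses the unmodified base for the access, while
 * modify <= 0 (pre-modify or none) uses base + displacement; *pofs is
 * handed back so the caller can write the updated base register when
 * modification was requested.
 */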

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg do_load_64
#define do_store_reg do_store_64
#else
#define do_load_reg do_load_32
#define do_store_reg do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new();
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new();
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_reg(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
        next = tcg_temp_new();
        tcg_gen_addi_reg(next, dest, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);

        nullify_over(ctx);
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = tcg_temp_new();

        copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *      IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = tcg_temp_new();
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        dest = tcg_temp_new();
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}
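
/*
 * Example: at privilege level 2, a branch target whose low two bits
 * are 0 or 1 is forced up to 2 by the GTU movcond above, while low
 * bits of 3 are kept: the privilege number may only move toward
 * less-privileged values, never toward more-privileged ones.
 */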

#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    TCGv_reg tmp;

    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
        tmp = tcg_temp_new();
        tcg_gen_ori_reg(tmp, cpu_gr[31], 3);
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
        tcg_gen_addi_reg(tmp, tmp, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif

static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
    unsigned rt = a->t;
    TCGv_reg tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_reg t1 = tcg_temp_new();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(t1, t0);

    save_gpr(ctx, rt, t1);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_reg tmp;

    switch (ctl) {
    case CR_SAR:
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_reg(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (translator_io_start(&ctx->base)) {
            gen_helper_read_interval_timer(tmp);
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = tcg_temp_new();
    tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 t64;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    t64 = tcg_temp_new_i64();
    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
    tcg_gen_shli_i64(t64, t64, 32);

    if (rs >= 4) {
        tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], t64);
    }

    return nullify_end(ctx);
}

static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_reg reg;
    TCGv_reg tmp;

    if (ctl == CR_SAR) {
        reg = load_gpr(ctx, a->r);
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, reg, ctx->is_pa20 ? 63 : 31);
        save_or_nullify(ctx, cpu_sar, tmp);

        cond_free(&ctx->null_cond);
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    reg = load_gpr(ctx, a->r);

    switch (ctl) {
    case CR_IT:
        gen_helper_write_interval_timer(tcg_env, reg);
        break;
    case CR_EIRR:
        gen_helper_write_eirr(tcg_env, reg);
        break;
    case CR_EIEM:
        gen_helper_write_eiem(tcg_env, reg);
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = tcg_temp_new();
        tcg_gen_ld_reg(tmp, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_reg(reg, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    case CR_PID1:
    case CR_PID2:
    case CR_PID3:
    case CR_PID4:
        tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
#ifndef CONFIG_USER_ONLY
        gen_helper_change_prot_id(tcg_env);
#endif
        break;

    default:
        tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_reg(tmp, tmp, ctx->is_pa20 ? 63 : 31);
    save_or_nullify(ctx, cpu_sar, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_reg dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_reg(dest, 0);
#else
    TCGv_i64 t0 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(dest, t0);
#endif
    save_gpr(ctx, a->t, dest);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_reg(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_reg(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2385 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2387 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2388 #ifndef CONFIG_USER_ONLY
2392 reg = load_gpr(ctx, a->r);
2393 tmp = tcg_temp_new();
2394 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2396 /* Exit the TB to recognize new interrupts. */
2397 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2398 return nullify_end(ctx);
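/*
 * RFI reloads the PSW and the instruction address queue from the
 * interruption state; the ,R form also restores the shadowed GPRs.
 * Since nearly anything may have changed, leave via exit_tb rather
 * than goto_tb.
 */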
2402 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2404 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2405 #ifndef CONFIG_USER_ONLY
2409 gen_helper_rfi_r(tcg_env);
2411 gen_helper_rfi(tcg_env);
2413 /* Exit the TB to recognize new interrupts. */
2414 tcg_gen_exit_tb(NULL, 0);
2415 ctx->base.is_jmp = DISAS_NORETURN;
2417 return nullify_end(ctx);
2421 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2423 return do_rfi(ctx, false);
2426 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2428 return do_rfi(ctx, true);
2431 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2433 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2434 #ifndef CONFIG_USER_ONLY
2436 gen_helper_halt(tcg_env);
2437 ctx->base.is_jmp = DISAS_NORETURN;
2438 return nullify_end(ctx);
2442 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2444 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2445 #ifndef CONFIG_USER_ONLY
2447 gen_helper_reset(tcg_env);
2448 ctx->base.is_jmp = DISAS_NORETURN;
2449 return nullify_end(ctx);
2453 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2455 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2456 #ifndef CONFIG_USER_ONLY
2458 gen_helper_getshadowregs(tcg_env);
2459 return nullify_end(ctx);
2463 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2466 TCGv_reg dest = dest_gpr(ctx, a->b);
2467 TCGv_reg src1 = load_gpr(ctx, a->b);
2468 TCGv_reg src2 = load_gpr(ctx, a->x);
2470 /* The only thing we need to do is the base register modification. */
2471 tcg_gen_add_reg(dest, src1, src2);
2472 save_gpr(ctx, a->b, dest);
2474 cond_free(&ctx->null_cond);
2478 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2481 TCGv_i32 level, want;
2486 dest = dest_gpr(ctx, a->t);
2487 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2490 level = tcg_constant_i32(a->ri);
2492 level = tcg_temp_new_i32();
2493 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2494 tcg_gen_andi_i32(level, level, 3);
2496 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2498 gen_helper_probe(dest, tcg_env, addr, level, want);
2500 save_gpr(ctx, a->t, dest);
2501 return nullify_end(ctx);
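/*
 * ITLBA/ITLBP insert the address resp. protection half of a TLB entry
 * for the address formed from sp/b.  If translation is currently
 * enabled (PSW_C), the mapping under our feet may have changed, so the
 * TB must end; the same applies to the purge variants that follow.
 */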
2504 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2506 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2507 #ifndef CONFIG_USER_ONLY
2513 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2514 reg = load_gpr(ctx, a->r);
2516 gen_helper_itlba(tcg_env, addr, reg);
2518 gen_helper_itlbp(tcg_env, addr, reg);
2521 /* Exit TB for TLB change if mmu is enabled. */
2522 if (ctx->tb_flags & PSW_C) {
2523 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2525 return nullify_end(ctx);
2529 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2531 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2532 #ifndef CONFIG_USER_ONLY
2538 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2540 save_gpr(ctx, a->b, ofs);
2543 gen_helper_ptlbe(tcg_env);
2545 gen_helper_ptlb(tcg_env, addr);
2548 /* Exit TB for TLB change if mmu is enabled. */
2549 if (ctx->tb_flags & PSW_C) {
2550 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2552 return nullify_end(ctx);
2557 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2559 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2560 * page 13-9 (195/206)
2562 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2564 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2565 #ifndef CONFIG_USER_ONLY
2566 TCGv_tl addr, atl, stl;
2573 * if (not (pcxl or pcxl2))
2574 * return gen_illegal(ctx);
2576 * Note for future: these are 32-bit systems; no hppa64.
2579 atl = tcg_temp_new_tl();
2580 stl = tcg_temp_new_tl();
2581 addr = tcg_temp_new_tl();
2583 tcg_gen_ld32u_i64(stl, tcg_env,
2584 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2585 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2586 tcg_gen_ld32u_i64(atl, tcg_env,
2587 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2588 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2589 tcg_gen_shli_i64(stl, stl, 32);
2590 tcg_gen_or_tl(addr, atl, stl);
2592 reg = load_gpr(ctx, a->r);
2594 gen_helper_itlba(tcg_env, addr, reg);
2596 gen_helper_itlbp(tcg_env, addr, reg);
2599 /* Exit TB for TLB change if mmu is enabled. */
2600 if (ctx->tb_flags & PSW_C) {
2601 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2603 return nullify_end(ctx);
2607 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2609 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2610 #ifndef CONFIG_USER_ONLY
2612 TCGv_reg ofs, paddr;
2616 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2618 paddr = tcg_temp_new();
2619 gen_helper_lpa(paddr, tcg_env, vaddr);
2621 /* Note that physical address result overrides base modification. */
2623 save_gpr(ctx, a->b, ofs);
2625 save_gpr(ctx, a->t, paddr);
2627 return nullify_end(ctx);
2631 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2633 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2635 /* The Coherence Index is an implementation-defined function of the
2636 physical address. Two addresses with the same CI have a coherent
2637 view of the cache. We simply return 0 for all addresses,
2638 since the entire address space is coherent. */
2639 save_gpr(ctx, a->t, tcg_constant_reg(0));
2641 cond_free(&ctx->null_cond);
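/*
 * The ADD/SUB wrappers below differ only in which completer flags they
 * pass down: ,L (logical: skip the carry/borrow writeback), ,TSV (trap
 * on signed overflow), ,TC (trap on condition), and ,C resp. ,B (carry
 * or borrow in).
 */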
2645 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2647 return do_add_reg(ctx, a, false, false, false, false);
2650 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2652 return do_add_reg(ctx, a, true, false, false, false);
2655 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2657 return do_add_reg(ctx, a, false, true, false, false);
2660 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2662 return do_add_reg(ctx, a, false, false, false, true);
2665 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2667 return do_add_reg(ctx, a, false, true, false, true);
2670 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2672 return do_sub_reg(ctx, a, false, false, false);
2675 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2677 return do_sub_reg(ctx, a, true, false, false);
2680 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2682 return do_sub_reg(ctx, a, false, false, true);
2685 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2687 return do_sub_reg(ctx, a, true, false, true);
2690 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2692 return do_sub_reg(ctx, a, false, true, false);
2695 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2697 return do_sub_reg(ctx, a, true, true, false);
2700 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2702 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2705 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2707 return do_log_reg(ctx, a, tcg_gen_and_reg);
2710 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2713 unsigned r2 = a->r2;
2714 unsigned r1 = a->r1;
2717 if (rt == 0) { /* NOP */
2718 cond_free(&ctx->null_cond);
2721 if (r2 == 0) { /* COPY */
2723 TCGv_reg dest = dest_gpr(ctx, rt);
2724 tcg_gen_movi_reg(dest, 0);
2725 save_gpr(ctx, rt, dest);
2727 save_gpr(ctx, rt, cpu_gr[r1]);
2729 cond_free(&ctx->null_cond);
2732 #ifndef CONFIG_USER_ONLY
2733 /* These are QEMU extensions and are nops in the real architecture:
2735 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2736 * or %r31,%r31,%r31 -- death loop; offline cpu
2737 * currently implemented as idle.
2739 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2740 /* No need to check for supervisor, as userland can only pause
2741 until the next timer interrupt. */
2744 /* Advance the instruction queue. */
2745 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2746 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2747 nullify_set(ctx, 0);
2749 /* Tell the qemu main loop to halt until this cpu has work. */
2750 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2751 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2752 gen_excp_1(EXCP_HALTED);
2753 ctx->base.is_jmp = DISAS_NORETURN;
2755 return nullify_end(ctx);
2759 return do_log_reg(ctx, a, tcg_gen_or_reg);
2762 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2764 return do_log_reg(ctx, a, tcg_gen_xor_reg);
2767 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2769 TCGv_reg tcg_r1, tcg_r2;
2774 tcg_r1 = load_gpr(ctx, a->r1);
2775 tcg_r2 = load_gpr(ctx, a->r2);
2776 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2777 return nullify_end(ctx);
2780 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2782 TCGv_reg tcg_r1, tcg_r2;
2787 tcg_r1 = load_gpr(ctx, a->r1);
2788 tcg_r2 = load_gpr(ctx, a->r2);
2789 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_reg);
2790 return nullify_end(ctx);
2793 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2795 TCGv_reg tcg_r1, tcg_r2, tmp;
2800 tcg_r1 = load_gpr(ctx, a->r1);
2801 tcg_r2 = load_gpr(ctx, a->r2);
2802 tmp = tcg_temp_new();
2803 tcg_gen_not_reg(tmp, tcg_r2);
2804 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_reg);
2805 return nullify_end(ctx);
2808 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2810 return do_uaddcm(ctx, a, false);
2813 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2815 return do_uaddcm(ctx, a, true);
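/*
 * DCOR/DCOR,I: decimal correct.  The per-nibble carries saved in
 * PSW[CB] (complemented for the plain form) are masked to one bit per
 * BCD digit and multiplied by 6, forming the usual +6/-6 correction
 * for exactly the digits that need it.
 */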
2818 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2824 tmp = tcg_temp_new();
2825 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2827 tcg_gen_not_reg(tmp, tmp);
2829 tcg_gen_andi_reg(tmp, tmp, (target_ureg)0x1111111111111111ull);
2830 tcg_gen_muli_reg(tmp, tmp, 6);
2831 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2832 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2833 return nullify_end(ctx);
2836 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2838 return do_dcor(ctx, a, false);
2841 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2843 return do_dcor(ctx, a, true);
2846 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2848 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2853 in1 = load_gpr(ctx, a->r1);
2854 in2 = load_gpr(ctx, a->r2);
2856 add1 = tcg_temp_new();
2857 add2 = tcg_temp_new();
2858 addc = tcg_temp_new();
2859 dest = tcg_temp_new();
2860 zero = tcg_constant_reg(0);
2862 /* Form R1 << 1 | PSW[CB]{8}. */
2863 tcg_gen_add_reg(add1, in1, in1);
2864 tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false));
2867 * Add or subtract R2, depending on PSW[V]. Proper computation of
2868 * carry requires that we subtract via + ~R2 + 1, as described in
2869 * the manual. By extracting and masking V, we can produce the
2870 * proper inputs to the addition without movcond.
2872 tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1);
2873 tcg_gen_xor_reg(add2, in2, addc);
2874 tcg_gen_andi_reg(addc, addc, 1);
2876 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2877 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2879 /* Write back the result register. */
2880 save_gpr(ctx, a->t, dest);
2882 /* Write back PSW[CB]. */
2883 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2884 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2886 /* Write back PSW[V] for the division step. */
2887 cout = get_psw_carry(ctx, false);
2888 tcg_gen_neg_reg(cpu_psw_v, cout);
2889 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2891 /* Install the new nullification. */
2894 if (cond_need_sv(a->cf >> 1)) {
2895 /* ??? The lshift is supposed to contribute to overflow. */
2896 sv = do_add_sv(ctx, dest, add1, add2);
2898 ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2901 return nullify_end(ctx);
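/*
 * For reference, the usual idiom (not enforced here) primes PSW[V]
 * with the sign of the divisor and then issues 32 DS instructions,
 * each performing one non-restoring division step and shifting a
 * quotient bit in at the bottom.
 */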
2904 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2906 return do_add_imm(ctx, a, false, false);
2909 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2911 return do_add_imm(ctx, a, true, false);
2914 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2916 return do_add_imm(ctx, a, false, true);
2919 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2921 return do_add_imm(ctx, a, true, true);
2924 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2926 return do_sub_imm(ctx, a, false);
2929 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2931 return do_sub_imm(ctx, a, true);
2934 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2936 TCGv_reg tcg_im, tcg_r2;
2942 tcg_im = tcg_constant_reg(a->i);
2943 tcg_r2 = load_gpr(ctx, a->r);
2944 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2946 return nullify_end(ctx);
2949 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2951 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2952 return gen_illegal(ctx);
2954 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2955 a->disp, a->sp, a->m, a->size | MO_TE);
2959 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2961 assert(a->x == 0 && a->scale == 0);
2962 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2963 return gen_illegal(ctx);
2965 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2969 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2971 MemOp mop = MO_TE | MO_ALIGN | a->size;
2972 TCGv_reg zero, dest, ofs;
2975 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2976 return gen_illegal(ctx);
2982 /* Base register modification. Make sure that if RT == RB,
2983 we still see the result of the load. */
2984 dest = tcg_temp_new();
2986 dest = dest_gpr(ctx, a->t);
2989 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2990 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2993 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2994 * However, actual hardware succeeds when aligned mod 4.
2995 * Detect this case and log a GUEST_ERROR.
2997 * TODO: HPPA64 relaxes the over-alignment requirement
2998 * with the ,co completer.
3000 gen_helper_ldc_check(addr);
3002 zero = tcg_constant_reg(0);
3003 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
3006 save_gpr(ctx, a->b, ofs);
3008 save_gpr(ctx, a->t, dest);
3010 return nullify_end(ctx);
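/*
 * STBY stores only the leading ("begin") or trailing ("end") bytes of
 * a word, as used by unaligned store sequences.  Both cases live in
 * helpers, with separate variants when the TB may run in parallel with
 * other cpus.
 */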
3013 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3020 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3021 ctx->mmu_idx == MMU_PHYS_IDX);
3022 val = load_gpr(ctx, a->r);
3024 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3025 gen_helper_stby_e_parallel(tcg_env, addr, val);
3027 gen_helper_stby_e(tcg_env, addr, val);
3030 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3031 gen_helper_stby_b_parallel(tcg_env, addr, val);
3033 gen_helper_stby_b(tcg_env, addr, val);
3037 tcg_gen_andi_reg(ofs, ofs, ~3);
3038 save_gpr(ctx, a->b, ofs);
3041 return nullify_end(ctx);
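/*
 * The privileged absolute forms are ordinary loads and stores issued
 * with ctx->mmu_idx temporarily forced to MMU_PHYS_IDX, bypassing
 * address translation.
 */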
3044 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3046 int hold_mmu_idx = ctx->mmu_idx;
3048 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3049 ctx->mmu_idx = MMU_PHYS_IDX;
3051 ctx->mmu_idx = hold_mmu_idx;
3055 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3057 int hold_mmu_idx = ctx->mmu_idx;
3059 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3060 ctx->mmu_idx = MMU_PHYS_IDX;
3062 ctx->mmu_idx = hold_mmu_idx;
3066 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3068 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3070 tcg_gen_movi_reg(tcg_rt, a->i);
3071 save_gpr(ctx, a->t, tcg_rt);
3072 cond_free(&ctx->null_cond);
3076 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3078 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
3079 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3081 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
3082 save_gpr(ctx, 1, tcg_r1);
3083 cond_free(&ctx->null_cond);
3087 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3089 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3091 /* Special case rb == 0, for the LDI pseudo-op.
3092 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
3094 tcg_gen_movi_reg(tcg_rt, a->i);
3096 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
3098 save_gpr(ctx, a->t, tcg_rt);
3099 cond_free(&ctx->null_cond);
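/*
 * Common code for the compare-and-branch instructions: compute
 * in1 - in2, fold the completer into a condition on that difference
 * (computing signed overflow only when the condition requires it),
 * and branch.  Unlike ADDB below, the result is discarded.
 */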
3103 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3104 unsigned c, unsigned f, bool d, unsigned n, int disp)
3106 TCGv_reg dest, in2, sv;
3109 in2 = load_gpr(ctx, r);
3110 dest = tcg_temp_new();
3112 tcg_gen_sub_reg(dest, in1, in2);
3115 if (cond_need_sv(c)) {
3116 sv = do_sub_sv(ctx, dest, in1, in2);
3119 cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3120 return do_cbranch(ctx, disp, n, &cond);
3123 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3125 if (!ctx->is_pa20 && a->d) {
3129 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3130 a->c, a->f, a->d, a->n, a->disp);
3133 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3135 if (!ctx->is_pa20 && a->d) {
3139 return do_cmpb(ctx, a->r, tcg_constant_reg(a->i),
3140 a->c, a->f, a->d, a->n, a->disp);
3143 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3144 unsigned c, unsigned f, unsigned n, int disp)
3146 TCGv_reg dest, in2, sv, cb_cond;
3151 * For hppa64, the ADDB conditions change with PSW.W,
3152 * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3154 if (ctx->tb_flags & PSW_W) {
3161 in2 = load_gpr(ctx, r);
3162 dest = tcg_temp_new();
3166 if (cond_need_cb(c)) {
3167 TCGv_reg cb = tcg_temp_new();
3168 TCGv_reg cb_msb = tcg_temp_new();
3170 tcg_gen_movi_reg(cb_msb, 0);
3171 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3172 tcg_gen_xor_reg(cb, in1, in2);
3173 tcg_gen_xor_reg(cb, cb, dest);
3174 cb_cond = get_carry(ctx, d, cb, cb_msb);
3176 tcg_gen_add_reg(dest, in1, in2);
3178 if (cond_need_sv(c)) {
3179 sv = do_add_sv(ctx, dest, in1, in2);
3182 cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3183 save_gpr(ctx, r, dest);
3184 return do_cbranch(ctx, disp, n, &cond);
3187 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3190 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3193 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3196 return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
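/*
 * BB: branch on bit.  The selected bit -- indexed by SAR or by an
 * immediate position -- is shifted into the sign position and tested
 * with a signed LT/GE comparison against zero.
 */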
3199 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3201 TCGv_reg tmp, tcg_r;
3206 tmp = tcg_temp_new();
3207 tcg_r = load_gpr(ctx, a->r);
3208 if (cond_need_ext(ctx, a->d)) {
3209 /* Force shift into [32,63] */
3210 tcg_gen_ori_reg(tmp, cpu_sar, 32);
3211 tcg_gen_shl_reg(tmp, tcg_r, tmp);
3213 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3216 cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3217 return do_cbranch(ctx, a->disp, a->n, &cond);
3220 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3222 TCGv_reg tmp, tcg_r;
3228 tmp = tcg_temp_new();
3229 tcg_r = load_gpr(ctx, a->r);
3230 p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3231 tcg_gen_shli_reg(tmp, tcg_r, p);
3233 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3234 return do_cbranch(ctx, a->disp, a->n, &cond);
3237 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3244 dest = dest_gpr(ctx, a->r2);
3246 tcg_gen_movi_reg(dest, 0);
3248 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3251 /* All MOVB conditions are 32-bit. */
3252 cond = do_sed_cond(ctx, a->c, false, dest);
3253 return do_cbranch(ctx, a->disp, a->n, &cond);
3256 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3263 dest = dest_gpr(ctx, a->r);
3264 tcg_gen_movi_reg(dest, a->i);
3266 /* All MOVBI conditions are 32-bit. */
3267 cond = do_sed_cond(ctx, a->c, false, dest);
3268 return do_cbranch(ctx, a->disp, a->n, &cond);
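/*
 * SHRPW shifts the doubleword r1:r2 right and keeps the low word.
 * Two special cases avoid forming the concatenation: r1 == 0 reduces
 * to a zero-extended shift, and r1 == r2 to a 32-bit rotate.
 */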
3271 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3279 dest = dest_gpr(ctx, a->t);
3281 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3282 tcg_gen_shr_reg(dest, dest, cpu_sar);
3283 } else if (a->r1 == a->r2) {
3284 TCGv_i32 t32 = tcg_temp_new_i32();
3285 TCGv_i32 s32 = tcg_temp_new_i32();
3287 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3288 tcg_gen_trunc_reg_i32(s32, cpu_sar);
3289 tcg_gen_rotr_i32(t32, t32, s32);
3290 tcg_gen_extu_i32_reg(dest, t32);
3292 TCGv_i64 t = tcg_temp_new_i64();
3293 TCGv_i64 s = tcg_temp_new_i64();
3295 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3296 tcg_gen_extu_reg_i64(s, cpu_sar);
3297 tcg_gen_shr_i64(t, t, s);
3298 tcg_gen_trunc_i64_reg(dest, t);
3300 save_gpr(ctx, a->t, dest);
3302 /* Install the new nullification. */
3303 cond_free(&ctx->null_cond);
3305 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3307 return nullify_end(ctx);
3310 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3312 unsigned sa = 31 - a->cpos;
3319 dest = dest_gpr(ctx, a->t);
3320 t2 = load_gpr(ctx, a->r2);
3322 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3323 } else if (TARGET_REGISTER_BITS == 32) {
3324 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3325 } else if (a->r1 == a->r2) {
3326 TCGv_i32 t32 = tcg_temp_new_i32();
3327 tcg_gen_trunc_reg_i32(t32, t2);
3328 tcg_gen_rotri_i32(t32, t32, sa);
3329 tcg_gen_extu_i32_reg(dest, t32);
3331 TCGv_i64 t64 = tcg_temp_new_i64();
3332 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3333 tcg_gen_shri_i64(t64, t64, sa);
3334 tcg_gen_trunc_i64_reg(dest, t64);
3336 save_gpr(ctx, a->t, dest);
3338 /* Install the new nullification. */
3339 cond_free(&ctx->null_cond);
3341 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3343 return nullify_end(ctx);
3346 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3348 unsigned len = 32 - a->clen;
3349 TCGv_reg dest, src, tmp;
3355 dest = dest_gpr(ctx, a->t);
3356 src = load_gpr(ctx, a->r);
3357 tmp = tcg_temp_new();
3359 /* Recall that SAR uses big-endian bit numbering. */
3360 tcg_gen_andi_reg(tmp, cpu_sar, 31);
3361 tcg_gen_xori_reg(tmp, tmp, 31);
3364 tcg_gen_sar_reg(dest, src, tmp);
3365 tcg_gen_sextract_reg(dest, dest, 0, len);
3367 tcg_gen_shr_reg(dest, src, tmp);
3368 tcg_gen_extract_reg(dest, dest, 0, len);
3370 save_gpr(ctx, a->t, dest);
3372 /* Install the new nullification. */
3373 cond_free(&ctx->null_cond);
3375 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3377 return nullify_end(ctx);
3380 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3382 unsigned len = 32 - a->clen;
3383 unsigned cpos = 31 - a->pos;
3390 dest = dest_gpr(ctx, a->t);
3391 src = load_gpr(ctx, a->r);
3393 tcg_gen_sextract_reg(dest, src, cpos, len);
3395 tcg_gen_extract_reg(dest, src, cpos, len);
3397 save_gpr(ctx, a->t, dest);
3399 /* Install the new nullification. */
3400 cond_free(&ctx->null_cond);
3402 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3404 return nullify_end(ctx);
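/*
 * The deposit instructions insert a len-bit field at bit cpos of the
 * target.  With an immediate field the operation reduces to an AND/OR
 * pair against precomputed masks, or a bare move for the ,Z form.
 */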
3407 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3409 unsigned len = 32 - a->clen;
3410 target_sreg mask0, mask1;
3416 if (a->cpos + len > 32) {
3420 dest = dest_gpr(ctx, a->t);
3421 mask0 = deposit64(0, a->cpos, len, a->i);
3422 mask1 = deposit64(-1, a->cpos, len, a->i);
3425 TCGv_reg src = load_gpr(ctx, a->t);
3427 tcg_gen_andi_reg(dest, src, mask1);
3430 tcg_gen_ori_reg(dest, src, mask0);
3432 tcg_gen_movi_reg(dest, mask0);
3434 save_gpr(ctx, a->t, dest);
3436 /* Install the new nullification. */
3437 cond_free(&ctx->null_cond);
3439 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3441 return nullify_end(ctx);
3444 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3446 unsigned rs = a->nz ? a->t : 0;
3447 unsigned len = 32 - a->clen;
3453 if (a->cpos + len > 32) {
3457 dest = dest_gpr(ctx, a->t);
3458 val = load_gpr(ctx, a->r);
3460 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3462 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3464 save_gpr(ctx, a->t, dest);
3466 /* Install the new nullification. */
3467 cond_free(&ctx->null_cond);
3469 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3471 return nullify_end(ctx);
3474 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3475 unsigned nz, unsigned clen, TCGv_reg val)
3477 unsigned rs = nz ? rt : 0;
3478 unsigned len = 32 - clen;
3479 TCGv_reg mask, tmp, shift, dest;
3480 unsigned msb = 1U << (len - 1);
3482 dest = dest_gpr(ctx, rt);
3483 shift = tcg_temp_new();
3484 tmp = tcg_temp_new();
3486 /* Convert big-endian bit numbering in SAR to left-shift. */
3487 tcg_gen_andi_reg(shift, cpu_sar, 31);
3488 tcg_gen_xori_reg(shift, shift, 31);
3490 mask = tcg_temp_new();
3491 tcg_gen_movi_reg(mask, msb + (msb - 1));
3492 tcg_gen_and_reg(tmp, val, mask);
3494 tcg_gen_shl_reg(mask, mask, shift);
3495 tcg_gen_shl_reg(tmp, tmp, shift);
3496 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3497 tcg_gen_or_reg(dest, dest, tmp);
3499 tcg_gen_shl_reg(dest, tmp, shift);
3501 save_gpr(ctx, rt, dest);
3503 /* Install the new nullification. */
3504 cond_free(&ctx->null_cond);
3506 ctx->null_cond = do_sed_cond(ctx, c, false, dest);
3508 return nullify_end(ctx);
3511 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3516 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3519 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3524 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, tcg_constant_reg(a->i));
3527 static bool trans_be(DisasContext *ctx, arg_be *a)
3531 #ifdef CONFIG_USER_ONLY
3532 /* ??? It seems like there should be a good way of using
3533 "be disp(sr2, r0)", the canonical gateway entry mechanism
3534 to our advantage. But that appears to be inconvenient to
3535 manage alongside branch delay slots. Therefore we handle
3536 entry into the gateway page via absolute address. */
3537 /* Since we don't implement spaces, just branch. Do notice the special
3538 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3539 goto_tb to the TB containing the syscall. */
3541 return do_dbranch(ctx, a->disp, a->l, a->n);
3547 tmp = tcg_temp_new();
3548 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3549 tmp = do_ibranch_priv(ctx, tmp);
3551 #ifdef CONFIG_USER_ONLY
3552 return do_ibranch(ctx, tmp, a->l, a->n);
3554 TCGv_i64 new_spc = tcg_temp_new_i64();
3556 load_spr(ctx, new_spc, a->sp);
3558 copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3559 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3561 if (a->n && use_nullify_skip(ctx)) {
3562 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3563 tcg_gen_addi_reg(tmp, tmp, 4);
3564 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3565 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3566 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3568 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3569 if (ctx->iaoq_b == -1) {
3570 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3572 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3573 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3574 nullify_set(ctx, a->n);
3576 tcg_gen_lookup_and_goto_ptr();
3577 ctx->base.is_jmp = DISAS_NORETURN;
3578 return nullify_end(ctx);
3582 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3584 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3587 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3589 target_ureg dest = iaoq_dest(ctx, a->disp);
3593 /* Make sure the caller hasn't done something weird with the queue.
3594 * ??? This is not quite the same as the PSW[B] bit, which would be
3595 * expensive to track. Real hardware will trap for
3596 * b gateway
3597 * b gateway+4 (in delay slot of first branch)
3598 * However, checking for a non-sequential instruction queue *will*
3599 * diagnose the security hole
3600 * b gateway
3601 * b evil
3602 * in which instructions at evil would run with increased privs.
3604 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3605 return gen_illegal(ctx);
3608 #ifndef CONFIG_USER_ONLY
3609 if (ctx->tb_flags & PSW_C) {
3610 CPUHPPAState *env = cpu_env(ctx->cs);
3611 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3612 /* If we could not find a TLB entry, then we need to generate an
3613 ITLB miss exception so the kernel will provide it.
3614 The resulting TLB fill operation will invalidate this TB and
3615 we will re-translate, at which point we *will* be able to find
3616 the TLB entry and determine if this is in fact a gateway page. */
3618 gen_excp(ctx, EXCP_ITLB_MISS);
3621 /* No change for non-gateway pages or for priv decrease. */
3622 if (type >= 4 && type - 4 < ctx->privilege) {
3623 dest = deposit32(dest, 0, 2, type - 4);
3626 dest &= -4; /* priv = 0 */
3631 TCGv_reg tmp = dest_gpr(ctx, a->l);
3632 if (ctx->privilege < 3) {
3633 tcg_gen_andi_reg(tmp, tmp, -4);
3635 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3636 save_gpr(ctx, a->l, tmp);
3639 return do_dbranch(ctx, dest, 0, a->n);
3642 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3645 TCGv_reg tmp = tcg_temp_new();
3646 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3647 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3648 /* The computation here never changes privilege level. */
3649 return do_ibranch(ctx, tmp, a->l, a->n);
3651 /* BLR R0,RX is a good way to load PC+8 into RX. */
3652 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3656 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3661 dest = load_gpr(ctx, a->b);
3663 dest = tcg_temp_new();
3664 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3665 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3667 dest = do_ibranch_priv(ctx, dest);
3668 return do_ibranch(ctx, dest, 0, a->n);
3671 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3675 #ifdef CONFIG_USER_ONLY
3676 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3677 return do_ibranch(ctx, dest, a->l, a->n);
3680 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3682 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3683 if (ctx->iaoq_b == -1) {
3684 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3686 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3687 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3689 copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3691 nullify_set(ctx, a->n);
3692 tcg_gen_lookup_and_goto_ptr();
3693 ctx->base.is_jmp = DISAS_NORETURN;
3694 return nullify_end(ctx);
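/*
 * Floating-point register-to-register operations.  Most are thin
 * wrappers routing one or two values through a tcg op or helper via
 * do_fop_*; copy/abs/neg/negabs need no helper at all, just integer
 * moves and sign-bit masks.
 */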
3702 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3704 tcg_gen_mov_i32(dst, src);
3707 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3711 if (TARGET_REGISTER_BITS == 64) {
3712 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3714 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3718 save_frd(0, tcg_constant_i64(ret));
3719 return nullify_end(ctx);
3722 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3724 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3727 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3729 tcg_gen_mov_i64(dst, src);
3732 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3734 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3737 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3739 tcg_gen_andi_i32(dst, src, INT32_MAX);
3742 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3744 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3747 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3749 tcg_gen_andi_i64(dst, src, INT64_MAX);
3752 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3754 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3757 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3759 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3762 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3764 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3767 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3769 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3772 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3774 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3777 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3779 tcg_gen_xori_i32(dst, src, INT32_MIN);
3782 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3784 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3787 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3789 tcg_gen_xori_i64(dst, src, INT64_MIN);
3792 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3794 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3797 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3799 tcg_gen_ori_i32(dst, src, INT32_MIN);
3802 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3804 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3807 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3809 tcg_gen_ori_i64(dst, src, INT64_MIN);
3812 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3814 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3821 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3823 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3826 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3828 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3831 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3833 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3836 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3838 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3841 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3843 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3846 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3848 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3851 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3853 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3856 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3858 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3861 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3863 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3866 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3868 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3871 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3873 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3876 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3878 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3881 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3883 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3886 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3888 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3891 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3893 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3896 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3898 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3901 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3903 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3906 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3908 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3911 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3913 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3916 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3918 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3921 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3923 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3926 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3928 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3931 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3933 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3936 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3938 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3941 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3943 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3946 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3948 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
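/*
 * Floating-point compares set the compare bit(s) in the FP status
 * register; FTEST then converts the shadow copy of those bits
 * (fr0_shadow) into a nullification condition.
 */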
3955 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3957 TCGv_i32 ta, tb, tc, ty;
3961 ta = load_frw0_i32(a->r1);
3962 tb = load_frw0_i32(a->r2);
3963 ty = tcg_constant_i32(a->y);
3964 tc = tcg_constant_i32(a->c);
3966 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
3968 return nullify_end(ctx);
3971 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3978 ta = load_frd0(a->r1);
3979 tb = load_frd0(a->r2);
3980 ty = tcg_constant_i32(a->y);
3981 tc = tcg_constant_i32(a->c);
3983 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3985 return nullify_end(ctx);
3988 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3995 tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4002 case 0: /* simple */
4003 tcg_gen_andi_reg(t, t, 0x4000000);
4004 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4032 TCGv_reg c = tcg_constant_reg(mask);
4033 tcg_gen_or_reg(t, t, c);
4034 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4036 tcg_gen_andi_reg(t, t, mask);
4037 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4040 unsigned cbit = (a->y ^ 1) - 1;
4042 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
4043 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4047 return nullify_end(ctx);
4054 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4056 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4059 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4061 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4064 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4066 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4069 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4071 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4074 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4076 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4079 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4081 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4084 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4086 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4089 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4091 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4094 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4100 x = load_frw0_i64(a->r1);
4101 y = load_frw0_i64(a->r2);
4102 tcg_gen_mul_i64(x, x, y);
4105 return nullify_end(ctx);
4108 /* Convert the fmpyadd single-precision register encodings to standard. */
4109 static inline int fmpyadd_s_reg(unsigned r)
4111 return (r & 16) * 2 + 16 + (r & 15);
4114 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4116 int tm = fmpyadd_s_reg(a->tm);
4117 int ra = fmpyadd_s_reg(a->ra);
4118 int ta = fmpyadd_s_reg(a->ta);
4119 int rm2 = fmpyadd_s_reg(a->rm2);
4120 int rm1 = fmpyadd_s_reg(a->rm1);
4124 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4125 do_fop_weww(ctx, ta, ta, ra,
4126 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4128 return nullify_end(ctx);
4131 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4133 return do_fmpyadd_s(ctx, a, false);
4136 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4138 return do_fmpyadd_s(ctx, a, true);
4141 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4145 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4146 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4147 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4149 return nullify_end(ctx);
4152 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4154 return do_fmpyadd_d(ctx, a, false);
4157 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4159 return do_fmpyadd_d(ctx, a, true);
4162 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4167 x = load_frw0_i32(a->rm1);
4168 y = load_frw0_i32(a->rm2);
4169 z = load_frw0_i32(a->ra3);
4172 gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4174 gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4177 save_frw_i32(a->t, x);
4178 return nullify_end(ctx);
4181 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4186 x = load_frd0(a->rm1);
4187 y = load_frd0(a->rm2);
4188 z = load_frd0(a->ra3);
4191 gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4193 gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4197 return nullify_end(ctx);
4200 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4202 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4203 #ifndef CONFIG_USER_ONLY
4204 if (a->i == 0x100) {
4205 /* emulate PDC BTLB, called by SeaBIOS-hppa */
4207 gen_helper_diag_btlb(tcg_env);
4208 return nullify_end(ctx);
4211 qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
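/*
 * Translator callbacks, invoked by the generic translator_loop.  The
 * main subtlety is maintaining the two-entry instruction address queue
 * (iaoq_f/iaoq_b) and the nullification state across TB boundaries.
 */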
4215 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4217 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4221 ctx->tb_flags = ctx->base.tb->flags;
4222 ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4224 #ifdef CONFIG_USER_ONLY
4225 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4226 ctx->mmu_idx = MMU_USER_IDX;
4227 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4228 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4229 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4231 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4232 ctx->mmu_idx = (ctx->tb_flags & PSW_D
4233 ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4236 /* Recover the IAOQ values from the GVA + PRIV. */
4237 uint64_t cs_base = ctx->base.tb->cs_base;
4238 uint64_t iasq_f = cs_base & ~0xffffffffull;
4239 int32_t diff = cs_base;
4241 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4242 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4245 ctx->iaoq_n_var = NULL;
4247 /* Bound the number of instructions by those left on the page. */
4248 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4249 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4252 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4254 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4256 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4257 ctx->null_cond = cond_make_f();
4258 ctx->psw_n_nonzero = false;
4259 if (ctx->tb_flags & PSW_N) {
4260 ctx->null_cond.c = TCG_COND_ALWAYS;
4261 ctx->psw_n_nonzero = true;
4263 ctx->null_lab = NULL;
4266 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4268 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4270 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4273 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4275 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4276 CPUHPPAState *env = cpu_env(cs);
4279 /* Execute one insn. */
4280 #ifdef CONFIG_USER_ONLY
4281 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4283 ret = ctx->base.is_jmp;
4284 assert(ret != DISAS_NEXT);
4288 /* Always fetch the insn, even if nullified, so that we check
4289 the page permissions for execute. */
4290 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4292 /* Set up the IA queue for the next insn.
4293 This will be overwritten by a branch. */
4294 if (ctx->iaoq_b == -1) {
4296 ctx->iaoq_n_var = tcg_temp_new();
4297 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4299 ctx->iaoq_n = ctx->iaoq_b + 4;
4300 ctx->iaoq_n_var = NULL;
4303 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4304 ctx->null_cond.c = TCG_COND_NEVER;
4308 if (!decode(ctx, insn)) {
4311 ret = ctx->base.is_jmp;
4312 assert(ctx->null_lab == NULL);
4316 /* Advance the insn queue. Note that this check also detects
4317 a priority change within the instruction queue. */
4318 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4319 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4320 && use_goto_tb(ctx, ctx->iaoq_b)
4321 && (ctx->null_cond.c == TCG_COND_NEVER
4322 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4323 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4324 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4325 ctx->base.is_jmp = ret = DISAS_NORETURN;
4327 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4330 ctx->iaoq_f = ctx->iaoq_b;
4331 ctx->iaoq_b = ctx->iaoq_n;
4332 ctx->base.pc_next += 4;
4335 case DISAS_NORETURN:
4336 case DISAS_IAQ_N_UPDATED:
4340 case DISAS_IAQ_N_STALE:
4341 case DISAS_IAQ_N_STALE_EXIT:
4342 if (ctx->iaoq_f == -1) {
4343 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4344 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4345 #ifndef CONFIG_USER_ONLY
4346 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4349 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4351 : DISAS_IAQ_N_UPDATED);
4352 } else if (ctx->iaoq_b == -1) {
4353 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4358 g_assert_not_reached();
4362 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4364 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4365 DisasJumpType is_jmp = ctx->base.is_jmp;
4368 case DISAS_NORETURN:
4370 case DISAS_TOO_MANY:
4371 case DISAS_IAQ_N_STALE:
4372 case DISAS_IAQ_N_STALE_EXIT:
4373 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4374 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4377 case DISAS_IAQ_N_UPDATED:
4378 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4379 tcg_gen_lookup_and_goto_ptr();
4384 tcg_gen_exit_tb(NULL, 0);
4387 g_assert_not_reached();
4391 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4392 CPUState *cs, FILE *logfile)
4394 target_ulong pc = dcbase->pc_first;
4396 #ifdef CONFIG_USER_ONLY
4399 fprintf(logfile, "IN:\n0x00000000: (null)\n");
4402 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
4405 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
4408 fprintf(logfile, "IN:\n0x00000100: syscall\n");
4413 fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4414 target_disas(logfile, cs, pc, dcbase->tb->size);
4417 static const TranslatorOps hppa_tr_ops = {
4418 .init_disas_context = hppa_tr_init_disas_context,
4419 .tb_start = hppa_tr_tb_start,
4420 .insn_start = hppa_tr_insn_start,
4421 .translate_insn = hppa_tr_translate_insn,
4422 .tb_stop = hppa_tr_tb_stop,
4423 .disas_log = hppa_tr_disas_log,
4426 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4427 target_ulong pc, void *host_pc)
4430 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);