2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
32 #define HELPER_H "helper.h"
33 #include "exec/helper-info.c.inc"
/*
 * NOTE(review): extraction artifact — interior lines of these definitions
 * are missing (original line numbers are non-contiguous).  Comments below
 * describe only what is visible.
 */
36 /* Choose to use explicit sizes within this file. */
/* Condition under which the *next* insn is (or is not) nullified. */
39 typedef struct DisasCond {
/* Per-translation-block state; embeds the generic translator base. */
44 typedef struct DisasContext {
45 DisasContextBase base;
65 #ifdef CONFIG_USER_ONLY
/*
 * UNALIGN(C): memory-op alignment flags for user vs system emulation.
 * User-only takes a per-context setting; system mode forces MO_ALIGN.
 */
70 #ifdef CONFIG_USER_ONLY
71 #define UNALIGN(C) (C)->unalign
73 #define UNALIGN(C) MO_ALIGN
/*
 * NOTE(review): extraction artifact — function bodies below are missing
 * interior lines; only the visible statements are documented.
 *
 * These are !function expanders referenced by the decodetree description:
 * they massage raw instruction fields into the operand values the
 * trans_* functions consume.
 */
76 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
77 static int expand_sm_imm(DisasContext *ctx, int val)
80 val = (val & ~PSW_SM_E) | PSW_E;
83 val = (val & ~PSW_SM_W) | PSW_W;
88 /* Inverted space register indicates 0 means sr0 not inferred from base. */
89 static int expand_sr3x(DisasContext *ctx, int val)
94 /* Convert the M:A bits within a memory insn to the tri-state value
95 we use for the final M. */
96 static int ma_to_m(DisasContext *ctx, int val)
/* -1 = pre-modify, 1 = post-modify, 0 = no base-register update. */
98 return val & 2 ? (val & 1 ? -1 : 1) : 0;
101 /* Convert the sign of the displacement to a pre or post-modify. */
102 static int pos_to_m(DisasContext *ctx, int val)
107 static int neg_to_m(DisasContext *ctx, int val)
112 /* Used for branch targets and fp memory ops. */
113 static int expand_shl2(DisasContext *ctx, int val)
118 /* Used for fp memory ops. */
119 static int expand_shl3(DisasContext *ctx, int val)
124 /* Used for assemble_21. */
125 static int expand_shl11(DisasContext *ctx, int val)
130 static int assemble_6(DisasContext *ctx, int val)
133 * Officially, 32 * x + 32 - y.
134 * Here, x is already in bit 5, and y is [4:0].
135 * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
136 * with the overflow from bit 4 summing with x.
138 return (val ^ 31) + 1;
141 /* Translate CMPI doubleword conditions to standard. */
142 static int cmpbid_c(DisasContext *ctx, int val)
144 return val ? val : 4; /* 0 == "*<<" */
148 /* Include the auto-generated decoder. */
149 #include "decode-insns.c.inc"
/*
 * Target-specific DisasJumpType values.  These extend the generic
 * DISAS_* enumeration with HPPA-specific end-of-TB reasons.
 */
151 /* We are not using a goto_tb (for whatever reason), but have updated
152 the iaq (for whatever reason), so don't do it again on exit. */
153 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
155 /* We are exiting the TB, but have neither emitted a goto_tb, nor
156 updated the iaq for the next instruction to be executed. */
157 #define DISAS_IAQ_N_STALE DISAS_TARGET_1
159 /* Similarly, but we want to return to the main loop immediately
160 to recognize unmasked interrupts. */
161 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
/* Exit the TB unconditionally -- presumably via exit_tb; confirm at use sites. */
162 #define DISAS_EXIT DISAS_TARGET_3
164 /* global register indexes */
/*
 * TCG global handles mirroring CPUHPPAState fields: general registers,
 * space registers (sr0-3 plus srH), the front/back instruction-address
 * offset and space queues, SAR, and the PSW N/V/carry bits.
 * Initialized once in hppa_translate_init().
 */
165 static TCGv_i64 cpu_gr[32];
166 static TCGv_i64 cpu_sr[4];
167 static TCGv_i64 cpu_srH;
168 static TCGv_i64 cpu_iaoq_f;
169 static TCGv_i64 cpu_iaoq_b;
170 static TCGv_i64 cpu_iasq_f;
171 static TCGv_i64 cpu_iasq_b;
172 static TCGv_i64 cpu_sar;
173 static TCGv_i64 cpu_psw_n;
174 static TCGv_i64 cpu_psw_v;
175 static TCGv_i64 cpu_psw_cb;
176 static TCGv_i64 cpu_psw_cb_msb;
/*
 * One-time translator initialization: create the TCG globals declared
 * above, bound to their offsets within CPUHPPAState.
 * NOTE(review): extraction artifact — interior lines are missing.
 */
178 void hppa_translate_init(void)
180 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
/* Table-driven creation of the simple (name, offset) globals. */
182 typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
183 static const GlobalVar vars[] = {
184 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
195 /* Use the symbolic register names that match the disassembler. */
196 static const char gr_names[32][4] = {
197 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
198 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
199 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
200 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
202 /* SR[4-7] are not global registers so that we can index them. */
203 static const char sr_names[5][4] = {
204 "sr0", "sr1", "sr2", "sr3", "srH"
/* gr[0] is the architectural zero register, so start at 1. */
210 for (i = 1; i < 32; i++) {
211 cpu_gr[i] = tcg_global_mem_new(tcg_env,
212 offsetof(CPUHPPAState, gr[i]),
215 for (i = 0; i < 4; i++) {
216 cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
217 offsetof(CPUHPPAState, sr[i]),
220 cpu_srH = tcg_global_mem_new_i64(tcg_env,
221 offsetof(CPUHPPAState, sr[4]),
224 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
225 const GlobalVar *v = &vars[i];
226 *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
229 cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
230 offsetof(CPUHPPAState, iasq_f),
232 cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
233 offsetof(CPUHPPAState, iasq_b),
/*
 * DisasCond constructors.  NOTE(review): extraction artifact — bodies
 * are missing interior lines.
 */
/* Condition that is never true (insn never nullified). */
237 static DisasCond cond_make_f(void)
/* Condition that is always true. */
246 static DisasCond cond_make_t(void)
249 .c = TCG_COND_ALWAYS,
/* Condition testing PSW[N] != 0 (a1 fixed at constant zero). */
255 static DisasCond cond_make_n(void)
260 .a1 = tcg_constant_i64(0)
/* Build a condition taking ownership of temporaries a0/a1. */
264 static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
266 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
267 return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
/* As cond_make_tmp, comparing a0 against constant zero. */
270 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
272 return cond_make_tmp(c, a0, tcg_constant_i64(0));
/* As cond_make_0_tmp, but snapshot a0 into a fresh temp first. */
275 static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
277 TCGv_i64 tmp = tcg_temp_new_i64();
278 tcg_gen_mov_i64(tmp, a0);
279 return cond_make_0_tmp(c, tmp);
/* General two-operand condition; snapshots both operands. */
282 static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
284 TCGv_i64 t0 = tcg_temp_new_i64();
285 TCGv_i64 t1 = tcg_temp_new_i64();
287 tcg_gen_mov_i64(t0, a0);
288 tcg_gen_mov_i64(t1, a1);
289 return cond_make_tmp(c, t0, t1);
/* Reset a condition to "never" (discarding its operands). */
292 static void cond_free(DisasCond *cond)
299 case TCG_COND_ALWAYS:
300 cond->c = TCG_COND_NEVER;
/* Read accessor for general register REG.  NOTE(review): body elided. */
307 static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
/*
 * Destination for a write to REG.  A fresh temp is used for r0 (writes
 * discarded) or when the insn may be nullified, so that the real global
 * is only committed via save_gpr -> save_or_nullify.
 */
316 static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
318 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
319 return tcg_temp_new_i64();
/*
 * Commit T to DEST, unless the current insn is nullified, in which case
 * DEST keeps its old value (via movcond on the null condition).
 */
325 static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
327 if (ctx->null_cond.c != TCG_COND_NEVER) {
328 tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
329 ctx->null_cond.a1, dest, t)
331 tcg_gen_mov_i64(dest, t);
/* Write T to general register REG, honoring nullification. */
335 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
338 save_or_nullify(ctx, cpu_gr[reg], t);
/*
 * Floating-point register accessors.  The 64-bit fr[] array is split
 * into 32-bit halves addressed by rt bit 5 (HI_OFS/LO_OFS).
 * NOTE(review): extraction artifact — bodies are missing interior lines;
 * the rt==0 special-casing in the frw0/frd0 variants (fr0 reads as the
 * FP status / zero) is only partially visible.
 */
350 static TCGv_i32 load_frw_i32(unsigned rt)
352 TCGv_i32 ret = tcg_temp_new_i32();
353 tcg_gen_ld_i32(ret, tcg_env,
354 offsetof(CPUHPPAState, fr[rt & 31])
355 + (rt & 32 ? LO_OFS : HI_OFS));
/* As load_frw_i32, but rt==0 yields constant zero (visible movi below). */
359 static TCGv_i32 load_frw0_i32(unsigned rt)
362 TCGv_i32 ret = tcg_temp_new_i32();
363 tcg_gen_movi_i32(ret, 0);
366 return load_frw_i32(rt);
/* Load single-precision register rt zero-extended into an i64. */
370 static TCGv_i64 load_frw0_i64(unsigned rt)
372 TCGv_i64 ret = tcg_temp_new_i64();
374 tcg_gen_movi_i64(ret, 0);
376 tcg_gen_ld32u_i64(ret, tcg_env,
377 offsetof(CPUHPPAState, fr[rt & 31])
378 + (rt & 32 ? LO_OFS : HI_OFS));
383 static void save_frw_i32(unsigned rt, TCGv_i32 val)
385 tcg_gen_st_i32(val, tcg_env,
386 offsetof(CPUHPPAState, fr[rt & 31])
387 + (rt & 32 ? LO_OFS : HI_OFS));
/* Double-precision (full 64-bit) register load/store. */
393 static TCGv_i64 load_frd(unsigned rt)
395 TCGv_i64 ret = tcg_temp_new_i64();
396 tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
400 static TCGv_i64 load_frd0(unsigned rt)
403 TCGv_i64 ret = tcg_temp_new_i64();
404 tcg_gen_movi_i64(ret, 0);
411 static void save_frd(unsigned rt, TCGv_i64 val)
413 tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
/*
 * Read space register REG into DEST.  User-only has no spaces (zero);
 * system mode uses the sr0-3 globals, srH when TB_FLAG_SR_SAME says all
 * upper spaces match, else a direct load from env.
 */
416 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
418 #ifdef CONFIG_USER_ONLY
419 tcg_gen_movi_i64(dest, 0);
422 tcg_gen_mov_i64(dest, cpu_sr[reg]);
423 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
424 tcg_gen_mov_i64(dest, cpu_srH);
426 tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
/*
 * PSW[N] nullification machinery.  An HPPA insn can nullify its
 * successor; ctx->null_cond carries that pending condition between
 * insns.  NOTE(review): extraction artifact — bodies below are missing
 * interior lines.
 */
431 /* Skip over the implementation of an insn that has been nullified.
432 Use this when the insn is too complex for a conditional move. */
433 static void nullify_over(DisasContext *ctx)
435 if (ctx->null_cond.c != TCG_COND_NEVER) {
436 /* The always condition should have been handled in the main loop. */
437 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
439 ctx->null_lab = gen_new_label();
441 /* If we're using PSW[N], copy it to a temp because... */
442 if (ctx->null_cond.a0 == cpu_psw_n) {
443 ctx->null_cond.a0 = tcg_temp_new_i64();
444 tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
446 /* ... we clear it before branching over the implementation,
447 so that (1) it's clear after nullifying this insn and
448 (2) if this insn nullifies the next, PSW[N] is valid. */
449 if (ctx->psw_n_nonzero) {
450 ctx->psw_n_nonzero = false;
451 tcg_gen_movi_i64(cpu_psw_n, 0);
454 tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
455 ctx->null_cond.a1, ctx->null_lab);
456 cond_free(&ctx->null_cond);
460 /* Save the current nullification state to PSW[N]. */
461 static void nullify_save(DisasContext *ctx)
463 if (ctx->null_cond.c == TCG_COND_NEVER) {
464 if (ctx->psw_n_nonzero) {
465 tcg_gen_movi_i64(cpu_psw_n, 0);
469 if (ctx->null_cond.a0 != cpu_psw_n) {
470 tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
471 ctx->null_cond.a0, ctx->null_cond.a1);
472 ctx->psw_n_nonzero = true;
474 cond_free(&ctx->null_cond);
477 /* Set a PSW[N] to X. The intention is that this is used immediately
478 before a goto_tb/exit_tb, so that there is no fallthru path to other
479 code within the TB. Therefore we do not update psw_n_nonzero. */
480 static void nullify_set(DisasContext *ctx, bool x)
482 if (ctx->psw_n_nonzero || x) {
483 tcg_gen_movi_i64(cpu_psw_n, x);
487 /* Mark the end of an instruction that may have been nullified.
488 This is the pair to nullify_over. Always returns true so that
489 it may be tail-called from a translate function. */
490 static bool nullify_end(DisasContext *ctx)
492 TCGLabel *null_lab = ctx->null_lab;
493 DisasJumpType status = ctx->base.is_jmp;
495 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
496 For UPDATED, we cannot update on the nullified path. */
497 assert(status != DISAS_IAQ_N_UPDATED);
499 if (likely(null_lab == NULL)) {
500 /* The current insn wasn't conditional or handled the condition
501 applied to it without a branch, so the (new) setting of
502 NULL_COND can be applied directly to the next insn. */
505 ctx->null_lab = NULL;
507 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
508 /* The next instruction will be unconditional,
509 and NULL_COND already reflects that. */
510 gen_set_label(null_lab);
512 /* The insn that we just executed is itself nullifying the next
513 instruction. Store the condition in the PSW[N] global.
514 We asserted PSW[N] = 0 in nullify_over, so that after the
515 label we have the proper value in place. */
517 gen_set_label(null_lab);
518 ctx->null_cond = cond_make_n();
/* A nullified insn cannot be a real NORETURN; resume normal flow. */
520 if (status == DISAS_NORETURN) {
521 ctx->base.is_jmp = DISAS_NEXT;
/*
 * Mask of valid offset bits within a global virtual address:
 * 62 bits with PSW_W (wide) set, else 32 bits.
 */
526 static uint64_t gva_offset_mask(DisasContext *ctx)
528 return (ctx->tb_flags & PSW_W
529 ? MAKE_64BIT_MASK(0, 62)
530 : MAKE_64BIT_MASK(0, 32));
/*
 * Write an IAOQ entry to DEST: use immediate IVAL when known
 * (visible movi path), else mask the variable VVAL.
 * NOTE(review): body is missing interior lines.
 */
533 static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
534 uint64_t ival, TCGv_i64 vval)
536 uint64_t mask = gva_offset_mask(ctx);
539 tcg_gen_movi_i64(dest, ival & mask);
542 tcg_debug_assert(vval != NULL);
545 * We know that the IAOQ is already properly masked.
546 * This optimization is primarily for "iaoq_f = iaoq_b".
548 if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
549 tcg_gen_mov_i64(dest, vval);
551 tcg_gen_andi_i64(dest, vval, mask);
/* Branch target: displacement is relative to iaoq_f + 8. */
555 static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
557 return ctx->iaoq_f + disp + 8;
/* Raise EXCEPTION via the excp helper (does not return to generated code). */
560 static void gen_excp_1(int exception)
562 gen_helper_excp(tcg_env, tcg_constant_i32(exception));
/* Flush the IAQ to the CPU state, then raise EXCEPTION; ends the TB. */
565 static void gen_excp(DisasContext *ctx, int exception)
567 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
568 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
570 gen_excp_1(exception);
571 ctx->base.is_jmp = DISAS_NORETURN;
/* As gen_excp, additionally latching the insn into CR_IIR for the OS. */
574 static bool gen_excp_iir(DisasContext *ctx, int exc)
577 tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
578 tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
580 return nullify_end(ctx);
583 static bool gen_illegal(DisasContext *ctx)
585 return gen_excp_iir(ctx, EXCP_ILL);
588 #ifdef CONFIG_USER_ONLY
/* User-only: privileged insns always trap.  System: trap unless ring 0. */
589 #define CHECK_MOST_PRIVILEGED(EXCP) \
590 return gen_excp_iir(ctx, EXCP)
592 #define CHECK_MOST_PRIVILEGED(EXCP) \
594 if (ctx->privilege != 0) { \
595 return gen_excp_iir(ctx, EXCP); \
600 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
602 return translator_use_goto_tb(&ctx->base, dest);
605 /* If the next insn is to be nullified, and it's on the same page,
606 and we're not attempting to set a breakpoint on it, then we can
607 totally skip the nullified insn. This avoids creating and
608 executing a TB that merely branches to the next TB. */
609 static bool use_nullify_skip(DisasContext *ctx)
611 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
612 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
/*
 * End the TB with a chained goto_tb when both queue entries are known
 * constants (f/b != -1), else fall back to lookup_and_goto_ptr.
 */
615 static void gen_goto_tb(DisasContext *ctx, int which,
616 uint64_t f, uint64_t b)
618 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
619 tcg_gen_goto_tb(which);
620 copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
621 copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
622 tcg_gen_exit_tb(ctx->base.tb, which);
624 copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
625 copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
626 tcg_gen_lookup_and_goto_ptr();
/* Condition codes 2, 3, 6 require the signed-overflow (SV) value. */
630 static bool cond_need_sv(int c)
632 return c == 2 || c == 3 || c == 6;
/* Condition codes 4, 5 require the carry/borrow (CB) value. */
635 static bool cond_need_cb(int c)
637 return c == 4 || c == 5;
640 /* Need extensions from TCGv_i32 to TCGv_i64. */
/* True when operating in 32-bit mode (not PA2.0 with D-bit set). */
641 static bool cond_need_ext(DisasContext *ctx, bool d)
643 return !(ctx->is_pa20 && d);
/*
 * NOTE(review): extraction artifact — switch structure below is missing
 * break/return lines; only visible case bodies are documented.
 * CF bit 0 negates the sense of condition CF>>1 (inversion at the end).
 */
647 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
648 * the Parisc 1.1 Architecture Reference Manual for details.
651 static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
652 TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
658 case 0: /* Never / TR (0 / 1) */
659 cond = cond_make_f();
661 case 1: /* = / <> (Z / !Z) */
662 if (cond_need_ext(ctx, d)) {
663 tmp = tcg_temp_new_i64();
664 tcg_gen_ext32u_i64(tmp, res);
667 cond = cond_make_0(TCG_COND_EQ, res);
669 case 2: /* < / >= (N ^ V / !(N ^ V) */
670 tmp = tcg_temp_new_i64();
671 tcg_gen_xor_i64(tmp, res, sv);
672 if (cond_need_ext(ctx, d)) {
673 tcg_gen_ext32s_i64(tmp, tmp);
675 cond = cond_make_0_tmp(TCG_COND_LT, tmp);
677 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
681 * ((res < 0) ^ (sv < 0)) | !res
682 * ((res ^ sv) < 0) | !res
683 * (~(res ^ sv) >= 0) | !res
684 * !(~(res ^ sv) >> 31) | !res
685 * !(~(res ^ sv) >> 31 & res)
687 tmp = tcg_temp_new_i64();
688 tcg_gen_eqv_i64(tmp, res, sv);
689 if (cond_need_ext(ctx, d)) {
690 tcg_gen_sextract_i64(tmp, tmp, 31, 1);
691 tcg_gen_and_i64(tmp, tmp, res);
692 tcg_gen_ext32u_i64(tmp, tmp);
694 tcg_gen_sari_i64(tmp, tmp, 63);
695 tcg_gen_and_i64(tmp, tmp, res);
697 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
699 case 4: /* NUV / UV (!C / C) */
700 /* Only bit 0 of cb_msb is ever set. */
701 cond = cond_make_0(TCG_COND_EQ, cb_msb);
703 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
704 tmp = tcg_temp_new_i64();
705 tcg_gen_neg_i64(tmp, cb_msb);
706 tcg_gen_and_i64(tmp, tmp, res);
707 if (cond_need_ext(ctx, d)) {
708 tcg_gen_ext32u_i64(tmp, tmp);
710 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
712 case 6: /* SV / NSV (V / !V) */
713 if (cond_need_ext(ctx, d)) {
714 tmp = tcg_temp_new_i64();
715 tcg_gen_ext32s_i64(tmp, sv);
718 cond = cond_make_0(TCG_COND_LT, sv);
720 case 7: /* OD / EV */
721 tmp = tcg_temp_new_i64();
722 tcg_gen_andi_i64(tmp, res, 1);
723 cond = cond_make_0_tmp(TCG_COND_NE, tmp);
726 g_assert_not_reached();
/* CF bit 0 set: use the negated condition. */
729 cond.c = tcg_invert_cond(cond.c);
735 /* Similar, but for the special case of subtraction without borrow, we
736 can use the inputs directly. This can allow other computation to be
737 deleted as unused. */
/* NOTE(review): extraction artifact — most case labels are elided. */
739 static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
740 TCGv_i64 res, TCGv_i64 in1,
741 TCGv_i64 in2, TCGv_i64 sv)
759 case 4: /* << / >>= */
763 case 5: /* <<= / >> */
/* Conditions not expressible as an input comparison defer to do_cond. */
768 return do_cond(ctx, cf, d, res, NULL, sv);
772 tc = tcg_invert_cond(tc);
/* In 32-bit mode, compare suitably extended copies of the inputs. */
774 if (cond_need_ext(ctx, d)) {
775 TCGv_i64 t1 = tcg_temp_new_i64();
776 TCGv_i64 t2 = tcg_temp_new_i64();
779 tcg_gen_ext32u_i64(t1, in1);
780 tcg_gen_ext32u_i64(t2, in2);
782 tcg_gen_ext32s_i64(t1, in1);
783 tcg_gen_ext32s_i64(t2, in2);
785 return cond_make_tmp(tc, t1, t2);
787 return cond_make(tc, in1, in2);
791 * Similar, but for logicals, where the carry and overflow bits are not
792 * computed, and use of them is undefined.
794 * Undefined or not, hardware does not trap. It seems reasonable to
795 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
796 * how cases c={2,3} are treated.
/* NOTE(review): extraction artifact — many case labels are elided. */
799 static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
/* With C/V assumed zero, these undefined conditions are constant-false. */
807 case 9: /* undef, C */
808 case 11: /* undef, C & !Z */
809 case 12: /* undef, V */
810 return cond_make_f();
/* ... and these are constant-true. */
813 case 8: /* undef, !C */
814 case 10: /* undef, !C | Z */
815 case 13: /* undef, !V */
816 return cond_make_t();
845 return do_cond(ctx, cf, d, res, NULL, NULL);
848 g_assert_not_reached();
851 if (cond_need_ext(ctx, d)) {
852 TCGv_i64 tmp = tcg_temp_new_i64();
855 tcg_gen_ext32u_i64(tmp, res);
857 tcg_gen_ext32s_i64(tmp, res);
859 return cond_make_0_tmp(tc, tmp);
861 return cond_make_0(tc, res);
864 /* Similar, but for shift/extract/deposit conditions. */
866 static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
871 /* Convert the compressed condition codes to standard.
872 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
873 4-7 are the reverse of 0-3. */
880 return do_log_cond(ctx, c * 2 + f, d, res);
883 /* Similar, but for unit conditions. */
/* NOTE(review): extraction artifact — break statements are elided. */
885 static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
886 TCGv_i64 in1, TCGv_i64 in2)
889 TCGv_i64 tmp, cb = NULL;
/* d_repl doubles 32-bit byte/halfword masks across both words for PA2.0. */
890 uint64_t d_repl = d ? 0x0000000100000001ull : 1;
893 /* Since we want to test lots of carry-out bits all at once, do not
894 * do our normal thing and compute carry-in of bit B+1 since that
895 * leaves us with carry bits spread across two words.
897 cb = tcg_temp_new_i64();
898 tmp = tcg_temp_new_i64();
/* cb = (in1 | in2) & ~res | (in1 & in2): per-bit carry-out of in1+in2. */
899 tcg_gen_or_i64(cb, in1, in2);
900 tcg_gen_and_i64(tmp, in1, in2);
901 tcg_gen_andc_i64(cb, cb, res);
902 tcg_gen_or_i64(cb, cb, tmp);
906 case 0: /* never / TR */
907 case 1: /* undefined */
908 case 5: /* undefined */
909 cond = cond_make_f();
912 case 2: /* SBZ / NBZ */
913 /* See hasless(v,1) from
914 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
916 tmp = tcg_temp_new_i64();
917 tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
918 tcg_gen_andc_i64(tmp, tmp, res);
919 tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
920 cond = cond_make_0(TCG_COND_NE, tmp);
923 case 3: /* SHZ / NHZ */
924 tmp = tcg_temp_new_i64();
925 tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
926 tcg_gen_andc_i64(tmp, tmp, res);
927 tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
928 cond = cond_make_0(TCG_COND_NE, tmp);
931 case 4: /* SDC / NDC */
932 tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
933 cond = cond_make_0(TCG_COND_NE, cb);
936 case 6: /* SBC / NBC */
937 tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
938 cond = cond_make_0(TCG_COND_NE, cb);
941 case 7: /* SHC / NHC */
942 tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
943 cond = cond_make_0(TCG_COND_NE, cb);
947 g_assert_not_reached();
/* CF bit 0 set: use the negated condition. */
950 cond.c = tcg_invert_cond(cond.c);
/*
 * Select the carry bit appropriate for the operand width: in 32-bit
 * mode the carry out of bit 31 (cb bit 32), else the cb_msb word.
 * NOTE(review): body elided past the visible extract.
 */
956 static TCGv_i64 get_carry(DisasContext *ctx, bool d,
957 TCGv_i64 cb, TCGv_i64 cb_msb)
959 if (cond_need_ext(ctx, d)) {
960 TCGv_i64 t = tcg_temp_new_i64();
961 tcg_gen_extract_i64(t, cb, 32, 1);
/* As get_carry, reading the live PSW carry/borrow globals. */
967 static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
969 return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
972 /* Compute signed overflow for addition. */
/* sv = (res ^ in1) & ~(in1 ^ in2): overflow iff inputs agree in sign
   and the result sign differs. */
973 static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
974 TCGv_i64 in1, TCGv_i64 in2)
976 TCGv_i64 sv = tcg_temp_new_i64();
977 TCGv_i64 tmp = tcg_temp_new_i64();
979 tcg_gen_xor_i64(sv, res, in1);
980 tcg_gen_xor_i64(tmp, in1, in2);
981 tcg_gen_andc_i64(sv, sv, tmp);
986 /* Compute signed overflow for subtraction. */
/* sv = (res ^ in1) & (in1 ^ in2): overflow iff input signs differ
   and the result sign differs from in1. */
987 static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
988 TCGv_i64 in1, TCGv_i64 in2)
990 TCGv_i64 sv = tcg_temp_new_i64();
991 TCGv_i64 tmp = tcg_temp_new_i64();
993 tcg_gen_xor_i64(sv, res, in1);
994 tcg_gen_xor_i64(tmp, in1, in2);
995 tcg_gen_and_i64(sv, sv, tmp);
/*
 * Common worker for the ADD family (ADD, ADDL, SH*ADD, ADDC, TSV/TC
 * variants).  Computes dest = (in1 << shift) + in2 (+ carry if is_c),
 * derives carry and signed-overflow as required by condition C, emits
 * trap-on-overflow / trap-on-condition, writes back, and installs the
 * new nullification condition.
 * NOTE(review): extraction artifact — interior lines are missing.
 */
1000 static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1001 TCGv_i64 in2, unsigned shift, bool is_l,
1002 bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
1004 TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
1005 unsigned c = cf >> 1;
1008 dest = tcg_temp_new_i64();
/* Apply the SH*ADD pre-shift to in1. */
1014 tmp = tcg_temp_new_i64();
1015 tcg_gen_shli_i64(tmp, in1, shift);
/* Full carry computation, via double-word add2, when carries matter. */
1019 if (!is_l || cond_need_cb(c)) {
1020 cb_msb = tcg_temp_new_i64();
1021 cb = tcg_temp_new_i64();
1023 tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
1025 tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
1026 get_psw_carry(ctx, d), ctx->zero);
/* Per-bit carry vector: cb = in1 ^ in2 ^ dest. */
1028 tcg_gen_xor_i64(cb, in1, in2);
1029 tcg_gen_xor_i64(cb, cb, dest);
1030 if (cond_need_cb(c)) {
1031 cb_cond = get_carry(ctx, d, cb, cb_msb);
/* Fast path: no carries needed, plain add suffices. */
1034 tcg_gen_add_i64(dest, in1, in2);
1036 tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
1040 /* Compute signed overflow if required. */
1042 if (is_tsv || cond_need_sv(c)) {
1043 sv = do_add_sv(ctx, dest, in1, in2);
1045 /* ??? Need to include overflow from shift. */
1046 gen_helper_tsv(tcg_env, sv);
1050 /* Emit any conditional trap before any writeback. */
1051 cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
1053 tmp = tcg_temp_new_i64();
1054 tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1055 gen_helper_tcond(tcg_env, tmp);
1058 /* Write back the result. */
1060 save_or_nullify(ctx, cpu_psw_cb, cb);
1061 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1063 save_gpr(ctx, rt, dest);
1065 /* Install the new nullification. */
1066 cond_free(&ctx->null_cond);
1067 ctx->null_cond = cond;
/* Register-form wrapper: decode operands and delegate to do_add. */
1070 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
1071 bool is_l, bool is_tsv, bool is_tc, bool is_c)
1073 TCGv_i64 tcg_r1, tcg_r2;
1078 tcg_r1 = load_gpr(ctx, a->r1);
1079 tcg_r2 = load_gpr(ctx, a->r2);
1080 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
1081 is_tsv, is_tc, is_c, a->cf, a->d);
1082 return nullify_end(ctx);
/* Immediate-form wrapper (ADDI et al). */
1085 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1086 bool is_tsv, bool is_tc)
1088 TCGv_i64 tcg_im, tcg_r2;
1093 tcg_im = tcg_constant_i64(a->i);
1094 tcg_r2 = load_gpr(ctx, a->r);
1095 /* All ADDI conditions are 32-bit. */
1096 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
1097 return nullify_end(ctx);
/*
 * Common worker for the SUB family (SUB, SUBB, TSV/TC variants).
 * Computes dest = in1 - in2 (with borrow if is_b), derives borrow and
 * signed-overflow, emits any traps, writes back, installs nullification.
 * NOTE(review): extraction artifact — interior lines are missing.
 */
1100 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1101 TCGv_i64 in2, bool is_tsv, bool is_b,
1102 bool is_tc, unsigned cf, bool d)
1104 TCGv_i64 dest, sv, cb, cb_msb, tmp;
1105 unsigned c = cf >> 1;
1108 dest = tcg_temp_new_i64();
1109 cb = tcg_temp_new_i64();
1110 cb_msb = tcg_temp_new_i64();
1113 /* DEST,C = IN1 + ~IN2 + C. */
1114 tcg_gen_not_i64(cb, in2);
1115 tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
1116 get_psw_carry(ctx, d), ctx->zero);
1117 tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
1118 tcg_gen_xor_i64(cb, cb, in1);
1119 tcg_gen_xor_i64(cb, cb, dest);
1122 * DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1123 * operations by seeding the high word with 1 and subtracting.
1125 TCGv_i64 one = tcg_constant_i64(1);
1126 tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
1127 tcg_gen_eqv_i64(cb, in1, in2);
1128 tcg_gen_xor_i64(cb, cb, dest);
1131 /* Compute signed overflow if required. */
1133 if (is_tsv || cond_need_sv(c)) {
1134 sv = do_sub_sv(ctx, dest, in1, in2);
1136 gen_helper_tsv(tcg_env, sv);
1140 /* Compute the condition. We cannot use the special case for borrow. */
1142 cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1144 cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
1147 /* Emit any conditional trap before any writeback. */
1149 tmp = tcg_temp_new_i64();
1150 tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1151 gen_helper_tcond(tcg_env, tmp);
1154 /* Write back the result. */
1155 save_or_nullify(ctx, cpu_psw_cb, cb);
1156 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1157 save_gpr(ctx, rt, dest);
1159 /* Install the new nullification. */
1160 cond_free(&ctx->null_cond);
1161 ctx->null_cond = cond;
/* Register-form wrapper: decode operands and delegate to do_sub. */
1164 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1165 bool is_tsv, bool is_b, bool is_tc)
1167 TCGv_i64 tcg_r1, tcg_r2;
1172 tcg_r1 = load_gpr(ctx, a->r1);
1173 tcg_r2 = load_gpr(ctx, a->r2);
1174 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
1175 return nullify_end(ctx);
/* Immediate-form wrapper (SUBI et al). */
1178 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1180 TCGv_i64 tcg_im, tcg_r2;
1185 tcg_im = tcg_constant_i64(a->i);
1186 tcg_r2 = load_gpr(ctx, a->r);
1187 /* All SUBI conditions are 32-bit. */
1188 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
1189 return nullify_end(ctx);
/*
 * CMPCLR: compare in1 - in2 for the condition, then write zero to rt.
 * NOTE(review): extraction artifact — interior lines are missing.
 */
1192 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1193 TCGv_i64 in2, unsigned cf, bool d)
1198 dest = tcg_temp_new_i64();
1199 tcg_gen_sub_i64(dest, in1, in2);
1201 /* Compute signed overflow if required. */
1203 if (cond_need_sv(cf >> 1)) {
1204 sv = do_sub_sv(ctx, dest, in1, in2);
1207 /* Form the condition for the compare. */
1208 cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
/* Architectural effect of CMPCLR: target register is cleared. */
1211 tcg_gen_movi_i64(dest, 0);
1212 save_gpr(ctx, rt, dest);
1214 /* Install the new nullification. */
1215 cond_free(&ctx->null_cond);
1216 ctx->null_cond = cond;
/*
 * Common worker for logical ops: apply FN, write back, and install the
 * logical-form nullification condition.
 */
1219 static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1220 TCGv_i64 in2, unsigned cf, bool d,
1221 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1223 TCGv_i64 dest = dest_gpr(ctx, rt);
1225 /* Perform the operation, and writeback. */
1227 save_gpr(ctx, rt, dest);
1229 /* Install the new nullification. */
1230 cond_free(&ctx->null_cond);
1232 ctx->null_cond = do_log_cond(ctx, cf, d, dest);
/* Register-form wrapper for do_log. */
1236 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1237 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1239 TCGv_i64 tcg_r1, tcg_r2;
1244 tcg_r1 = load_gpr(ctx, a->r1);
1245 tcg_r2 = load_gpr(ctx, a->r2);
1246 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
1247 return nullify_end(ctx);
/*
 * Common worker for unit ops (UADDCM, DCOR, ...): apply FN, evaluate
 * the unit condition, optionally trap (is_tc), write back.
 */
1250 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1251 TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
1252 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1258 dest = dest_gpr(ctx, rt);
1260 save_gpr(ctx, rt, dest);
1261 cond_free(&ctx->null_cond);
1263 dest = tcg_temp_new_i64();
1266 cond = do_unit_cond(cf, d, dest, in1, in2);
1269 TCGv_i64 tmp = tcg_temp_new_i64();
1270 tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1271 gen_helper_tcond(tcg_env, tmp);
1273 save_gpr(ctx, rt, dest);
1275 cond_free(&ctx->null_cond);
1276 ctx->null_cond = cond;
1280 #ifndef CONFIG_USER_ONLY
1281 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1282 from the top 2 bits of the base register. There are a few system
1283 instructions that have a 3-bit space specifier, for which SR0 is
1284 not special. To handle this, pass ~SP. */
/* NOTE(review): extraction artifact — interior lines are missing. */
1285 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
1295 spc = tcg_temp_new_i64();
1296 load_spr(ctx, spc, sp);
1299 if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1303 ptr = tcg_temp_new_ptr();
1304 tmp = tcg_temp_new_i64();
1305 spc = tcg_temp_new_i64();
1307 /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
1308 tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
1309 tcg_gen_andi_i64(tmp, tmp, 030);
1310 tcg_gen_trunc_i64_ptr(ptr, tmp);
/* Index into env->sr[4..7] to pick the space register dynamically. */
1312 tcg_gen_add_ptr(ptr, ptr, tcg_env);
1313 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
/*
 * Form a global virtual address from base register RB, optional scaled
 * index RX or displacement DISP, and space SP.  Returns the full GVA in
 * *pgva and the raw offset (for base-register modify) in *pofs.
 * NOTE(review): interior lines are missing.
 */
1319 static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
1320 unsigned rb, unsigned rx, int scale, int64_t disp,
1321 unsigned sp, int modify, bool is_phys)
1323 TCGv_i64 base = load_gpr(ctx, rb);
1327 /* Note that RX is mutually exclusive with DISP. */
1329 ofs = tcg_temp_new_i64();
1330 tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
1331 tcg_gen_add_i64(ofs, ofs, base);
1332 } else if (disp || modify) {
1333 ofs = tcg_temp_new_i64();
1334 tcg_gen_addi_i64(ofs, base, disp);
/* Pre-modify (modify <= 0) addresses with the updated offset. */
1340 *pgva = addr = tcg_temp_new_i64();
1341 tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
1342 #ifndef CONFIG_USER_ONLY
1344 tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
1349 /* Emit a memory load. The modify parameter should be
1350 * < 0 for pre-modify,
1351 * > 0 for post-modify,
1352 * = 0 for no base register update.
/* NOTE(review): the four emitters below differ only in width/direction;
 * interior lines (the modify-writeback guard) are elided by extraction. */
1354 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1355 unsigned rx, int scale, int64_t disp,
1356 unsigned sp, int modify, MemOp mop)
1361 /* Caller uses nullify_over/nullify_end. */
1362 assert(ctx->null_cond.c == TCG_COND_NEVER)
1364 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1365 ctx->mmu_idx == MMU_PHYS_IDX);
1366 tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1368 save_gpr(ctx, rb, ofs);
1372 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1373 unsigned rx, int scale, int64_t disp,
1374 unsigned sp, int modify, MemOp mop)
1379 /* Caller uses nullify_over/nullify_end. */
1380 assert(ctx->null_cond.c == TCG_COND_NEVER)
1382 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1383 ctx->mmu_idx == MMU_PHYS_IDX);
1384 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1386 save_gpr(ctx, rb, ofs);
1390 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1391 unsigned rx, int scale, int64_t disp,
1392 unsigned sp, int modify, MemOp mop)
1397 /* Caller uses nullify_over/nullify_end. */
1398 assert(ctx->null_cond.c == TCG_COND_NEVER)
1400 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1401 ctx->mmu_idx == MMU_PHYS_IDX);
1402 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1404 save_gpr(ctx, rb, ofs);
1408 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1409 unsigned rx, int scale, int64_t disp,
1410 unsigned sp, int modify, MemOp mop)
1415 /* Caller uses nullify_over/nullify_end. */
1416 assert(ctx->null_cond.c == TCG_COND_NEVER)
1418 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1419 ctx->mmu_idx == MMU_PHYS_IDX);
1420 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1422 save_gpr(ctx, rb, ofs);
1426 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1427 unsigned rx, int scale, int64_t disp,
1428 unsigned sp, int modify, MemOp mop)
1435 /* No base register update. */
1436 dest = dest_gpr(ctx, rt);
1438 /* Make sure if RT == RB, we see the result of the load. */
1439 dest = tcg_temp_new_i64();
1441 do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1442 save_gpr(ctx, rt, dest);
1444 return nullify_end(ctx);
1447 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1448 unsigned rx, int scale, int64_t disp,
1449 unsigned sp, int modify)
1455 tmp = tcg_temp_new_i32();
1456 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1457 save_frw_i32(rt, tmp);
1460 gen_helper_loaded_fr0(tcg_env);
1463 return nullify_end(ctx);
1466 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1468 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1469 a->disp, a->sp, a->m);
/*
 * do_floadd: load a 64-bit doubleword into floating-point register RT.
 * NOTE(review): as with do_floadw, the guard on gen_helper_loaded_fr0
 * (and the save of tmp into the FP register) is elided from this view.
 */
1472 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1473 unsigned rx, int scale, int64_t disp,
1474 unsigned sp, int modify)
1480 tmp = tcg_temp_new_i64();
1481 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1485 gen_helper_loaded_fr0(tcg_env);
1488 return nullify_end(ctx);
/* FLDD: scale the index by 8 (3 bits) when the scale bit is set. */
1491 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1493 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1494 a->disp, a->sp, a->m);
/*
 * do_store: general-register store, no index register or scaling
 * (rx = 0, scale = 0).  Wraps do_store_64 with nullification handling.
 */
1497 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1498 int64_t disp, unsigned sp,
1499 int modify, MemOp mop)
1502 do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1503 return nullify_end(ctx);
/*
 * do_fstorew: store the 32-bit contents of floating-point register RT.
 */
1506 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1507 unsigned rx, int scale, int64_t disp,
1508 unsigned sp, int modify)
1514 tmp = load_frw_i32(rt);
1515 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1517 return nullify_end(ctx);
/* FSTW: scale the index by 4 (2 bits) when the scale bit is set. */
1520 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1522 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1523 a->disp, a->sp, a->m);
/*
 * do_fstored: store the 64-bit contents of floating-point register RT.
 * (The load of tmp from the FP register file is elided from this view.)
 */
1526 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1527 unsigned rx, int scale, int64_t disp,
1528 unsigned sp, int modify)
1535 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1537 return nullify_end(ctx);
/* FSTD: scale the index by 8 (3 bits) when the scale bit is set. */
1540 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1542 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1543 a->disp, a->sp, a->m);
/*
 * do_fop_* family: emit a floating-point operation via helper FUNC.
 * Naming: w = 32-bit word operand, d = 64-bit double operand, e = env;
 * e.g. "wed" = 32-bit result from (env, 64-bit source).  The load_frw0/
 * load_frd0 forms read the source register, treating register 0 per the
 * usual HPPA FP convention.  All use nullify_over/nullify_end around
 * the operation (the nullify_over calls are elided from this view).
 */
1546 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1547 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1552 tmp = load_frw0_i32(ra);
1554 func(tmp, tcg_env, tmp);
1556 save_frw_i32(rt, tmp);
1557 return nullify_end(ctx);
/* 32-bit result from a 64-bit source (e.g. double -> single convert). */
1560 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1561 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1568 dst = tcg_temp_new_i32();
1570 func(dst, tcg_env, src);
1572 save_frw_i32(rt, dst);
1573 return nullify_end(ctx);
/* 64-bit unary operation. */
1576 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1577 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1582 tmp = load_frd0(ra);
1584 func(tmp, tcg_env, tmp);
1587 return nullify_end(ctx);
/* 64-bit result from a 32-bit source (e.g. single -> double convert). */
1590 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1591 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1597 src = load_frw0_i32(ra);
1598 dst = tcg_temp_new_i64();
1600 func(dst, tcg_env, src);
1603 return nullify_end(ctx);
/* 32-bit binary operation: rt = func(ra, rb). */
1606 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1607 unsigned ra, unsigned rb,
1608 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1613 a = load_frw0_i32(ra);
1614 b = load_frw0_i32(rb);
1616 func(a, tcg_env, a, b);
1618 save_frw_i32(rt, a);
1619 return nullify_end(ctx);
/* 64-bit binary operation (loads of a/b elided from this view). */
1622 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1623 unsigned ra, unsigned rb,
1624 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1632 func(a, tcg_env, a, b);
1635 return nullify_end(ctx);
1638 /* Emit an unconditional branch to a direct target, which may or may not
1639 have already had nullification handled. */
/*
 * DEST is the absolute target; LINK, when nonzero, receives the return
 * address (iaoq_n); IS_N is the branch's nullify bit.  When there is no
 * pending nullification condition the branch is compiled as a direct
 * goto_tb chain; otherwise state is saved and both paths are emitted.
 * (Several if/else lines are elided from this view.)
 */
1640 static bool do_dbranch(DisasContext *ctx, uint64_t dest,
1641 unsigned link, bool is_n)
1643 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1645 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
/* Mark the following insn as nullified without materializing PSW[N]. */
1649 ctx->null_cond.c = TCG_COND_ALWAYS;
1655 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
/* If allowed, skip the nullified delay slot entirely. */
1658 if (is_n && use_nullify_skip(ctx)) {
1659 nullify_set(ctx, 0);
1660 gen_goto_tb(ctx, 0, dest, dest + 4);
1662 nullify_set(ctx, is_n);
1663 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
/* Not-taken path when the branch itself was nullified. */
1668 nullify_set(ctx, 0);
1669 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1670 ctx->base.is_jmp = DISAS_NORETURN;
1675 /* Emit a conditional branch to a direct target. If the branch itself
1676 is nullified, we should have already used nullify_over. */
/*
 * DISP is the signed displacement (its sign decides which path
 * nullifies: HPPA nullifies the delay slot on taken forward and
 * not-taken backward branches); COND supplies the TCG comparison.
 */
1677 static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
1680 uint64_t dest = iaoq_dest(ctx, disp);
1681 TCGLabel *taken = NULL;
1682 TCGCond c = cond->c;
1685 assert(ctx->null_cond.c == TCG_COND_NEVER);
1687 /* Handle TRUE and NEVER as direct branches. */
1688 if (c == TCG_COND_ALWAYS) {
1689 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1691 if (c == TCG_COND_NEVER) {
1692 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1695 taken = gen_new_label();
1696 tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
1699 /* Not taken: Condition not satisfied; nullify on backward branches. */
1700 n = is_n && disp < 0;
1701 if (n && use_nullify_skip(ctx)) {
1702 nullify_set(ctx, 0);
1703 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1705 if (!n && ctx->null_lab) {
1706 gen_set_label(ctx->null_lab);
1707 ctx->null_lab = NULL;
1709 nullify_set(ctx, n);
1710 if (ctx->iaoq_n == -1) {
1711 /* The temporary iaoq_n_var died at the branch above.
1712 Regenerate it here instead of saving it. */
1713 tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1715 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1718 gen_set_label(taken);
1720 /* Taken: Condition satisfied; nullify on forward branches. */
1721 n = is_n && disp >= 0;
1722 if (n && use_nullify_skip(ctx)) {
1723 nullify_set(ctx, 0);
1724 gen_goto_tb(ctx, 1, dest, dest + 4);
1726 nullify_set(ctx, n);
1727 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1730 /* Not taken: the branch itself was nullified. */
1731 if (ctx->null_lab) {
1732 gen_set_label(ctx->null_lab);
1733 ctx->null_lab = NULL;
1734 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1736 ctx->base.is_jmp = DISAS_NORETURN;
1741 /* Emit an unconditional branch to an indirect target. This handles
1742 nullification of the branch itself. */
/*
 * DEST is a TCG value holding the target address.  Three cases:
 * no pending nullification (compile as a lookup_and_goto_ptr);
 * nullified branch where the next insn may be skipped; and the
 * general case using movcond on the pending null condition.
 */
1743 static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
1744 unsigned link, bool is_n)
1746 TCGv_i64 a0, a1, next, tmp;
1749 assert(ctx->null_lab == NULL);
1751 if (ctx->null_cond.c == TCG_COND_NEVER) {
1753 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1755 next = tcg_temp_new_i64();
1756 tcg_gen_mov_i64(next, dest);
1758 if (use_nullify_skip(ctx)) {
1759 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
1760 tcg_gen_addi_i64(next, next, 4);
1761 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1762 nullify_set(ctx, 0);
1763 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
/* Otherwise: record that the next insn is nullified. */
1766 ctx->null_cond.c = TCG_COND_ALWAYS;
1769 ctx->iaoq_n_var = next;
1770 } else if (is_n && use_nullify_skip(ctx)) {
1771 /* The (conditional) branch, B, nullifies the next insn, N,
1772 and we're allowed to skip execution N (no single-step or
1773 tracepoint in effect). Since the goto_ptr that we must use
1774 for the indirect branch consumes no special resources, we
1775 can (conditionally) skip B and continue execution. */
1776 /* The use_nullify_skip test implies we have a known control path. */
1777 tcg_debug_assert(ctx->iaoq_b != -1);
1778 tcg_debug_assert(ctx->iaoq_n != -1);
1780 /* We do have to handle the non-local temporary, DEST, before
1781 branching. Since IOAQ_F is not really live at this point, we
1782 can simply store DEST optimistically. Similarly with IAOQ_B. */
1783 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1784 next = tcg_temp_new_i64();
1785 tcg_gen_addi_i64(next, dest, 4);
1786 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1790 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1792 tcg_gen_lookup_and_goto_ptr();
1793 return nullify_end(ctx);
/* General case: select between fall-through and DEST with movcond. */
1795 c = ctx->null_cond.c;
1796 a0 = ctx->null_cond.a0;
1797 a1 = ctx->null_cond.a1;
1799 tmp = tcg_temp_new_i64();
1800 next = tcg_temp_new_i64();
1802 copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1803 tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1805 ctx->iaoq_n_var = next;
/* Conditionally update the link register as well. */
1808 tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1812 /* The branch nullifies the next insn, which means the state of N
1813 after the branch is the inverse of the state of N that applied
1815 tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1816 cond_free(&ctx->null_cond);
1817 ctx->null_cond = cond_make_n();
1818 ctx->psw_n_nonzero = true;
1820 cond_free(&ctx->null_cond);
/*
 * Implement (from the architecture manual):
 */
1827 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1828 * IAOQ_Next{30..31} ← GR[b]{30..31};
1830 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1831 * which keeps the privilege level from being increased.
1833 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1836 switch (ctx->privilege) {
1838 /* Privilege 0 is maximum and is allowed to decrease. */
1841 /* Privilege 3 is minimum and is never allowed to increase. */
1842 dest = tcg_temp_new_i64();
1843 tcg_gen_ori_i64(dest, offset, 3);
/* Intermediate levels: clamp the target's privilege field (low 2
   bits) to at least the current level, via an unsigned compare. */
1846 dest = tcg_temp_new_i64();
1847 tcg_gen_andi_i64(dest, offset, -4);
1848 tcg_gen_ori_i64(dest, dest, ctx->privilege);
1849 tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1855 #ifdef CONFIG_USER_ONLY
1856 /* On Linux, page zero is normally marked execute only + gateway.
1857 Therefore normal read or write is supposed to fail, but specific
1858 offsets have kernel code mapped to raise permissions to implement
1859 system calls. Handling this via an explicit check here, rather
1860 in than the "be disp(sr2,r0)" instruction that probably sent us
1861 here, is the easiest way to handle the branch delay slot on the
1862 aforementioned BE. */
1863 static void do_page_zero(DisasContext *ctx)
1867 /* If by some means we get here with PSW[N]=1, that implies that
1868 the B,GATE instruction would be skipped, and we'd fault on the
1869 next insn within the privileged page. */
1870 switch (ctx->null_cond.c) {
1871 case TCG_COND_NEVER:
1873 case TCG_COND_ALWAYS:
1874 tcg_gen_movi_i64(cpu_psw_n, 0);
1877 /* Since this is always the first (and only) insn within the
1878 TB, we should know the state of PSW[N] from TB->FLAGS. */
1879 g_assert_not_reached();
1882 /* Check that we didn't arrive here via some means that allowed
1883 non-sequential instruction execution. Normally the PSW[B] bit
1884 detects this by disallowing the B,GATE instruction to execute
1885 under such conditions. */
1886 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
/* Dispatch on the gateway offset (word-aligned). */
1890 switch (ctx->iaoq_f & -4) {
1891 case 0x00: /* Null pointer call */
1892 gen_excp_1(EXCP_IMP);
1893 ctx->base.is_jmp = DISAS_NORETURN;
1896 case 0xb0: /* LWS */
1897 gen_excp_1(EXCP_SYSCALL_LWS);
1898 ctx->base.is_jmp = DISAS_NORETURN;
1901 case 0xe0: /* SET_THREAD_POINTER */
1902 tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
/* Return to GR[31], forcing lowest privilege (low bits = 3). */
1903 tmp = tcg_temp_new_i64();
1904 tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1905 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1906 tcg_gen_addi_i64(tmp, tmp, 4);
1907 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1908 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1911 case 0x100: /* SYSCALL */
1912 gen_excp_1(EXCP_SYSCALL);
1913 ctx->base.is_jmp = DISAS_NORETURN;
/* Any other page-zero offset is an illegal instruction. */
1918 gen_excp_1(EXCP_ILL);
1919 ctx->base.is_jmp = DISAS_NORETURN;
/* NOP: only discard any lazily-evaluated nullification condition. */
1925 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1927 cond_free(&ctx->null_cond);
/* BREAK: raise the break exception, recording the insn in IIR. */
1931 static bool trans_break(DisasContext *ctx, arg_break *a)
1933 return gen_excp_iir(ctx, EXCP_BREAK);
1936 static bool trans_sync(DisasContext *ctx, arg_sync *a)
1938 /* No point in nullifying the memory barrier. */
1939 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1941 cond_free(&ctx->null_cond);
/* MFIA: read the current instruction address (IAOQ front) into RT. */
1945 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1948 TCGv_i64 tmp = dest_gpr(ctx, rt);
1949 tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1950 save_gpr(ctx, rt, tmp);
1952 cond_free(&ctx->null_cond);
/*
 * MFSP: move space register RS to general register RT.  Space registers
 * are kept in the high 32 bits, hence the shift down.
 */
1956 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1959 unsigned rs = a->sp;
1960 TCGv_i64 t0 = tcg_temp_new_i64();
1962 load_spr(ctx, t0, rs);
1963 tcg_gen_shri_i64(t0, t0, 32);
1965 save_gpr(ctx, rt, t0);
1967 cond_free(&ctx->null_cond);
/*
 * MFCTL: move control register CTL to RT.  SAR is readable at any
 * privilege; the interval timer is read via helper (with icount I/O
 * handling); everything else requires most-privileged mode.  (The
 * switch/case skeleton is elided from this view.)
 */
1971 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1974 unsigned ctl = a->r;
1980 /* MFSAR without ,W masks low 5 bits. */
1981 tmp = dest_gpr(ctx, rt);
1982 tcg_gen_andi_i64(tmp, cpu_sar, 31);
1983 save_gpr(ctx, rt, tmp);
1986 save_gpr(ctx, rt, cpu_sar);
1988 case CR_IT: /* Interval Timer */
1989 /* FIXME: Respect PSW_S bit. */
1991 tmp = dest_gpr(ctx, rt);
1992 if (translator_io_start(&ctx->base)) {
1993 gen_helper_read_interval_timer(tmp);
1994 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1996 gen_helper_read_interval_timer(tmp);
1998 save_gpr(ctx, rt, tmp);
1999 return nullify_end(ctx);
2004 /* All other control registers are privileged. */
2005 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2009 tmp = tcg_temp_new_i64();
2010 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2011 save_gpr(ctx, rt, tmp);
2014 cond_free(&ctx->null_cond);
/*
 * MTSP: move general register RR to space register RS (privileged).
 * The value is shifted into the high 32 bits; writing any SR
 * invalidates the "all SRs equal" fast-path flag.
 */
2018 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2021 unsigned rs = a->sp;
2025 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2029 tmp = tcg_temp_new_i64();
2030 tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2033 tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2034 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
/* Low-numbered SRs are also cached in TCG globals. */
2036 tcg_gen_mov_i64(cpu_sr[rs], tmp);
2039 return nullify_end(ctx);
/*
 * MTCTL: move RR to control register CTL.  SAR is writable at any
 * privilege (masked to the shift width: 63 on PA2.0, 31 on PA1.x);
 * all other registers are privileged, with per-register side effects
 * (timer, EIRR/EIEM, interruption queue back-elements, protection id).
 * (The switch/case skeleton is elided from this view.)
 */
2042 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2044 unsigned ctl = a->t;
2048 if (ctl == CR_SAR) {
2049 reg = load_gpr(ctx, a->r);
2050 tmp = tcg_temp_new_i64();
2051 tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2052 save_or_nullify(ctx, cpu_sar, tmp);
2054 cond_free(&ctx->null_cond);
2058 /* All other control registers are privileged or read-only. */
2059 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2061 #ifndef CONFIG_USER_ONLY
2065 reg = load_gpr(ctx, a->r);
/* Narrow mode: only the low 32 bits of the source are used. */
2067 reg = tcg_temp_new_i64();
2068 tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
2073 gen_helper_write_interval_timer(tcg_env, reg);
2076 gen_helper_write_eirr(tcg_env, reg);
2079 gen_helper_write_eiem(tcg_env, reg);
/* Changing the interrupt mask may unmask a pending interrupt. */
2080 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2085 /* FIXME: Respect PSW_Q bit */
2086 /* The write advances the queue and stores to the back element. */
2087 tmp = tcg_temp_new_i64();
2088 tcg_gen_ld_i64(tmp, tcg_env,
2089 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2090 tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2091 tcg_gen_st_i64(reg, tcg_env,
2092 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
/* Protection-ID registers: store, then notify via helper. */
2099 tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2100 #ifndef CONFIG_USER_ONLY
2101 gen_helper_change_prot_id(tcg_env);
/* Default: plain store into the control register array. */
2106 tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2109 return nullify_end(ctx);
/*
 * MTSARCM: write the ones-complement of RR (masked to the shift width)
 * into SAR.
 */
2113 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2115 TCGv_i64 tmp = tcg_temp_new_i64();
2117 tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2118 tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2119 save_or_nullify(ctx, cpu_sar, tmp);
2121 cond_free(&ctx->null_cond);
/*
 * LDSID: load the space identifier for the given base into RT.
 * User mode has no space registers, so the result is 0 there.
 */
2125 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2127 TCGv_i64 dest = dest_gpr(ctx, a->t);
2129 #ifdef CONFIG_USER_ONLY
2130 /* We don't implement space registers in user mode. */
2131 tcg_gen_movi_i64(dest, 0);
2133 tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2134 tcg_gen_shri_i64(dest, dest, 32);
2136 save_gpr(ctx, a->t, dest);
2138 cond_free(&ctx->null_cond);
2142 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2144 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2145 #ifndef CONFIG_USER_ONLY
2150 tmp = tcg_temp_new_i64();
2151 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2152 tcg_gen_andi_i64(tmp, tmp, ~a->i);
2153 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2154 save_gpr(ctx, a->t, tmp);
2156 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2157 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2158 return nullify_end(ctx);
2162 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2164 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2165 #ifndef CONFIG_USER_ONLY
2170 tmp = tcg_temp_new_i64();
2171 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2172 tcg_gen_ori_i64(tmp, tmp, a->i);
2173 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2174 save_gpr(ctx, a->t, tmp);
2176 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2177 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2178 return nullify_end(ctx);
2182 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2184 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2185 #ifndef CONFIG_USER_ONLY
2189 reg = load_gpr(ctx, a->r);
2190 tmp = tcg_temp_new_i64();
2191 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2193 /* Exit the TB to recognize new interrupts. */
2194 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2195 return nullify_end(ctx);
/*
 * RFI / RFI,R: return from interruption, restoring state from the
 * interruption registers (the ,R form also restores shadow registers).
 * Privileged; must end the TB since the entire PSW may change.
 */
2199 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2201 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2202 #ifndef CONFIG_USER_ONLY
2206 gen_helper_rfi_r(tcg_env);
2208 gen_helper_rfi(tcg_env);
2210 /* Exit the TB to recognize new interrupts. */
2211 tcg_gen_exit_tb(NULL, 0);
2212 ctx->base.is_jmp = DISAS_NORETURN;
2214 return nullify_end(ctx);
2218 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2220 return do_rfi(ctx, false);
2223 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2225 return do_rfi(ctx, true);
/* HALT (QEMU-specific firmware interface): stop the machine. */
2228 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2230 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2231 #ifndef CONFIG_USER_ONLY
2233 gen_helper_halt(tcg_env);
2234 ctx->base.is_jmp = DISAS_NORETURN;
2235 return nullify_end(ctx);
/* RESET (QEMU-specific firmware interface): reboot the machine. */
2239 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2241 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2242 #ifndef CONFIG_USER_ONLY
2244 gen_helper_reset(tcg_env);
2245 ctx->base.is_jmp = DISAS_NORETURN;
2246 return nullify_end(ctx);
/* Restore the general registers from their shadow copies. */
2250 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2252 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2253 #ifndef CONFIG_USER_ONLY
2255 gen_helper_getshadowregs(tcg_env);
2256 return nullify_end(ctx);
/*
 * Address-form no-op (e.g. cache hints): no memory access, but the
 * base-modify side effect of the addressing mode must still happen.
 */
2260 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2263 TCGv_i64 dest = dest_gpr(ctx, a->b);
2264 TCGv_i64 src1 = load_gpr(ctx, a->b);
2265 TCGv_i64 src2 = load_gpr(ctx, a->x);
2267 /* The only thing we need to do is the base register modification. */
2268 tcg_gen_add_i64(dest, src1, src2);
2269 save_gpr(ctx, a->b, dest);
2271 cond_free(&ctx->null_cond);
/*
 * PROBE: test read or write access to an address at a given privilege
 * level, returning 0/1 in RT.  The level comes either from the
 * immediate (ri) form or the low 2 bits of register ri.
 */
2275 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2278 TCGv_i32 level, want;
2283 dest = dest_gpr(ctx, a->t);
2284 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2287 level = tcg_constant_i32(a->ri);
2289 level = tcg_temp_new_i32();
2290 tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2291 tcg_gen_andi_i32(level, level, 3);
2293 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2295 gen_helper_probe(dest, tcg_env, addr, level, want);
2297 save_gpr(ctx, a->t, dest);
2298 return nullify_end(ctx);
/*
 * IITLBA/IITLBP / IDTLBA/IDTLBP (PA1.1): insert a TLB address or
 * protection entry.  Privileged; exits the TB when translation is
 * enabled since the mapping for the current code may have changed.
 * (The selection between the itlba/itlbp helpers is elided here.)
 */
2301 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2306 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2307 #ifndef CONFIG_USER_ONLY
2313 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2314 reg = load_gpr(ctx, a->r);
2316 gen_helper_itlba_pa11(tcg_env, addr, reg);
2318 gen_helper_itlbp_pa11(tcg_env, addr, reg);
2321 /* Exit TB for TLB change if mmu is enabled. */
2322 if (ctx->tb_flags & PSW_C) {
2323 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2325 return nullify_end(ctx);
/*
 * PDTLB/PITLB: purge a TLB entry, optionally LOCAL (pa2.0 ,l form,
 * which skips broadcasting to other processors).  The pa2.0 page-size
 * field from the low bits of GR[b] is folded into the aligned address.
 */
2329 static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
2331 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2332 #ifndef CONFIG_USER_ONLY
2338 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2341 * Page align now, rather than later, so that we can add in the
2342 * page_size field from pa2.0 from the low 4 bits of GR[b].
2344 tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
2346 tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
2350 gen_helper_ptlb_l(tcg_env, addr);
2352 gen_helper_ptlb(tcg_env, addr);
/* Base-modify form also updates GR[b]. */
2356 save_gpr(ctx, a->b, ofs);
2359 /* Exit TB for TLB change if mmu is enabled. */
2360 if (ctx->tb_flags & PSW_C) {
2361 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2363 return nullify_end(ctx);
2367 static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
2369 return do_pxtlb(ctx, a, false);
/* The ,l (local) purge form only exists on PA2.0. */
2372 static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
2374 return ctx->is_pa20 && do_pxtlb(ctx, a, true);
/*
 * PDTLBE/PITLBE: purge the entire TLB.  The address computation is
 * still performed for the base-modify side effect (via trans_nop_addrx).
 */
2377 static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
2379 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2380 #ifndef CONFIG_USER_ONLY
2383 trans_nop_addrx(ctx, a);
2384 gen_helper_ptlbe(tcg_env);
2386 /* Exit TB for TLB change if mmu is enabled. */
2387 if (ctx->tb_flags & PSW_C) {
2388 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2390 return nullify_end(ctx);
/*
 * Fast TLB insert (pcxl/pcxl2): the virtual address comes from the
 * interruption registers (ISR:IOR for data, IIASQ:IIAOQ for insns)
 * rather than an operand.
 */
2395 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2397 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2398 * page 13-9 (195/206)
2400 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2405 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2406 #ifndef CONFIG_USER_ONLY
2407 TCGv_i64 addr, atl, stl;
2414 * if (not (pcxl or pcxl2))
2415 * return gen_illegal(ctx);
2418 atl = tcg_temp_new_i64();
2419 stl = tcg_temp_new_i64();
2420 addr = tcg_temp_new_i64();
/* Assemble space (high 32 bits) and offset from the CRs. */
2422 tcg_gen_ld32u_i64(stl, tcg_env,
2423 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2424 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2425 tcg_gen_ld32u_i64(atl, tcg_env,
2426 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2427 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2428 tcg_gen_shli_i64(stl, stl, 32);
2429 tcg_gen_or_i64(addr, atl, stl);
2431 reg = load_gpr(ctx, a->r);
2433 gen_helper_itlba_pa11(tcg_env, addr, reg);
2435 gen_helper_itlbp_pa11(tcg_env, addr, reg);
2438 /* Exit TB for TLB change if mmu is enabled. */
2439 if (ctx->tb_flags & PSW_C) {
2440 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2442 return nullify_end(ctx);
/*
 * IDTLBT/IITLBT (PA2.0 only): insert a combined TLB entry from the
 * register pair r1/r2.
 */
2446 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2448 if (!ctx->is_pa20) {
2451 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2452 #ifndef CONFIG_USER_ONLY
2455 TCGv_i64 src1 = load_gpr(ctx, a->r1);
2456 TCGv_i64 src2 = load_gpr(ctx, a->r2);
2459 gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2461 gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2464 /* Exit TB for TLB change if mmu is enabled. */
2465 if (ctx->tb_flags & PSW_C) {
2466 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2468 return nullify_end(ctx);
/*
 * LPA: translate a virtual address to a physical address via helper,
 * placing the result in RT.
 */
2472 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2474 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2475 #ifndef CONFIG_USER_ONLY
2477 TCGv_i64 ofs, paddr;
2481 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2483 paddr = tcg_temp_new_i64();
2484 gen_helper_lpa(paddr, tcg_env, vaddr);
2486 /* Note that physical address result overrides base modification. */
2488 save_gpr(ctx, a->b, ofs);
2490 save_gpr(ctx, a->t, paddr);
2492 return nullify_end(ctx);
2496 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2498 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2500 /* The Coherence Index is an implementation-defined function of the
2501 physical address. Two addresses with the same CI have a coherent
2502 view of the cache. Our implementation is to return 0 for all,
2503 since the entire address space is coherent. */
2504 save_gpr(ctx, a->t, ctx->zero);
2506 cond_free(&ctx->null_cond);
/*
 * Arithmetic/logical register forms.  The do_add_reg boolean flags are
 * (is_l, is_tsv, is_tc, is_c): logical (no carry output), trap on
 * signed overflow, trap on condition, and carry-in, respectively.
 * do_sub_reg flags are (is_tsv, is_b, is_tc): trap on overflow,
 * borrow-in, trap on condition.
 */
2510 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2512 return do_add_reg(ctx, a, false, false, false, false);
2515 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2517 return do_add_reg(ctx, a, true, false, false, false);
2520 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2522 return do_add_reg(ctx, a, false, true, false, false);
2525 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2527 return do_add_reg(ctx, a, false, false, false, true);
2530 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2532 return do_add_reg(ctx, a, false, true, false, true);
2535 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2537 return do_sub_reg(ctx, a, false, false, false);
2540 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2542 return do_sub_reg(ctx, a, true, false, false);
2545 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2547 return do_sub_reg(ctx, a, false, false, true);
2550 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2552 return do_sub_reg(ctx, a, true, false, true);
2555 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2557 return do_sub_reg(ctx, a, false, true, false);
2560 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2562 return do_sub_reg(ctx, a, true, true, false);
/* Bitwise logical register ops share the do_log_reg scaffolding. */
2565 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2567 return do_log_reg(ctx, a, tcg_gen_andc_i64);
2570 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2572 return do_log_reg(ctx, a, tcg_gen_and_i64);
/*
 * OR, with special-casing of its idiomatic encodings: rt==0 is NOP,
 * r2==0 is COPY (including r1==0 as load-immediate-zero), and in
 * system mode "or rX,rX,rX" for X in {10,31} is treated as a QEMU
 * idle/pause extension.
 */
2575 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2578 unsigned r2 = a->r2;
2579 unsigned r1 = a->r1;
2582 if (rt == 0) { /* NOP */
2583 cond_free(&ctx->null_cond);
2586 if (r2 == 0) { /* COPY */
2588 TCGv_i64 dest = dest_gpr(ctx, rt);
2589 tcg_gen_movi_i64(dest, 0);
2590 save_gpr(ctx, rt, dest);
2592 save_gpr(ctx, rt, cpu_gr[r1]);
2594 cond_free(&ctx->null_cond);
2597 #ifndef CONFIG_USER_ONLY
2598 /* These are QEMU extensions and are nops in the real architecture:
2600 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2601 * or %r31,%r31,%r31 -- death loop; offline cpu
2602 * currently implemented as idle.
2604 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2605 /* No need to check for supervisor, as userland can only pause
2606 until the next timer interrupt. */
2609 /* Advance the instruction queue. */
2610 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b)
2611 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2612 nullify_set(ctx, 0);
2614 /* Tell the qemu main loop to halt until this cpu has work. */
2615 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2616 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2617 gen_excp_1(EXCP_HALTED);
2618 ctx->base.is_jmp = DISAS_NORETURN;
2620 return nullify_end(ctx);
/* Ordinary OR falls through to the common logical-op path. */
2624 return do_log_reg(ctx, a, tcg_gen_or_i64);
2627 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2629 return do_log_reg(ctx, a, tcg_gen_xor_i64);
/* CMPCLR: compare r1 with r2 per the condition field, clearing RT. */
2632 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2634 TCGv_i64 tcg_r1, tcg_r2;
2639 tcg_r1 = load_gpr(ctx, a->r1);
2640 tcg_r2 = load_gpr(ctx, a->r2);
2641 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2642 return nullify_end(ctx);
/* UXOR: unit XOR — XOR through the unit-condition machinery. */
2645 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2647 TCGv_i64 tcg_r1, tcg_r2;
2652 tcg_r1 = load_gpr(ctx, a->r1);
2653 tcg_r2 = load_gpr(ctx, a->r2);
2654 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2655 return nullify_end(ctx);
/*
 * UADDCM / UADDCM,TC: unit add complement — r1 + ~r2 through the unit
 * machinery; IS_TC selects the trap-on-condition variant.
 */
2658 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2660 TCGv_i64 tcg_r1, tcg_r2, tmp;
2665 tcg_r1 = load_gpr(ctx, a->r1);
2666 tcg_r2 = load_gpr(ctx, a->r2);
2667 tmp = tcg_temp_new_i64();
2668 tcg_gen_not_i64(tmp, tcg_r2);
2669 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2670 return nullify_end(ctx);
2673 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2675 return do_uaddcm(ctx, a, false);
2678 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2680 return do_uaddcm(ctx, a, true);
/*
 * DCOR / IDCOR: decimal correct.  Builds a correction value of 6 per
 * BCD digit from the per-nibble carry bits in PSW[CB] (IS_I inverts
 * the carries for the intermediate form), then adds or subtracts it.
 */
2683 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2689 tmp = tcg_temp_new_i64();
2690 tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2692 tcg_gen_not_i64(tmp, tmp);
2694 tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2695 tcg_gen_muli_i64(tmp, tmp, 6);
2696 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2697 is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2698 return nullify_end(ctx);
2701 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2703 return do_dcor(ctx, a, false);
2706 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2708 return do_dcor(ctx, a, true);
/*
 * DS: divide step.  One iteration of non-restoring division:
 * (r1 << 1 | CB) then +r2 or -r2 depending on PSW[V], updating
 * CB, V and the nullification condition for the next step.
 */
2711 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2713 TCGv_i64 dest, add1, add2, addc, in1, in2;
2718 in1 = load_gpr(ctx, a->r1);
2719 in2 = load_gpr(ctx, a->r2);
2721 add1 = tcg_temp_new_i64();
2722 add2 = tcg_temp_new_i64();
2723 addc = tcg_temp_new_i64();
2724 dest = tcg_temp_new_i64();
2726 /* Form R1 << 1 | PSW[CB]{8}. */
2727 tcg_gen_add_i64(add1, in1, in1);
2728 tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2731 * Add or subtract R2, depending on PSW[V]. Proper computation of
2732 * carry requires that we subtract via + ~R2 + 1, as described in
2733 * the manual. By extracting and masking V, we can produce the
2734 * proper inputs to the addition without movcond.
2736 tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2737 tcg_gen_xor_i64(add2, in2, addc);
2738 tcg_gen_andi_i64(addc, addc, 1);
/* Two-stage add to accumulate the carry-out in cpu_psw_cb_msb. */
2740 tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
2741 tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
2744 /* Write back the result register. */
2745 save_gpr(ctx, a->t, dest);
2747 /* Write back PSW[CB]. */
2748 tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2749 tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2751 /* Write back PSW[V] for the division step. */
2752 cout = get_psw_carry(ctx, false);
2753 tcg_gen_neg_i64(cpu_psw_v, cout);
2754 tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2756 /* Install the new nullification. */
2759 if (cond_need_sv(a->cf >> 1)) {
2760 /* ??? The lshift is supposed to contribute to overflow. */
2761 sv = do_add_sv(ctx, dest, add1, add2);
2763 ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2766 return nullify_end(ctx);
/*
 * Immediate add/subtract forms.  do_add_imm flags are (is_tsv, is_tc);
 * do_sub_imm's single flag is is_tsv.
 */
2769 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2771 return do_add_imm(ctx, a, false, false);
2774 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2776 return do_add_imm(ctx, a, true, false);
2779 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2781 return do_add_imm(ctx, a, false, true);
2784 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2786 return do_add_imm(ctx, a, true, true);
2789 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2791 return do_sub_imm(ctx, a, false);
2794 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2796 return do_sub_imm(ctx, a, true);
/* CMPICLR: compare immediate with r2, clearing RT. */
2799 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2801 TCGv_i64 tcg_im, tcg_r2;
2807 tcg_im = tcg_constant_i64(a->i);
2808 tcg_r2 = load_gpr(ctx, a->r);
2809 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2811 return nullify_end(ctx);
/*
 * Common scaffolding for the PA2.0 halfword/word multimedia (SIMD)
 * instructions: reject on pre-2.0 CPUs, load operands, apply FN,
 * store the result.  do_multimedia: reg ⊗ reg; do_multimedia_sh:
 * reg shifted by an immediate; do_multimedia_shadd: shift-and-add
 * with the shift amount passed as an i32 constant.
 */
2814 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
2815 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
2817 TCGv_i64 r1, r2, dest;
2819 if (!ctx->is_pa20) {
2825 r1 = load_gpr(ctx, a->r1);
2826 r2 = load_gpr(ctx, a->r2);
2827 dest = dest_gpr(ctx, a->t);
2830 save_gpr(ctx, a->t, dest);
2832 return nullify_end(ctx);
2835 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
2836 void (*fn)(TCGv_i64, TCGv_i64, int64_t))
2840 if (!ctx->is_pa20) {
2846 r = load_gpr(ctx, a->r);
2847 dest = dest_gpr(ctx, a->t);
2850 save_gpr(ctx, a->t, dest);
2852 return nullify_end(ctx);
2855 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
2856 void (*fn)(TCGv_i64, TCGv_i64,
2857 TCGv_i64, TCGv_i32))
2859 TCGv_i64 r1, r2, dest;
2861 if (!ctx->is_pa20) {
2867 r1 = load_gpr(ctx, a->r1);
2868 r2 = load_gpr(ctx, a->r2);
2869 dest = dest_gpr(ctx, a->t);
2871 fn(dest, r1, r2, tcg_constant_i32(a->sh));
2872 save_gpr(ctx, a->t, dest);
2874 return nullify_end(ctx);
/*
 * Halfword SIMD wrappers: add/subtract (modular, signed-saturating
 * _ss, unsigned-saturating _us), average, shifts, and shift-and-add.
 */
2877 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
2879 return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
2882 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
2884 return do_multimedia(ctx, a, gen_helper_hadd_ss);
2887 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
2889 return do_multimedia(ctx, a, gen_helper_hadd_us);
2892 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
2894 return do_multimedia(ctx, a, gen_helper_havg);
2897 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
2899 return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64)
2902 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
2904 return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
2907 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
2909 return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
2912 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
2914 return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
2917 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
2919 return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
2922 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
2924 return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
2927 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
2929 return do_multimedia(ctx, a, gen_helper_hsub_ss);
2932 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
2934 return do_multimedia(ctx, a, gen_helper_hsub_us);
2937 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2939 uint64_t mask = 0xffff0000ffff0000ull;
2940 TCGv_i64 tmp = tcg_temp_new_i64();
2942 tcg_gen_andi_i64(tmp, r2, mask);
2943 tcg_gen_andi_i64(dst, r1, mask);
2944 tcg_gen_shri_i64(tmp, tmp, 16);
2945 tcg_gen_or_i64(dst, dst, tmp);
2948 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
2950 return do_multimedia(ctx, a, gen_mixh_l);
2953 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2955 uint64_t mask = 0x0000ffff0000ffffull;
2956 TCGv_i64 tmp = tcg_temp_new_i64();
2958 tcg_gen_andi_i64(tmp, r1, mask);
2959 tcg_gen_andi_i64(dst, r2, mask);
2960 tcg_gen_shli_i64(tmp, tmp, 16);
2961 tcg_gen_or_i64(dst, dst, tmp);
2964 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
2966 return do_multimedia(ctx, a, gen_mixh_r);
2969 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2971 TCGv_i64 tmp = tcg_temp_new_i64();
2973 tcg_gen_shri_i64(tmp, r2, 32);
2974 tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
2977 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
2979 return do_multimedia(ctx, a, gen_mixw_l);
2982 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2984 tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
2987 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
2989 return do_multimedia(ctx, a, gen_mixw_r);
/*
 * PERMH: permute the four 16-bit lanes of GR[r1] according to the four
 * 2-bit selectors c0..c3.  The selectors use big-endian lane numbering,
 * hence the (3 - cN) conversion when extracting with TCG's little-endian
 * bit offsets.  PA2.0 only (reject body elided in this listing).
 */
2992 static bool trans_permh(DisasContext *ctx, arg_permh *a)
2994 TCGv_i64 r, t0, t1, t2, t3;
2996 if (!ctx->is_pa20) {
3002 r = load_gpr(ctx, a->r1);
3003 t0 = tcg_temp_new_i64();
3004 t1 = tcg_temp_new_i64();
3005 t2 = tcg_temp_new_i64();
3006 t3 = tcg_temp_new_i64();
/* Pull out each selected 16-bit lane... */
3008 tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
3009 tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
3010 tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
3011 tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
/* ...then reassemble pairwise into the final 64-bit result in t0. */
3013 tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
3014 tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
3015 tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
3017 save_gpr(ctx, a->t, t0);
3018 return nullify_end(ctx);
/*
 * LDB/LDH/LDW/LDD.  On pa20 a load to %g0 acts as a prefetch (still
 * performing any base-register modification); >32-bit sizes are illegal
 * pre-pa20.  NOTE(review): the condition guarding the prefetch path is
 * elided in this listing.
 */
3021 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
3025 * With pa20, LDB, LDH, LDW, LDD to %g0 are prefetches.
3026 * Any base modification still occurs.
3029 return trans_nop_addrx(ctx, a);
3031 } else if (a->size > MO_32) {
3032 return gen_illegal(ctx);
3034 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
3035 a->disp, a->sp, a->m, a->size | MO_TE);
/*
 * STB/STH/STW/STD.  Stores never have index/scale; doubleword stores
 * require pa20.
 */
3038 static bool trans_st(DisasContext *ctx, arg_ldst *a)
3040 assert(a->x == 0 && a->scale == 0);
3041 if (!ctx->is_pa20 && a->size > MO_32) {
3042 return gen_illegal(ctx);
3044 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
/*
 * LDCW/LDCD: load-and-clear, implemented as an atomic exchange of the
 * memory word with zero.  If RT == RB with base modification, a temp is
 * used so the load result is observed before the base is updated.
 */
3047 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3049 MemOp mop = MO_TE | MO_ALIGN | a->size;
3053 if (!ctx->is_pa20 && a->size > MO_32) {
3054 return gen_illegal(ctx);
3060 /* Base register modification. Make sure if RT == RB,
3061 we see the result of the load. */
3062 dest = tcg_temp_new_i64();
3064 dest = dest_gpr(ctx, a->t);
3067 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
3068 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
3071 * For hppa1.1, LDCW is undefined unless aligned mod 16.
3072 * However actual hardware succeeds with aligned mod 4.
3073 * Detect this case and log a GUEST_ERROR.
3075 * TODO: HPPA64 relaxes the over-alignment requirement
3076 * with the ,co completer.
/* Runtime diagnostic only; does not fault the guest. */
3078 gen_helper_ldc_check(addr);
3080 tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3083 save_gpr(ctx, a->b, ofs)
3085 save_gpr(ctx, a->t, dest);
3087 return nullify_end(ctx);
/*
 * STBY: store bytes, beginning/ending case selected by a field that is
 * elided in this listing (the _b vs _e helper pair).  Parallel contexts
 * use the *_parallel helper variants for atomicity.  With base
 * modification, the base is written back aligned down to a word.
 */
3090 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3097 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3098 ctx->mmu_idx == MMU_PHYS_IDX);
3099 val = load_gpr(ctx, a->r);
3101 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3102 gen_helper_stby_e_parallel(tcg_env, addr, val);
3104 gen_helper_stby_e(tcg_env, addr, val);
3107 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3108 gen_helper_stby_b_parallel(tcg_env, addr, val);
3110 gen_helper_stby_b(tcg_env, addr, val);
/* Word-align the modified base. */
3114 tcg_gen_andi_i64(ofs, ofs, ~3);
3115 save_gpr(ctx, a->b, ofs);
3118 return nullify_end(ctx);
/*
 * STDBY: pa20 doubleword analogue of STBY; base written back aligned
 * down to a doubleword.
 */
3121 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3126 if (!ctx->is_pa20) {
3131 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3132 ctx->mmu_idx == MMU_PHYS_IDX);
3133 val = load_gpr(ctx, a->r);
3135 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3136 gen_helper_stdby_e_parallel(tcg_env, addr, val);
3138 gen_helper_stdby_e(tcg_env, addr, val);
3141 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3142 gen_helper_stdby_b_parallel(tcg_env, addr, val);
3144 gen_helper_stdby_b(tcg_env, addr, val);
3148 tcg_gen_andi_i64(ofs, ofs, ~7);
3149 save_gpr(ctx, a->b, ofs);
3152 return nullify_end(ctx);
/*
 * LDWA/LDDA: load absolute -- most-privileged only; temporarily switch
 * the translation context to physical addressing.  The actual do_load
 * call between the two mmu_idx assignments is elided in this listing.
 */
3155 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3157 int hold_mmu_idx = ctx->mmu_idx;
3159 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3160 ctx->mmu_idx = MMU_PHYS_IDX;
3162 ctx->mmu_idx = hold_mmu_idx;
/* STWA/STDA: store absolute, same physical-addressing trick as LDA. */
3166 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3168 int hold_mmu_idx = ctx->mmu_idx;
3170 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3171 ctx->mmu_idx = MMU_PHYS_IDX;
3173 ctx->mmu_idx = hold_mmu_idx;
/* LDIL: load the (pre-shifted) immediate into GR[t]; never nullifies. */
3177 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3179 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3181 tcg_gen_movi_i64(tcg_rt, a->i);
3182 save_gpr(ctx, a->t, tcg_rt);
3183 cond_free(&ctx->null_cond);
/* ADDIL: GR1 = GR[r] + immediate; the target is architecturally GR1. */
3187 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3189 TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3190 TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3192 tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3193 save_gpr(ctx, 1, tcg_r1);
3194 cond_free(&ctx->null_cond);
/* LDO: load offset (plain add, no memory access). */
3198 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3200 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3202 /* Special case rb == 0, for the LDI pseudo-op.
3203 The COPY pseudo-op is handled for free within tcg_gen_addi_i64. */
3205 tcg_gen_movi_i64(tcg_rt, a->i);
3207 tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3209 save_gpr(ctx, a->t, tcg_rt);
3210 cond_free(&ctx->null_cond);
/*
 * Common translation for COMB/COMIB (compare and branch): compute
 * in1 - in2, derive signed overflow only when the condition needs it,
 * then emit the conditional branch.  The result is discarded -- compare
 * only.
 */
3214 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3215 unsigned c, unsigned f, bool d, unsigned n, int disp)
3217 TCGv_i64 dest, in2, sv;
3220 in2 = load_gpr(ctx, r);
3221 dest = tcg_temp_new_i64();
3223 tcg_gen_sub_i64(dest, in1, in2);
3226 if (cond_need_sv(c)) {
3227 sv = do_sub_sv(ctx, dest, in1, in2);
3230 cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3231 return do_cbranch(ctx, disp, n, &cond);
/* CMPB: register form; the 64-bit 'd' completer requires pa20. */
3234 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3236 if (!ctx->is_pa20 && a->d) {
3240 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3241 a->c, a->f, a->d, a->n, a->disp);
/* CMPIB: immediate form of the above. */
3244 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3246 if (!ctx->is_pa20 && a->d) {
3250 return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3251 a->c, a->f, a->d, a->n, a->disp);
/*
 * Common translation for ADDB/ADDIB (add and branch).  Unlike do_cmpb,
 * the sum is written back to GR[r].  Carry is computed only when the
 * condition requires it, via add2 + xor to recover the carry bits.
 */
3254 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3255 unsigned c, unsigned f, unsigned n, int disp)
3257 TCGv_i64 dest, in2, sv, cb_cond;
3262 * For hppa64, the ADDB conditions change with PSW.W,
3263 * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3265 if (ctx->tb_flags & PSW_W) {
3272 in2 = load_gpr(ctx, r);
3273 dest = tcg_temp_new_i64();
3277 if (cond_need_cb(c)) {
3278 TCGv_i64 cb = tcg_temp_new_i64();
3279 TCGv_i64 cb_msb = tcg_temp_new_i64();
3281 tcg_gen_movi_i64(cb_msb, 0);
3282 tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
/* cb = in1 ^ in2 ^ dest recovers the per-bit carries. */
3283 tcg_gen_xor_i64(cb, in1, in2);
3284 tcg_gen_xor_i64(cb, cb, dest);
3285 cb_cond = get_carry(ctx, d, cb, cb_msb);
3287 tcg_gen_add_i64(dest, in1, in2);
3289 if (cond_need_sv(c)) {
3290 sv = do_add_sv(ctx, dest, in1, in2);
3293 cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3294 save_gpr(ctx, r, dest);
3295 return do_cbranch(ctx, disp, n, &cond);
3298 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3301 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3304 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3307 return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
/*
 * BB (branch on bit), variable form: shift the selected bit of GR[r] up
 * to the sign position and branch on the sign.  In 32-bit mode the SAR
 * shift amount is forced into [32,63] so the 32-bit bit numbering lands
 * in the upper half of the 64-bit temp.
 */
3310 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3312 TCGv_i64 tmp, tcg_r;
3317 tmp = tcg_temp_new_i64();
3318 tcg_r = load_gpr(ctx, a->r);
3319 if (cond_need_ext(ctx, a->d)) {
3320 /* Force shift into [32,63] */
3321 tcg_gen_ori_i64(tmp, cpu_sar, 32);
3322 tcg_gen_shl_i64(tmp, tcg_r, tmp);
3324 tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
/* c selects branch-on-0 (GE) vs branch-on-1 (LT) of the shifted sign. */
3327 cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3328 return do_cbranch(ctx, a->disp, a->n, &cond);
/* BB, immediate bit-position form of the above. */
3331 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3333 TCGv_i64 tmp, tcg_r;
3339 tmp = tcg_temp_new_i64();
3340 tcg_r = load_gpr(ctx, a->r);
3341 p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3342 tcg_gen_shli_i64(tmp, tcg_r, p);
3344 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3345 return do_cbranch(ctx, a->disp, a->n, &cond);
/*
 * MOVB: copy GR[r1] to GR[r2] and branch on a condition evaluated on
 * the moved value.  The r1 == 0 special case (elided guard) moves zero.
 */
3348 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3355 dest = dest_gpr(ctx, a->r2);
3357 tcg_gen_movi_i64(dest, 0);
3359 tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3362 /* All MOVB conditions are 32-bit. */
3363 cond = do_sed_cond(ctx, a->c, false, dest);
3364 return do_cbranch(ctx, a->disp, a->n, &cond);
/* MOVIB: immediate form of MOVB. */
3367 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3374 dest = dest_gpr(ctx, a->r);
3375 tcg_gen_movi_i64(dest, a->i);
3377 /* All MOVBI conditions are 32-bit. */
3378 cond = do_sed_cond(ctx, a->c, false, dest);
3379 return do_cbranch(ctx, a->disp, a->n, &cond);
/*
 * SHRPW/SHRPD with a variable (SAR) shift amount: shift the r1:r2 pair
 * right and keep the low word/doubleword.  Several special cases are
 * split out (elided guards select among them): r1 == 0 is a plain shift
 * of r2, r1 == r2 is a rotate, otherwise a true double-width shift.
 */
3382 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3384 TCGv_i64 dest, src2;
3386 if (!ctx->is_pa20 && a->d) {
3393 dest = dest_gpr(ctx, a->t);
3394 src2 = load_gpr(ctx, a->r2);
/* r1 == 0, 64-bit: simple shift right of src2. */
3397 tcg_gen_shr_i64(dest, src2, cpu_sar);
/* r1 == 0, 32-bit: zero-extend then shift by SAR mod 32. */
3399 TCGv_i64 tmp = tcg_temp_new_i64();
3401 tcg_gen_ext32u_i64(dest, src2);
3402 tcg_gen_andi_i64(tmp, cpu_sar, 31);
3403 tcg_gen_shr_i64(dest, dest, tmp);
3405 } else if (a->r1 == a->r2) {
/* Same source twice: the pair shift degenerates to a rotate. */
3407 tcg_gen_rotr_i64(dest, src2, cpu_sar);
3409 TCGv_i32 t32 = tcg_temp_new_i32();
3410 TCGv_i32 s32 = tcg_temp_new_i32();
3412 tcg_gen_extrl_i64_i32(t32, src2);
3413 tcg_gen_extrl_i64_i32(s32, cpu_sar);
3414 tcg_gen_andi_i32(s32, s32, 31);
3415 tcg_gen_rotr_i32(t32, t32, s32);
3416 tcg_gen_extu_i32_i64(dest, t32);
3419 TCGv_i64 src1 = load_gpr(ctx, a->r1);
/*
 * General 64-bit case: (src1:src2) >> sar, composed from
 * src2 << (63 - sar) << 1 (avoids shift-by-64) | src1 >> sar.
 */
3422 TCGv_i64 t = tcg_temp_new_i64();
3423 TCGv_i64 n = tcg_temp_new_i64();
3425 tcg_gen_xori_i64(n, cpu_sar, 63);
3426 tcg_gen_shl_i64(t, src2, n);
3427 tcg_gen_shli_i64(t, t, 1);
3428 tcg_gen_shr_i64(dest, src1, cpu_sar);
3429 tcg_gen_or_i64(dest, dest, t);
/* General 32-bit case: concatenate and shift the 64-bit pair. */
3431 TCGv_i64 t = tcg_temp_new_i64();
3432 TCGv_i64 s = tcg_temp_new_i64();
3434 tcg_gen_concat32_i64(t, src2, src1);
3435 tcg_gen_andi_i64(s, cpu_sar, 31);
3436 tcg_gen_shr_i64(dest, t, s);
3439 save_gpr(ctx, a->t, dest);
3441 /* Install the new nullification. */
3442 cond_free(&ctx->null_cond);
3444 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3446 return nullify_end(ctx);
/*
 * SHRPW/SHRPD with an immediate shift amount; sa is the left-to-right
 * bit position converted to a right-shift count.
 */
3449 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3454 if (!ctx->is_pa20 && a->d) {
3461 width = a->d ? 64 : 32;
3462 sa = width - 1 - a->cpos;
3464 dest = dest_gpr(ctx, a->t);
3465 t2 = load_gpr(ctx, a->r2);
/* r1 == 0 (guard elided): plain field extract of r2. */
3467 tcg_gen_extract_i64(dest, t2, sa, width - sa);
3468 } else if (width == TARGET_LONG_BITS) {
3469 tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3472 if (a->r1 == a->r2) {
/* Rotate when both halves of the pair are the same register. */
3473 TCGv_i32 t32 = tcg_temp_new_i32();
3474 tcg_gen_extrl_i64_i32(t32, t2);
3475 tcg_gen_rotri_i32(t32, t32, sa);
3476 tcg_gen_extu_i32_i64(dest, t32);
3478 tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3479 tcg_gen_extract_i64(dest, dest, sa, 32);
3482 save_gpr(ctx, a->t, dest);
3484 /* Install the new nullification. */
3485 cond_free(&ctx->null_cond);
3487 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3489 return nullify_end(ctx);
/*
 * EXTRW/EXTRD with variable (SAR) position: extract a->len bits ending
 * at the SAR bit position, signed or unsigned (the selecting guard is
 * elided).  SAR uses big-endian bit numbering, converted to a shift via
 * xor with width-1.
 */
3492 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3494 unsigned widthm1 = a->d ? 63 : 31;
3495 TCGv_i64 dest, src, tmp;
3497 if (!ctx->is_pa20 && a->d) {
3504 dest = dest_gpr(ctx, a->t);
3505 src = load_gpr(ctx, a->r);
3506 tmp = tcg_temp_new_i64();
3508 /* Recall that SAR is using big-endian bit numbering. */
3509 tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3510 tcg_gen_xori_i64(tmp, tmp, widthm1);
/* Signed path: sign-extend (32-bit) before the arithmetic shift. */
3514 tcg_gen_ext32s_i64(dest, src);
3517 tcg_gen_sar_i64(dest, src, tmp);
3518 tcg_gen_sextract_i64(dest, dest, 0, a->len);
/* Unsigned path: zero-extend before the logical shift. */
3521 tcg_gen_ext32u_i64(dest, src);
3524 tcg_gen_shr_i64(dest, src, tmp);
3525 tcg_gen_extract_i64(dest, dest, 0, a->len);
3527 save_gpr(ctx, a->t, dest);
3529 /* Install the new nullification. */
3530 cond_free(&ctx->null_cond);
3532 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3534 return nullify_end(ctx);
/*
 * EXTRW/EXTRD with an immediate position; cpos converts the big-endian
 * bit position to a little-endian extract offset, clamped to the word.
 */
3537 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3539 unsigned len, cpos, width;
3542 if (!ctx->is_pa20 && a->d) {
3550 width = a->d ? 64 : 32;
3551 cpos = width - 1 - a->pos;
3552 if (cpos + len > width) {
3556 dest = dest_gpr(ctx, a->t);
3557 src = load_gpr(ctx, a->r);
/* Signed vs unsigned extract (selecting guard elided). */
3559 tcg_gen_sextract_i64(dest, src, cpos, len);
3561 tcg_gen_extract_i64(dest, src, cpos, len);
3563 save_gpr(ctx, a->t, dest);
3565 /* Install the new nullification. */
3566 cond_free(&ctx->null_cond);
3568 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3570 return nullify_end(ctx);
/*
 * DEPWI/DEPDI, immediate position: deposit the 5-bit immediate a->i at
 * a->cpos.  The deposit collapses to compile-time constants: mask0 is
 * the field value in place, mask1 the field merged into all-ones, so
 * dest = (src & mask1) | mask0 (nz form) or just mask0 (zero form).
 */
3573 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3575 unsigned len, width;
3576 uint64_t mask0, mask1;
3579 if (!ctx->is_pa20 && a->d) {
3587 width = a->d ? 64 : 32;
3588 if (a->cpos + len > width) {
3589 len = width - a->cpos;
3592 dest = dest_gpr(ctx, a->t);
3593 mask0 = deposit64(0, a->cpos, len, a->i);
3594 mask1 = deposit64(-1, a->cpos, len, a->i);
/* nz (merge) form -- read the old target value. */
3597 TCGv_i64 src = load_gpr(ctx, a->t);
3598 tcg_gen_andi_i64(dest, src, mask1);
3599 tcg_gen_ori_i64(dest, dest, mask0);
/* z (zero background) form. */
3601 tcg_gen_movi_i64(dest, mask0);
3603 save_gpr(ctx, a->t, dest);
3605 /* Install the new nullification. */
3606 cond_free(&ctx->null_cond);
3608 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3610 return nullify_end(ctx);
/*
 * DEPW/DEPD, register value, immediate position: deposit the low bits
 * of GR[r] into GR[t] at cpos; rs == 0 when the ,z completer zeroes the
 * background.
 */
3613 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3615 unsigned rs = a->nz ? a->t : 0;
3616 unsigned len, width;
3619 if (!ctx->is_pa20 && a->d) {
3627 width = a->d ? 64 : 32;
3628 if (a->cpos + len > width) {
3629 len = width - a->cpos;
3632 dest = dest_gpr(ctx, a->t);
3633 val = load_gpr(ctx, a->r);
3635 tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3637 tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3639 save_gpr(ctx, a->t, dest);
3641 /* Install the new nullification. */
3642 cond_free(&ctx->null_cond);
3644 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3646 return nullify_end(ctx);
/*
 * Variable-position deposit (DEPW/DEPDI ,sar forms): build a runtime
 * mask of len bits, shift mask and value into place by the SAR-derived
 * amount, then merge into GR[rs] (or shift into a zero background).
 */
3649 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3650 bool d, bool nz, unsigned len, TCGv_i64 val)
3652 unsigned rs = nz ? rt : 0;
3653 unsigned widthm1 = d ? 63 : 31;
3654 TCGv_i64 mask, tmp, shift, dest;
3655 uint64_t msb = 1ULL << (len - 1);
3657 dest = dest_gpr(ctx, rt);
3658 shift = tcg_temp_new_i64();
3659 tmp = tcg_temp_new_i64();
3661 /* Convert big-endian bit numbering in SAR to left-shift. */
3662 tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3663 tcg_gen_xori_i64(shift, shift, widthm1);
3665 mask = tcg_temp_new_i64();
/* msb + (msb - 1) == a run of len one-bits. */
3666 tcg_gen_movi_i64(mask, msb + (msb - 1));
3667 tcg_gen_and_i64(tmp, val, mask);
/* Merge form: clear the field in rs, then OR in the shifted value. */
3669 tcg_gen_shl_i64(mask, mask, shift);
3670 tcg_gen_shl_i64(tmp, tmp, shift);
3671 tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3672 tcg_gen_or_i64(dest, dest, tmp);
/* Zero-background form. */
3674 tcg_gen_shl_i64(dest, tmp, shift);
3676 save_gpr(ctx, rt, dest);
3678 /* Install the new nullification. */
3679 cond_free(&ctx->null_cond);
3681 ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3683 return nullify_end(ctx);
/* Register-value and immediate-value front ends for do_dep_sar. */
3686 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3688 if (!ctx->is_pa20 && a->d) {
3694 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3695 load_gpr(ctx, a->r));
3698 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3700 if (!ctx->is_pa20 && a->d) {
3706 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3707 tcg_constant_i64(a->i));
/*
 * BE/BLE: branch external (to another space).  User-only builds ignore
 * spaces and either goto_tb directly (the r0 syscall-gateway case) or
 * take the indirect-branch path.  System builds also update the space
 * queue (IASQ) alongside the offset queue (IAOQ), honoring
 * nullification skip.
 */
3710 static bool trans_be(DisasContext *ctx, arg_be *a)
3714 #ifdef CONFIG_USER_ONLY
3715 /* ??? It seems like there should be a good way of using
3716 "be disp(sr2, r0)", the canonical gateway entry mechanism
3717 to our advantage. But that appears to be inconvenient to
3718 manage along side branch delay slots. Therefore we handle
3719 entry into the gateway page via absolute address. */
3720 /* Since we don't implement spaces, just branch. Do notice the special
3721 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3722 goto_tb to the TB containing the syscall. */
3724 return do_dbranch(ctx, a->disp, a->l, a->n);
3730 tmp = tcg_temp_new_i64();
3731 tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
/* Apply privilege rules to the computed target offset. */
3732 tmp = do_ibranch_priv(ctx, tmp);
3734 #ifdef CONFIG_USER_ONLY
3735 return do_ibranch(ctx, tmp, a->l, a->n);
3737 TCGv_i64 new_spc = tcg_temp_new_i64();
3739 load_spr(ctx, new_spc, a->sp);
/* BLE link value goes to GR31; SR0 receives the current front space. */
3741 copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3742 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3744 if (a->n && use_nullify_skip(ctx)) {
/* Nullified delay slot: jump both queue entries straight to target. */
3745 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3746 tcg_gen_addi_i64(tmp, tmp, 4);
3747 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3748 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3749 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
/* Normal: advance front queue, back queue becomes the target. */
3751 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3752 if (ctx->iaoq_b == -1) {
3753 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3755 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3756 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3757 nullify_set(ctx, a->n);
3759 tcg_gen_lookup_and_goto_ptr();
3760 ctx->base.is_jmp = DISAS_NORETURN;
3761 return nullify_end(ctx);
/* B,L: direct branch and link. */
3765 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3767 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
/*
 * B,GATE: gateway branch that may promote privilege.  Requires a
 * sequential instruction queue (defense against the delay-slot exploit
 * described below), and on system builds consults the TLB access rights
 * of the gateway page to compute the new privilege in the low bits of
 * the target.
 */
3770 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3772 uint64_t dest = iaoq_dest(ctx, a->disp);
3776 /* Make sure the caller hasn't done something weird with the queue.
3777 * ??? This is not quite the same as the PSW[B] bit, which would be
3778 * expensive to track. Real hardware will trap for
3780 * b gateway+4 (in delay slot of first branch)
3781 * However, checking for a non-sequential instruction queue *will*
3782 * diagnose the security hole
3785 * in which instructions at evil would run with increased privs.
3787 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3788 return gen_illegal(ctx);
3791 #ifndef CONFIG_USER_ONLY
3792 if (ctx->tb_flags & PSW_C) {
3793 CPUHPPAState *env = cpu_env(ctx->cs);
3794 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3795 /* If we could not find a TLB entry, then we need to generate an
3796 ITLB miss exception so the kernel will provide it.
3797 The resulting TLB fill operation will invalidate this TB and
3798 we will re-translate, at which point we *will* be able to find
3799 the TLB entry and determine if this is in fact a gateway page. */
3801 gen_excp(ctx, EXCP_ITLB_MISS);
3804 /* No change for non-gateway pages or for priv decrease. */
3805 if (type >= 4 && type - 4 < ctx->privilege) {
3806 dest = deposit32(dest, 0, 2, type - 4);
/* Code translation disabled (PSW_C clear): everything runs at priv 0. */
3809 dest &= -4; /* priv = 0 */
/* Link value: mask the privilege bits per current privilege. */
3814 TCGv_i64 tmp = dest_gpr(ctx, a->l);
3815 if (ctx->privilege < 3) {
3816 tcg_gen_andi_i64(tmp, tmp, -4);
3818 tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3819 save_gpr(ctx, a->l, tmp);
3822 return do_dbranch(ctx, dest, 0, a->n);
/*
 * BLR: branch and link register -- target = IAOQ_F + 8 + (GR[x] << 3).
 * BLR r0,rX degenerates to loading PC+8 into rX via a direct branch.
 */
3825 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3828 TCGv_i64 tmp = tcg_temp_new_i64();
3829 tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3830 tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
3831 /* The computation here never changes privilege level. */
3832 return do_ibranch(ctx, tmp, a->l, a->n);
3834 /* BLR R0,RX is a good way to load PC+8 into RX. */
3835 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
/*
 * BV: branch vectored -- target = GR[b] + (GR[x] << 3); x == 0 case
 * (guard elided) uses GR[b] directly.  Privilege checked via
 * do_ibranch_priv.
 */
3839 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3844 dest = load_gpr(ctx, a->b);
3846 dest = tcg_temp_new_i64();
3847 tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3848 tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3850 dest = do_ibranch_priv(ctx, dest);
3851 return do_ibranch(ctx, dest, 0, a->n);
/*
 * BVE: branch vectored external.  System builds also derive the new
 * space from the target via space_select() and update the IASQ queue.
 */
3854 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3858 #ifdef CONFIG_USER_ONLY
3859 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3860 return do_ibranch(ctx, dest, a->l, a->n);
3863 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3865 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3866 if (ctx->iaoq_b == -1) {
3867 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3869 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3870 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3872 copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3874 nullify_set(ctx, a->n);
3875 tcg_gen_lookup_and_goto_ptr();
3876 ctx->base.is_jmp = DISAS_NORETURN;
3877 return nullify_end(ctx);
/* PA2.0 branch-target-stack ops: architecturally permitted to be nops. */
3881 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3883 /* All branch target stack instructions implement as nop. */
3884 return ctx->is_pa20;
/*
 * Floating point class 0/1 unary operations.  The gen_* helpers below
 * implement copy/abs/neg/negabs as pure sign-bit manipulation (no FPU
 * helper needed); the TCGv_env parameter exists only to match the
 * do_fop_* callback signature.
 */
3891 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3893 tcg_gen_mov_i32(dst, src);
/*
 * FID: store the FPU identification word into fr0.  The value chosen
 * depends on the emulated CPU generation (selecting guard elided):
 * PA8700 for pa20, PA7300LC otherwise.
 */
3896 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3901 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3903 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3907 save_frd(0, tcg_constant_i64(ret));
3908 return nullify_end(ctx);
3911 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3913 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3916 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3918 tcg_gen_mov_i64(dst, src);
3921 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3923 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
/* FABS: clear the sign bit. */
3926 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3928 tcg_gen_andi_i32(dst, src, INT32_MAX);
3931 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3933 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3936 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3938 tcg_gen_andi_i64(dst, src, INT64_MAX);
3941 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3943 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
/* FSQRT and FRND go through softfloat helpers (exceptions, rounding). */
3946 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3948 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3951 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3953 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3956 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3958 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3961 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3963 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
/* FNEG: flip the sign bit. */
3966 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3968 tcg_gen_xori_i32(dst, src, INT32_MIN);
3971 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3973 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3976 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3978 tcg_gen_xori_i64(dst, src, INT64_MIN);
3981 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3983 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
/* FNEGABS: force the sign bit on. */
3986 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3988 tcg_gen_ori_i32(dst, src, INT32_MIN);
3991 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3993 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3996 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3998 tcg_gen_ori_i64(dst, src, INT64_MIN);
4001 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
4003 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
/*
 * FCNV: floating-point conversions.  Naming key for the wrappers and
 * the do_fop_* plumbing: f = single float, d = double float, w = 32-bit
 * int, q/dw = 64-bit int, u prefix = unsigned, t prefix = truncating
 * (round toward zero) conversion.  do_fop_XeY moves an X-sized result
 * from a Y-sized source through the matching softfloat helper.
 */
4010 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
4012 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
4015 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
4017 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
/* Integer -> float. */
4020 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
4022 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
4025 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
4027 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
4030 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
4032 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
4035 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
4037 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
/* Float -> signed integer (current rounding mode). */
4040 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
4042 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
4045 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
4047 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
4050 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
4052 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4055 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4057 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
/* Float -> signed integer, truncating. */
4060 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4062 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4065 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4067 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4070 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4072 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4075 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4077 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
/* Unsigned integer -> float. */
4080 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4082 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4085 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4087 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4090 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4092 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4095 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4097 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
/* Float -> unsigned integer (current rounding mode). */
4100 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4102 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4105 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4107 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4110 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4112 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4115 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4117 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
/* Float -> unsigned integer, truncating. */
4120 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4122 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4125 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4127 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4130 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4132 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4135 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4137 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
/*
 * FCMP (single): compare fr[r1] with fr[r2]; the y/c fields select the
 * comparison and which status/queue bit the helper updates in the FPSR.
 */
4144 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4146 TCGv_i32 ta, tb, tc, ty;
4150 ta = load_frw0_i32(a->r1);
4151 tb = load_frw0_i32(a->r2);
4152 ty = tcg_constant_i32(a->y);
4153 tc = tcg_constant_i32(a->c);
4155 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4157 return nullify_end(ctx);
/* FCMP (double): as above for 64-bit operands. */
4160 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4167 ta = load_frd0(a->r1)
4168 tb = load_frd0(a->r2);
4169 ty = tcg_constant_i32(a->y);
4170 tc = tcg_constant_i32(a->c);
4172 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4174 return nullify_end(ctx);
/*
 * FTEST: nullify the next insn based on bits of the fr0 shadow copy
 * (the C bit at 0x4000000 for the simple form, multi-bit masks and the
 * comparison queue for the other forms; several cases elided).
 */
4177 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4183 t = tcg_temp_new_i64();
4184 tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4191 case 0: /* simple */
4192 tcg_gen_andi_i64(t, t, 0x4000000);
4193 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
/* Mask forms: inverted test ORs in the mask and compares for equality. */
4221 TCGv_i64 c = tcg_constant_i64(mask);
4222 tcg_gen_or_i64(t, t, c);
4223 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4225 tcg_gen_andi_i64(t, t, mask);
4226 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
/* Queue forms: test a single bit of the comparison queue. */
4229 unsigned cbit = (a->y ^ 1) - 1;
4231 tcg_gen_extract_i64(t, t, 21 - cbit, 1);
4232 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4236 return nullify_end(ctx);
/*
 * Floating point class 3 binary operations: thin wrappers around the
 * softfloat helpers via do_fop_weww (single) / do_fop_dedd (double).
 */
4243 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4245 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4248 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4250 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4253 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4255 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4258 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4260 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4263 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4265 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4268 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4270 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4273 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4275 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4278 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4280 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
/*
 * XMPYU: unsigned 32x32 -> 64-bit multiply performed in the FP register
 * file; operands are zero-extended words loaded as i64.
 */
4283 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4289 x = load_frw0_i64(a->r1);
4290 y = load_frw0_i64(a->r2);
4291 tcg_gen_mul_i64(x, x, y);
4294 return nullify_end(ctx);
4297 /* Convert the fmpyadd single-precision register encodings to standard. */
/* (r & 16) selects the upper bank; the result indexes 32-bit halves. */
4298 static inline int fmpyadd_s_reg(unsigned r)
4300 return (r & 16) * 2 + 16 + (r & 15);
/*
 * FMPYADD/FMPYSUB (single): two independent operations in one insn --
 * tm = rm1 * rm2 and ta = ta +/- ra.  NOT a fused multiply-add; each
 * half rounds separately.
 */
4303 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4305 int tm = fmpyadd_s_reg(a->tm);
4306 int ra = fmpyadd_s_reg(a->ra);
4307 int ta = fmpyadd_s_reg(a->ta);
4308 int rm2 = fmpyadd_s_reg(a->rm2);
4309 int rm1 = fmpyadd_s_reg(a->rm1);
4313 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4314 do_fop_weww(ctx, ta, ta, ra,
4315 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4317 return nullify_end(ctx);
4320 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4322 return do_fmpyadd_s(ctx, a, false);
4325 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4327 return do_fmpyadd_s(ctx, a, true);
/* Double-precision form: register numbers are used as-is. */
4330 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4334 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4335 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4336 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4338 return nullify_end(ctx);
4341 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4343 return do_fmpyadd_d(ctx, a, false);
4346 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4348 return do_fmpyadd_d(ctx, a, true);
/*
 * FMPYFADD/FMPYNFADD (single): true fused multiply-add via softfloat
 * helpers; the negated variant (selecting guard elided) negates the
 * product before the add.
 */
4351 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4356 x = load_frw0_i32(a->rm1);
4357 y = load_frw0_i32(a->rm2);
4358 z = load_frw0_i32(a->ra3);
4361 gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4363 gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4366 save_frw_i32(a->t, x);
4367 return nullify_end(ctx);
/* Double-precision fused multiply-add, as above. */
4370 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4375 x = load_frd0(a->rm1);
4376 y = load_frd0(a->rm2);
4377 z = load_frd0(a->ra3);
4380 gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4382 gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4386 return nullify_end(ctx);
/*
 * DIAG: privileged diagnostic instruction.  Only the SeaBIOS-hppa BTLB
 * PDC call (i == 0x100) is implemented; all other codes are logged as
 * unimplemented and otherwise ignored.
 */
4389 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4391 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4392 #ifndef CONFIG_USER_ONLY
4393 if (a->i == 0x100) {
4394 /* emulate PDC BTLB, called by SeaBIOS-hppa */
4396 gen_helper_diag_btlb(tcg_env);
4397 return nullify_end(ctx);
4400 qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
/*
 * TranslatorOps hook: set up the DisasContext for a new translation block.
 * Derives privilege level, MMU index and the front/back instruction-address
 * queue (IAOQ) values from tb->flags, pc_first and cs_base, which are
 * packed differently for user-only vs system emulation.
 */
4404 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4406 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4410 ctx->tb_flags = ctx->base.tb->flags;
4411 ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4413 #ifdef CONFIG_USER_ONLY
     /* User mode: fixed user privilege; priv bits are folded into the
        low bits of the IAOQ values. */
4414 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4415 ctx->mmu_idx = MMU_USER_IDX;
4416 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4417 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4418 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
     /* System mode: privilege and PSW bits come from tb->flags. */
4420 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4421 ctx->mmu_idx = (ctx->tb_flags & PSW_D
4422 ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4425 /* Recover the IAOQ values from the GVA + PRIV. */
4426 uint64_t cs_base = ctx->base.tb->cs_base;
4427 uint64_t iasq_f = cs_base & ~0xffffffffull;
     /* Low 32 bits of cs_base carry the signed front-to-back offset. */
4428 int32_t diff = cs_base;
4430 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
     /* diff == 0 means IAOQ_Back is not statically known (-1 sentinel). */
4431 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4434 ctx->iaoq_n_var = NULL;
4436 ctx->zero = tcg_constant_i64(0);
4438 /* Bound the number of instructions by those left on the page. */
4439 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4440 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
/*
 * TranslatorOps hook: per-TB start.  Initialize the nullification state
 * from the PSW[N] bit carried in tb->flags so the first insn of the TB
 * is skipped if the previous TB ended with nullification pending.
 */
4443 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4445 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4447 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4448 ctx->null_cond = cond_make_f();
4449 ctx->psw_n_nonzero = false;
4450 if (ctx->tb_flags & PSW_N) {
     /* TCG_COND_ALWAYS: the next insn is unconditionally nullified. */
4451 ctx->null_cond.c = TCG_COND_ALWAYS;
4452 ctx->psw_n_nonzero = true;
4454 ctx->null_lab = NULL;
/*
 * TranslatorOps hook: record both IAOQ values (front and back of the
 * instruction-address queue) in the TCG insn-start op for exception
 * restore.
 */
4457 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4459 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4461 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
/*
 * TranslatorOps hook: translate one instruction.  Handles the HPPA
 * two-entry instruction address queue (IAOQ front/back): each insn
 * computes the "next" queue entry (iaoq_n), which a branch may overwrite,
 * then the queue is advanced.  -1 is the sentinel for "not statically
 * known; live in cpu_iaoq_* globals".  Several control-flow branches of
 * this function fall on elided lines; comments below annotate only the
 * visible logic.
 */
4464 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4466 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4467 CPUHPPAState *env = cpu_env(cs);
4470 /* Execute one insn. */
4471 #ifdef CONFIG_USER_ONLY
     /* User mode: the zero page holds the syscall gateway, handled
        specially rather than fetched and decoded. */
4472 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4474 ret = ctx->base.is_jmp;
4475 assert(ret != DISAS_NEXT);
4479 /* Always fetch the insn, even if nullified, so that we check
4480 the page permissions for execute. */
4481 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4483 /* Set up the IA queue for the next insn.
4484 This will be overwritten by a branch. */
4485 if (ctx->iaoq_b == -1) {
     /* Back of queue unknown at translate time: compute next = b + 4
        dynamically from the cpu_iaoq_b global. */
4487 ctx->iaoq_n_var = tcg_temp_new_i64();
4488 tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4490 ctx->iaoq_n = ctx->iaoq_b + 4;
4491 ctx->iaoq_n_var = NULL;
     /* Current insn unconditionally nullified: clear the condition and
        skip decoding (the skip itself is on an elided line). */
4494 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4495 ctx->null_cond.c = TCG_COND_NEVER;
4499 if (!decode(ctx, insn)) {
4502 ret = ctx->base.is_jmp;
4503 assert(ctx->null_lab == NULL);
4507 /* Advance the insn queue. Note that this check also detects
4508 a priority change within the instruction queue. */
4509 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4510 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4511 && use_goto_tb(ctx, ctx->iaoq_b)
4512 && (ctx->null_cond.c == TCG_COND_NEVER
4513 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
     /* Known target and fully-resolved nullification: chain TBs. */
4514 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4515 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4516 ctx->base.is_jmp = ret = DISAS_NORETURN;
4518 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
     /* Shift the queue: front <- back, back <- next. */
4521 ctx->iaoq_f = ctx->iaoq_b;
4522 ctx->iaoq_b = ctx->iaoq_n;
4523 ctx->base.pc_next += 4;
4526 case DISAS_NORETURN:
4527 case DISAS_IAQ_N_UPDATED:
4531 case DISAS_IAQ_N_STALE:
4532 case DISAS_IAQ_N_STALE_EXIT:
4533 if (ctx->iaoq_f == -1) {
     /* Front unknown: flush the queue into the cpu_iaoq_* globals. */
4534 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4535 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4536 #ifndef CONFIG_USER_ONLY
     /* Space registers advance along with the offset queue. */
4537 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4540 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4542 : DISAS_IAQ_N_UPDATED);
4543 } else if (ctx->iaoq_b == -1) {
4544 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4549 g_assert_not_reached();
/*
 * TranslatorOps hook: end of TB.  Depending on how translation stopped,
 * write back any statically-known IAOQ values and emit the appropriate
 * TB exit (goto_tb chaining, lookup_and_goto_ptr, or a full exit for
 * the _EXIT variant).  Several case bodies are on elided lines.
 */
4553 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4555 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4556 DisasJumpType is_jmp = ctx->base.is_jmp;
4559 case DISAS_NORETURN:
4561 case DISAS_TOO_MANY:
4562 case DISAS_IAQ_N_STALE:
4563 case DISAS_IAQ_N_STALE_EXIT:
     /* Stale queue: materialize both IAOQ entries into the globals. */
4564 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f)
4565 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4568 case DISAS_IAQ_N_UPDATED:
4569 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
     /* Target already in the globals: try the TB-lookup fast path. */
4570 tcg_gen_lookup_and_goto_ptr();
     /* _EXIT variant falls through to a full exit back to the loop. */
4575 tcg_gen_exit_tb(NULL, 0);
4578 g_assert_not_reached();
/*
 * TranslatorOps hook: disassembly logging.  In user mode, TBs on the
 * zero page are the syscall gateway entry points with no real code to
 * disassemble, so print a symbolic name instead of disassembling.
 */
4582 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4583 CPUState *cs, FILE *logfile)
4585 target_ulong pc = dcbase->pc_first;
4587 #ifdef CONFIG_USER_ONLY
4590 fprintf(logfile, "IN:\n0x00000000: (null)\n");
4593 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
4596 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
4599 fprintf(logfile, "IN:\n0x00000100: syscall\n");
     /* Normal case: symbolic name plus full disassembly of the TB. */
4604 fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4605 target_disas(logfile, cs, pc, dcbase->tb->size);
/* Hook table consumed by translator_loop() in gen_intermediate_code(). */
4608 static const TranslatorOps hppa_tr_ops = {
4609 .init_disas_context = hppa_tr_init_disas_context,
4610 .tb_start = hppa_tr_tb_start,
4611 .insn_start = hppa_tr_insn_start,
4612 .translate_insn = hppa_tr_translate_insn,
4613 .tb_stop = hppa_tr_tb_stop,
4614 .disas_log = hppa_tr_disas_log,
/*
 * Entry point from the generic accel/tcg code: translate one TB for the
 * HPPA target by driving the common translator loop with the hook table
 * above.
 */
4617 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4618 target_ulong pc, void *host_pc)
4621 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);