#include "trace-tcg.h"
#include "exec/log.h"
+/* Since we have a distinction between register size and address size,
+ we need to redefine all of these. */
+
+#undef TCGv
+#undef tcg_temp_new
+#undef tcg_global_reg_new
+#undef tcg_global_mem_new
+#undef tcg_temp_local_new
+#undef tcg_temp_free
+
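+/* TCGv_tl and the _tl helpers stay address-sized (TARGET_LONG_BITS);
+   the TCGv_reg aliases below are register-sized (TARGET_REGISTER_BITS). */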
+#if TARGET_LONG_BITS == 64
+#define TCGv_tl TCGv_i64
+#define tcg_temp_new_tl tcg_temp_new_i64
+#define tcg_temp_free_tl tcg_temp_free_i64
+#if TARGET_REGISTER_BITS == 64
+#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
+#else
+#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
+#endif
+#else
+#define TCGv_tl TCGv_i32
+#define tcg_temp_new_tl tcg_temp_new_i32
+#define tcg_temp_free_tl tcg_temp_free_i32
+#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
+#endif
+
+#if TARGET_REGISTER_BITS == 64
+#define TCGv_reg TCGv_i64
+
+#define tcg_temp_new tcg_temp_new_i64
+#define tcg_global_reg_new tcg_global_reg_new_i64
+#define tcg_global_mem_new tcg_global_mem_new_i64
+#define tcg_temp_local_new tcg_temp_local_new_i64
+#define tcg_temp_free tcg_temp_free_i64
+
+#define tcg_gen_movi_reg tcg_gen_movi_i64
+#define tcg_gen_mov_reg tcg_gen_mov_i64
+#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
+#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
+#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
+#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
+#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
+#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
+#define tcg_gen_ld_reg tcg_gen_ld_i64
+#define tcg_gen_st8_reg tcg_gen_st8_i64
+#define tcg_gen_st16_reg tcg_gen_st16_i64
+#define tcg_gen_st32_reg tcg_gen_st32_i64
+#define tcg_gen_st_reg tcg_gen_st_i64
+#define tcg_gen_add_reg tcg_gen_add_i64
+#define tcg_gen_addi_reg tcg_gen_addi_i64
+#define tcg_gen_sub_reg tcg_gen_sub_i64
+#define tcg_gen_neg_reg tcg_gen_neg_i64
+#define tcg_gen_subfi_reg tcg_gen_subfi_i64
+#define tcg_gen_subi_reg tcg_gen_subi_i64
+#define tcg_gen_and_reg tcg_gen_and_i64
+#define tcg_gen_andi_reg tcg_gen_andi_i64
+#define tcg_gen_or_reg tcg_gen_or_i64
+#define tcg_gen_ori_reg tcg_gen_ori_i64
+#define tcg_gen_xor_reg tcg_gen_xor_i64
+#define tcg_gen_xori_reg tcg_gen_xori_i64
+#define tcg_gen_not_reg tcg_gen_not_i64
+#define tcg_gen_shl_reg tcg_gen_shl_i64
+#define tcg_gen_shli_reg tcg_gen_shli_i64
+#define tcg_gen_shr_reg tcg_gen_shr_i64
+#define tcg_gen_shri_reg tcg_gen_shri_i64
+#define tcg_gen_sar_reg tcg_gen_sar_i64
+#define tcg_gen_sari_reg tcg_gen_sari_i64
+#define tcg_gen_brcond_reg tcg_gen_brcond_i64
+#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
+#define tcg_gen_setcond_reg tcg_gen_setcond_i64
+#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
+#define tcg_gen_mul_reg tcg_gen_mul_i64
+#define tcg_gen_muli_reg tcg_gen_muli_i64
+#define tcg_gen_div_reg tcg_gen_div_i64
+#define tcg_gen_rem_reg tcg_gen_rem_i64
+#define tcg_gen_divu_reg tcg_gen_divu_i64
+#define tcg_gen_remu_reg tcg_gen_remu_i64
+#define tcg_gen_discard_reg tcg_gen_discard_i64
+#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
+#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
+#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
+#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
+#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
+#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
+#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
+#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
+#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
+#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
+#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
+#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
+#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
+#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
+#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
+#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
+#define tcg_gen_andc_reg tcg_gen_andc_i64
+#define tcg_gen_eqv_reg tcg_gen_eqv_i64
+#define tcg_gen_nand_reg tcg_gen_nand_i64
+#define tcg_gen_nor_reg tcg_gen_nor_i64
+#define tcg_gen_orc_reg tcg_gen_orc_i64
+#define tcg_gen_clz_reg tcg_gen_clz_i64
+#define tcg_gen_ctz_reg tcg_gen_ctz_i64
+#define tcg_gen_clzi_reg tcg_gen_clzi_i64
+#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
+#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
+#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
+#define tcg_gen_rotl_reg tcg_gen_rotl_i64
+#define tcg_gen_rotli_reg tcg_gen_rotli_i64
+#define tcg_gen_rotr_reg tcg_gen_rotr_i64
+#define tcg_gen_rotri_reg tcg_gen_rotri_i64
+#define tcg_gen_deposit_reg tcg_gen_deposit_i64
+#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
+#define tcg_gen_extract_reg tcg_gen_extract_i64
+#define tcg_gen_sextract_reg tcg_gen_sextract_i64
+#define tcg_const_reg tcg_const_i64
+#define tcg_const_local_reg tcg_const_local_i64
+#define tcg_gen_movcond_reg tcg_gen_movcond_i64
+#define tcg_gen_add2_reg tcg_gen_add2_i64
+#define tcg_gen_sub2_reg tcg_gen_sub2_i64
+#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
+#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
+#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
+#if UINTPTR_MAX == UINT32_MAX
+# define tcg_gen_trunc_reg_ptr(p, r) \
+ tcg_gen_trunc_i64_i32(TCGV_PTR_TO_NAT(p), r)
+#else
+# define tcg_gen_trunc_reg_ptr(p, r) \
+ tcg_gen_mov_i64(TCGV_PTR_TO_NAT(p), r)
+#endif
+#else
+#define TCGv_reg TCGv_i32
+#define tcg_temp_new tcg_temp_new_i32
+#define tcg_global_reg_new tcg_global_reg_new_i32
+#define tcg_global_mem_new tcg_global_mem_new_i32
+#define tcg_temp_local_new tcg_temp_local_new_i32
+#define tcg_temp_free tcg_temp_free_i32
+
+#define tcg_gen_movi_reg tcg_gen_movi_i32
+#define tcg_gen_mov_reg tcg_gen_mov_i32
+#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
+#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
+#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
+#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
+#define tcg_gen_ld32u_reg tcg_gen_ld_i32
+#define tcg_gen_ld32s_reg tcg_gen_ld_i32
+#define tcg_gen_ld_reg tcg_gen_ld_i32
+#define tcg_gen_st8_reg tcg_gen_st8_i32
+#define tcg_gen_st16_reg tcg_gen_st16_i32
+#define tcg_gen_st32_reg tcg_gen_st_i32
+#define tcg_gen_st_reg tcg_gen_st_i32
+#define tcg_gen_add_reg tcg_gen_add_i32
+#define tcg_gen_addi_reg tcg_gen_addi_i32
+#define tcg_gen_sub_reg tcg_gen_sub_i32
+#define tcg_gen_neg_reg tcg_gen_neg_i32
+#define tcg_gen_subfi_reg tcg_gen_subfi_i32
+#define tcg_gen_subi_reg tcg_gen_subi_i32
+#define tcg_gen_and_reg tcg_gen_and_i32
+#define tcg_gen_andi_reg tcg_gen_andi_i32
+#define tcg_gen_or_reg tcg_gen_or_i32
+#define tcg_gen_ori_reg tcg_gen_ori_i32
+#define tcg_gen_xor_reg tcg_gen_xor_i32
+#define tcg_gen_xori_reg tcg_gen_xori_i32
+#define tcg_gen_not_reg tcg_gen_not_i32
+#define tcg_gen_shl_reg tcg_gen_shl_i32
+#define tcg_gen_shli_reg tcg_gen_shli_i32
+#define tcg_gen_shr_reg tcg_gen_shr_i32
+#define tcg_gen_shri_reg tcg_gen_shri_i32
+#define tcg_gen_sar_reg tcg_gen_sar_i32
+#define tcg_gen_sari_reg tcg_gen_sari_i32
+#define tcg_gen_brcond_reg tcg_gen_brcond_i32
+#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
+#define tcg_gen_setcond_reg tcg_gen_setcond_i32
+#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
+#define tcg_gen_mul_reg tcg_gen_mul_i32
+#define tcg_gen_muli_reg tcg_gen_muli_i32
+#define tcg_gen_div_reg tcg_gen_div_i32
+#define tcg_gen_rem_reg tcg_gen_rem_i32
+#define tcg_gen_divu_reg tcg_gen_divu_i32
+#define tcg_gen_remu_reg tcg_gen_remu_i32
+#define tcg_gen_discard_reg tcg_gen_discard_i32
+#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
+#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
+#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
+#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
+#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
+#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
+#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
+#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
+#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
+#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
+#define tcg_gen_ext32u_reg tcg_gen_mov_i32
+#define tcg_gen_ext32s_reg tcg_gen_mov_i32
+#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
+#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
+#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
+#define tcg_gen_andc_reg tcg_gen_andc_i32
+#define tcg_gen_eqv_reg tcg_gen_eqv_i32
+#define tcg_gen_nand_reg tcg_gen_nand_i32
+#define tcg_gen_nor_reg tcg_gen_nor_i32
+#define tcg_gen_orc_reg tcg_gen_orc_i32
+#define tcg_gen_clz_reg tcg_gen_clz_i32
+#define tcg_gen_ctz_reg tcg_gen_ctz_i32
+#define tcg_gen_clzi_reg tcg_gen_clzi_i32
+#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
+#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
+#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
+#define tcg_gen_rotl_reg tcg_gen_rotl_i32
+#define tcg_gen_rotli_reg tcg_gen_rotli_i32
+#define tcg_gen_rotr_reg tcg_gen_rotr_i32
+#define tcg_gen_rotri_reg tcg_gen_rotri_i32
+#define tcg_gen_deposit_reg tcg_gen_deposit_i32
+#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
+#define tcg_gen_extract_reg tcg_gen_extract_i32
+#define tcg_gen_sextract_reg tcg_gen_sextract_i32
+#define tcg_const_reg tcg_const_i32
+#define tcg_const_local_reg tcg_const_local_i32
+#define tcg_gen_movcond_reg tcg_gen_movcond_i32
+#define tcg_gen_add2_reg tcg_gen_add2_i32
+#define tcg_gen_sub2_reg tcg_gen_sub2_i32
+#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
+#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
+#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
+#if UINTPTR_MAX == UINT32_MAX
+# define tcg_gen_trunc_reg_ptr(p, r) \
+ tcg_gen_mov_i32(TCGV_PTR_TO_NAT(p), r)
+#else
+# define tcg_gen_trunc_reg_ptr(p, r) \
+ tcg_gen_extu_i32_i64(TCGV_PTR_TO_NAT(p), r)
+#endif
+#endif /* TARGET_REGISTER_BITS */
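+
+/* Everything below is written against these _reg aliases and the
+   target_ureg/target_sreg types, so it builds for either register width. */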
+
typedef struct DisasCond {
TCGCond c;
- TCGv a0, a1;
+ TCGv_reg a0, a1;
bool a0_is_n;
bool a1_is_0;
} DisasCond;
DisasContextBase base;
CPUState *cs;
- target_ulong iaoq_f;
- target_ulong iaoq_b;
- target_ulong iaoq_n;
- TCGv iaoq_n_var;
+ target_ureg iaoq_f;
+ target_ureg iaoq_b;
+ target_ureg iaoq_n;
+ TCGv_reg iaoq_n_var;
int ntemps;
- TCGv temps[8];
+ TCGv_reg temps[8];
DisasCond null_cond;
TCGLabel *null_lab;
DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
const struct DisasInsn *f);
union {
- void (*ttt)(TCGv, TCGv, TCGv);
+ void (*ttt)(TCGv_reg, TCGv_reg, TCGv_reg);
void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
} DisasInsn;
/* global register indexes */
-static TCGv cpu_gr[32];
-static TCGv cpu_iaoq_f;
-static TCGv cpu_iaoq_b;
-static TCGv cpu_sar;
-static TCGv cpu_psw_n;
-static TCGv cpu_psw_v;
-static TCGv cpu_psw_cb;
-static TCGv cpu_psw_cb_msb;
-static TCGv cpu_cr26;
-static TCGv cpu_cr27;
+static TCGv_reg cpu_gr[32];
+static TCGv_reg cpu_iaoq_f;
+static TCGv_reg cpu_iaoq_b;
+static TCGv_reg cpu_sar;
+static TCGv_reg cpu_psw_n;
+static TCGv_reg cpu_psw_v;
+static TCGv_reg cpu_psw_cb;
+static TCGv_reg cpu_psw_cb_msb;
+static TCGv_reg cpu_cr26;
+static TCGv_reg cpu_cr27;
#include "exec/gen-icount.h"
{
#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
- typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
+ typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
static const GlobalVar vars[] = {
DEF_VAR(sar),
DEF_VAR(cr26),
};
}
-static DisasCond cond_make_0(TCGCond c, TCGv a0)
+static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };
assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
r.a0 = tcg_temp_new();
- tcg_gen_mov_tl(r.a0, a0);
+ tcg_gen_mov_reg(r.a0, a0);
return r;
}
-static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
+static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
DisasCond r = { .c = c };
assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
r.a0 = tcg_temp_new();
- tcg_gen_mov_tl(r.a0, a0);
+ tcg_gen_mov_reg(r.a0, a0);
r.a1 = tcg_temp_new();
- tcg_gen_mov_tl(r.a1, a1);
+ tcg_gen_mov_reg(r.a1, a1);
return r;
}
{
if (cond->a1_is_0) {
cond->a1_is_0 = false;
- cond->a1 = tcg_const_tl(0);
+ cond->a1 = tcg_const_reg(0);
}
}
}
}
-static TCGv get_temp(DisasContext *ctx)
+static TCGv_reg get_temp(DisasContext *ctx)
{
unsigned i = ctx->ntemps++;
g_assert(i < ARRAY_SIZE(ctx->temps));
return ctx->temps[i] = tcg_temp_new();
}
-static TCGv load_const(DisasContext *ctx, target_long v)
+static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
- TCGv t = get_temp(ctx);
- tcg_gen_movi_tl(t, v);
+ TCGv_reg t = get_temp(ctx);
+ tcg_gen_movi_reg(t, v);
return t;
}
-static TCGv load_gpr(DisasContext *ctx, unsigned reg)
+static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
if (reg == 0) {
- TCGv t = get_temp(ctx);
- tcg_gen_movi_tl(t, 0);
+ TCGv_reg t = get_temp(ctx);
+ tcg_gen_movi_reg(t, 0);
return t;
} else {
return cpu_gr[reg];
}
}
-static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
+static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
return get_temp(ctx);
}
}
-static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
+static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
if (ctx->null_cond.c != TCG_COND_NEVER) {
cond_prep(&ctx->null_cond);
- tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
+ tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
ctx->null_cond.a1, dest, t);
} else {
- tcg_gen_mov_tl(dest, t);
+ tcg_gen_mov_reg(dest, t);
}
}
-static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
+static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
if (reg != 0) {
save_or_nullify(ctx, cpu_gr[reg], t);
if (ctx->null_cond.a0_is_n) {
ctx->null_cond.a0_is_n = false;
ctx->null_cond.a0 = tcg_temp_new();
- tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
+ tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
}
/* ... we clear it before branching over the implementation,
so that (1) it's clear after nullifying this insn and
(2) if this insn nullifies the next, PSW[N] is valid. */
if (ctx->psw_n_nonzero) {
ctx->psw_n_nonzero = false;
- tcg_gen_movi_tl(cpu_psw_n, 0);
+ tcg_gen_movi_reg(cpu_psw_n, 0);
}
- tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
+ tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
ctx->null_cond.a1, ctx->null_lab);
cond_free(&ctx->null_cond);
}
{
if (ctx->null_cond.c == TCG_COND_NEVER) {
if (ctx->psw_n_nonzero) {
- tcg_gen_movi_tl(cpu_psw_n, 0);
+ tcg_gen_movi_reg(cpu_psw_n, 0);
}
return;
}
if (!ctx->null_cond.a0_is_n) {
cond_prep(&ctx->null_cond);
- tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
+ tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
ctx->null_cond.a0, ctx->null_cond.a1);
ctx->psw_n_nonzero = true;
}
static void nullify_set(DisasContext *ctx, bool x)
{
if (ctx->psw_n_nonzero || x) {
- tcg_gen_movi_tl(cpu_psw_n, x);
+ tcg_gen_movi_reg(cpu_psw_n, x);
}
}
return status;
}
-static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
+static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
if (unlikely(ival == -1)) {
- tcg_gen_mov_tl(dest, vval);
+ tcg_gen_mov_reg(dest, vval);
} else {
- tcg_gen_movi_tl(dest, ival);
+ tcg_gen_movi_reg(dest, ival);
}
}
-static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
+static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
return ctx->iaoq_f + disp + 8;
}
return nullify_end(ctx, gen_excp(ctx, EXCP_ILL));
}
-static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
/* Suppress goto_tb in the case of single-stepping and IO. */
if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
}
static void gen_goto_tb(DisasContext *ctx, int which,
- target_ulong f, target_ulong b)
+ target_ureg f, target_ureg b)
{
if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
tcg_gen_goto_tb(which);
- tcg_gen_movi_tl(cpu_iaoq_f, f);
- tcg_gen_movi_tl(cpu_iaoq_b, b);
+ tcg_gen_movi_reg(cpu_iaoq_f, f);
+ tcg_gen_movi_reg(cpu_iaoq_b, b);
tcg_gen_exit_tb((uintptr_t)ctx->base.tb + which);
} else {
copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
/* PA has a habit of taking the LSB of a field and using that as the sign,
with the rest of the field becoming the least significant bits. */
-static target_long low_sextract(uint32_t val, int pos, int len)
+static target_sreg low_sextract(uint32_t val, int pos, int len)
{
- target_ulong x = -(target_ulong)extract32(val, pos, 1);
+ target_ureg x = -(target_ureg)extract32(val, pos, 1);
x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
return x;
}
return r2 * 32 + r1 * 4 + r0;
}
-static target_long assemble_12(uint32_t insn)
+static target_sreg assemble_12(uint32_t insn)
{
- target_ulong x = -(target_ulong)(insn & 1);
+ target_ureg x = -(target_ureg)(insn & 1);
x = (x << 1) | extract32(insn, 2, 1);
x = (x << 10) | extract32(insn, 3, 10);
return x;
}
-static target_long assemble_16(uint32_t insn)
+static target_sreg assemble_16(uint32_t insn)
{
/* Take the name from PA2.0, which produces a 16-bit number
only with wide mode; otherwise a 14-bit number. Since we don't
return low_sextract(insn, 0, 14);
}
-static target_long assemble_16a(uint32_t insn)
+static target_sreg assemble_16a(uint32_t insn)
{
/* Take the name from PA2.0, which produces a 14-bit shifted number
only with wide mode; otherwise a 12-bit shifted number. Since we
don't implement wide mode, this is always the 12-bit number. */
- target_ulong x = -(target_ulong)(insn & 1);
+ target_ureg x = -(target_ureg)(insn & 1);
x = (x << 11) | extract32(insn, 2, 11);
return x << 2;
}
-static target_long assemble_17(uint32_t insn)
+static target_sreg assemble_17(uint32_t insn)
{
- target_ulong x = -(target_ulong)(insn & 1);
+ target_ureg x = -(target_ureg)(insn & 1);
x = (x << 5) | extract32(insn, 16, 5);
x = (x << 1) | extract32(insn, 2, 1);
x = (x << 10) | extract32(insn, 3, 10);
return x << 2;
}
-static target_long assemble_21(uint32_t insn)
+static target_sreg assemble_21(uint32_t insn)
{
- target_ulong x = -(target_ulong)(insn & 1);
+ target_ureg x = -(target_ureg)(insn & 1);
x = (x << 11) | extract32(insn, 1, 11);
x = (x << 2) | extract32(insn, 14, 2);
x = (x << 5) | extract32(insn, 16, 5);
return x << 11;
}
-static target_long assemble_22(uint32_t insn)
+static target_sreg assemble_22(uint32_t insn)
{
- target_ulong x = -(target_ulong)(insn & 1);
+ target_ureg x = -(target_ureg)(insn & 1);
x = (x << 10) | extract32(insn, 16, 10);
x = (x << 1) | extract32(insn, 2, 1);
x = (x << 10) | extract32(insn, 3, 10);
as a whole it would appear that these relations are similar to what
a traditional NZCV set of flags would produce. */
-static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
+static DisasCond do_cond(unsigned cf, TCGv_reg res,
+ TCGv_reg cb_msb, TCGv_reg sv)
{
DisasCond cond;
- TCGv tmp;
+ TCGv_reg tmp;
switch (cf >> 1) {
case 0: /* Never / TR */
break;
case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
tmp = tcg_temp_new();
- tcg_gen_neg_tl(tmp, cb_msb);
- tcg_gen_and_tl(tmp, tmp, res);
+ tcg_gen_neg_reg(tmp, cb_msb);
+ tcg_gen_and_reg(tmp, tmp, res);
cond = cond_make_0(TCG_COND_EQ, tmp);
tcg_temp_free(tmp);
break;
break;
case 7: /* OD / EV */
tmp = tcg_temp_new();
- tcg_gen_andi_tl(tmp, res, 1);
+ tcg_gen_andi_reg(tmp, res, 1);
cond = cond_make_0(TCG_COND_NE, tmp);
tcg_temp_free(tmp);
break;
can use the inputs directly. This can allow other computation to be
deleted as unused. */
-static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
+static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
DisasCond cond;
/* Similar, but for logicals, where the carry and overflow bits are not
computed, and use of them is undefined. */
-static DisasCond do_log_cond(unsigned cf, TCGv res)
+static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
switch (cf >> 1) {
case 4: case 5: case 6:
/* Similar, but for shift/extract/deposit conditions. */
-static DisasCond do_sed_cond(unsigned orig, TCGv res)
+static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
unsigned c, f;
/* Similar, but for unit conditions. */
-static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
+static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2)
{
DisasCond cond;
- TCGv tmp, cb = NULL;
+ TCGv_reg tmp, cb = NULL;
if (cf & 8) {
/* Since we want to test lots of carry-out bits all at once, do not
*/
cb = tcg_temp_new();
tmp = tcg_temp_new();
- tcg_gen_or_tl(cb, in1, in2);
- tcg_gen_and_tl(tmp, in1, in2);
- tcg_gen_andc_tl(cb, cb, res);
- tcg_gen_or_tl(cb, cb, tmp);
+ tcg_gen_or_reg(cb, in1, in2);
+ tcg_gen_and_reg(tmp, in1, in2);
+ tcg_gen_andc_reg(cb, cb, res);
+ tcg_gen_or_reg(cb, cb, tmp);
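+ /* i.e. cb = (in1 & in2) | ((in1 | in2) & ~res), the per-bit
+    carry-out of the addition that produced res. */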
tcg_temp_free(tmp);
}
* https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
*/
tmp = tcg_temp_new();
- tcg_gen_subi_tl(tmp, res, 0x01010101u);
- tcg_gen_andc_tl(tmp, tmp, res);
- tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
+ tcg_gen_subi_reg(tmp, res, 0x01010101u);
+ tcg_gen_andc_reg(tmp, tmp, res);
+ tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
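+ /* e.g. res = 0x12003456: (res - 0x01010101) & ~res & 0x80808080
+    == 0x00800000 != 0, flagging the zero byte. */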
cond = cond_make_0(TCG_COND_NE, tmp);
tcg_temp_free(tmp);
break;
case 3: /* SHZ / NHZ */
tmp = tcg_temp_new();
- tcg_gen_subi_tl(tmp, res, 0x00010001u);
- tcg_gen_andc_tl(tmp, tmp, res);
- tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
+ tcg_gen_subi_reg(tmp, res, 0x00010001u);
+ tcg_gen_andc_reg(tmp, tmp, res);
+ tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
cond = cond_make_0(TCG_COND_NE, tmp);
tcg_temp_free(tmp);
break;
case 4: /* SDC / NDC */
- tcg_gen_andi_tl(cb, cb, 0x88888888u);
+ tcg_gen_andi_reg(cb, cb, 0x88888888u);
cond = cond_make_0(TCG_COND_NE, cb);
break;
case 6: /* SBC / NBC */
- tcg_gen_andi_tl(cb, cb, 0x80808080u);
+ tcg_gen_andi_reg(cb, cb, 0x80808080u);
cond = cond_make_0(TCG_COND_NE, cb);
break;
case 7: /* SHC / NHC */
- tcg_gen_andi_tl(cb, cb, 0x80008000u);
+ tcg_gen_andi_reg(cb, cb, 0x80008000u);
cond = cond_make_0(TCG_COND_NE, cb);
break;
}
/* Compute signed overflow for addition. */
-static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
+static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2)
{
- TCGv sv = get_temp(ctx);
- TCGv tmp = tcg_temp_new();
+ TCGv_reg sv = get_temp(ctx);
+ TCGv_reg tmp = tcg_temp_new();
- tcg_gen_xor_tl(sv, res, in1);
- tcg_gen_xor_tl(tmp, in1, in2);
- tcg_gen_andc_tl(sv, sv, tmp);
+ tcg_gen_xor_reg(sv, res, in1);
+ tcg_gen_xor_reg(tmp, in1, in2);
+ tcg_gen_andc_reg(sv, sv, tmp);
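+ /* sv = (res ^ in1) & ~(in1 ^ in2); its sign bit is set iff the
+    operands had the same sign but the result's sign differs. */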
tcg_temp_free(tmp);
return sv;
}
/* Compute signed overflow for subtraction. */
-static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
+static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2)
{
- TCGv sv = get_temp(ctx);
- TCGv tmp = tcg_temp_new();
+ TCGv_reg sv = get_temp(ctx);
+ TCGv_reg tmp = tcg_temp_new();
- tcg_gen_xor_tl(sv, res, in1);
- tcg_gen_xor_tl(tmp, in1, in2);
- tcg_gen_and_tl(sv, sv, tmp);
+ tcg_gen_xor_reg(sv, res, in1);
+ tcg_gen_xor_reg(tmp, in1, in2);
+ tcg_gen_and_reg(sv, sv, tmp);
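+ /* sv = (res ^ in1) & (in1 ^ in2); its sign bit is set iff the
+    operands had different signs and the result's sign differs from in1. */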
tcg_temp_free(tmp);
return sv;
}
-static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
- unsigned shift, bool is_l, bool is_tsv, bool is_tc,
- bool is_c, unsigned cf)
+static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned shift, bool is_l,
+ bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
- TCGv dest, cb, cb_msb, sv, tmp;
+ TCGv_reg dest, cb, cb_msb, sv, tmp;
unsigned c = cf >> 1;
DisasCond cond;
if (shift) {
tmp = get_temp(ctx);
- tcg_gen_shli_tl(tmp, in1, shift);
+ tcg_gen_shli_reg(tmp, in1, shift);
in1 = tmp;
}
if (!is_l || c == 4 || c == 5) {
- TCGv zero = tcg_const_tl(0);
+ TCGv_reg zero = tcg_const_reg(0);
cb_msb = get_temp(ctx);
- tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
+ tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
if (is_c) {
- tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
+ tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
}
tcg_temp_free(zero);
if (!is_l) {
cb = get_temp(ctx);
- tcg_gen_xor_tl(cb, in1, in2);
- tcg_gen_xor_tl(cb, cb, dest);
+ tcg_gen_xor_reg(cb, in1, in2);
+ tcg_gen_xor_reg(cb, cb, dest);
}
} else {
- tcg_gen_add_tl(dest, in1, in2);
+ tcg_gen_add_reg(dest, in1, in2);
if (is_c) {
- tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
+ tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
}
}
if (is_tc) {
cond_prep(&cond);
tmp = tcg_temp_new();
- tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
+ tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
gen_helper_tcond(cpu_env, tmp);
tcg_temp_free(tmp);
}
return DISAS_NEXT;
}
-static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
- bool is_tsv, bool is_b, bool is_tc, unsigned cf)
+static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, bool is_tsv, bool is_b,
+ bool is_tc, unsigned cf)
{
- TCGv dest, sv, cb, cb_msb, zero, tmp;
+ TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
unsigned c = cf >> 1;
DisasCond cond;
cb = tcg_temp_new();
cb_msb = tcg_temp_new();
- zero = tcg_const_tl(0);
+ zero = tcg_const_reg(0);
if (is_b) {
/* DEST,C = IN1 + ~IN2 + C. */
- tcg_gen_not_tl(cb, in2);
- tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
- tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
- tcg_gen_xor_tl(cb, cb, in1);
- tcg_gen_xor_tl(cb, cb, dest);
+ tcg_gen_not_reg(cb, in2);
+ tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
+ tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
+ tcg_gen_xor_reg(cb, cb, in1);
+ tcg_gen_xor_reg(cb, cb, dest);
} else {
/* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
operations by seeding the high word with 1 and subtracting. */
- tcg_gen_movi_tl(cb_msb, 1);
- tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
- tcg_gen_eqv_tl(cb, in1, in2);
- tcg_gen_xor_tl(cb, cb, dest);
+ tcg_gen_movi_reg(cb_msb, 1);
+ tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
+ tcg_gen_eqv_reg(cb, in1, in2);
+ tcg_gen_xor_reg(cb, cb, dest);
}
tcg_temp_free(zero);
if (is_tc) {
cond_prep(&cond);
tmp = tcg_temp_new();
- tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
+ tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
gen_helper_tcond(cpu_env, tmp);
tcg_temp_free(tmp);
}
return DISAS_NEXT;
}
-static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
- TCGv in2, unsigned cf)
+static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned cf)
{
- TCGv dest, sv;
+ TCGv_reg dest, sv;
DisasCond cond;
dest = tcg_temp_new();
- tcg_gen_sub_tl(dest, in1, in2);
+ tcg_gen_sub_reg(dest, in1, in2);
/* Compute signed overflow if required. */
sv = NULL;
cond = do_sub_cond(cf, dest, in1, in2, sv);
/* Clear. */
- tcg_gen_movi_tl(dest, 0);
+ tcg_gen_movi_reg(dest, 0);
save_gpr(ctx, rt, dest);
tcg_temp_free(dest);
return DISAS_NEXT;
}
-static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
- unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
+static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned cf,
+ void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
- TCGv dest = dest_gpr(ctx, rt);
+ TCGv_reg dest = dest_gpr(ctx, rt);
/* Perform the operation, and writeback. */
fn(dest, in1, in2);
return DISAS_NEXT;
}
-static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
- TCGv in2, unsigned cf, bool is_tc,
- void (*fn)(TCGv, TCGv, TCGv))
+static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned cf, bool is_tc,
+ void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
- TCGv dest;
+ TCGv_reg dest;
DisasCond cond;
if (cf == 0) {
cond = do_unit_cond(cf, dest, in1, in2);
if (is_tc) {
- TCGv tmp = tcg_temp_new();
+ TCGv_reg tmp = tcg_temp_new();
cond_prep(&cond);
- tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
+ tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
gen_helper_tcond(cpu_env, tmp);
tcg_temp_free(tmp);
}
* = 0 for no base register update.
*/
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
- unsigned rx, int scale, target_long disp,
+ unsigned rx, int scale, target_sreg disp,
int modify, TCGMemOp mop)
{
- TCGv addr, base;
+ TCGv_reg addr, base;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
/* Note that RX is mutually exclusive with DISP. */
if (rx) {
- tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
- tcg_gen_add_tl(addr, addr, base);
+ tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
+ tcg_gen_add_reg(addr, addr, base);
} else {
- tcg_gen_addi_tl(addr, base, disp);
+ tcg_gen_addi_reg(addr, base, disp);
}
if (modify == 0) {
}
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
- unsigned rx, int scale, target_long disp,
+ unsigned rx, int scale, target_sreg disp,
int modify, TCGMemOp mop)
{
- TCGv addr, base;
+ TCGv_reg addr, base;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
/* Note that RX is mutually exclusive with DISP. */
if (rx) {
- tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
- tcg_gen_add_tl(addr, addr, base);
+ tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
+ tcg_gen_add_reg(addr, addr, base);
} else {
- tcg_gen_addi_tl(addr, base, disp);
+ tcg_gen_addi_reg(addr, base, disp);
}
if (modify == 0) {
}
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
- unsigned rx, int scale, target_long disp,
+ unsigned rx, int scale, target_sreg disp,
int modify, TCGMemOp mop)
{
- TCGv addr, base;
+ TCGv_reg addr, base;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
/* Note that RX is mutually exclusive with DISP. */
if (rx) {
- tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
- tcg_gen_add_tl(addr, addr, base);
+ tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
+ tcg_gen_add_reg(addr, addr, base);
} else {
- tcg_gen_addi_tl(addr, base, disp);
+ tcg_gen_addi_reg(addr, base, disp);
}
tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
}
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
- unsigned rx, int scale, target_long disp,
+ unsigned rx, int scale, target_sreg disp,
int modify, TCGMemOp mop)
{
- TCGv addr, base;
+ TCGv_reg addr, base;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
/* Note that RX is mutually exclusive with DISP. */
if (rx) {
- tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
- tcg_gen_add_tl(addr, addr, base);
+ tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
+ tcg_gen_add_reg(addr, addr, base);
} else {
- tcg_gen_addi_tl(addr, base, disp);
+ tcg_gen_addi_reg(addr, base, disp);
}
tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
tcg_temp_free(addr);
}
-#if TARGET_LONG_BITS == 64
-#define do_load_tl do_load_64
-#define do_store_tl do_store_64
+#if TARGET_REGISTER_BITS == 64
+#define do_load_reg do_load_64
+#define do_store_reg do_store_64
#else
-#define do_load_tl do_load_32
-#define do_store_tl do_store_32
+#define do_load_reg do_load_32
+#define do_store_reg do_store_32
#endif
static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_long disp,
+ unsigned rx, int scale, target_sreg disp,
int modify, TCGMemOp mop)
{
- TCGv dest;
+ TCGv_reg dest;
nullify_over(ctx);
/* Make sure if RT == RB, we see the result of the load. */
dest = get_temp(ctx);
}
- do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
+ do_load_reg(ctx, dest, rb, rx, scale, disp, modify, mop);
save_gpr(ctx, rt, dest);
return nullify_end(ctx, DISAS_NEXT);
}
static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_long disp,
+ unsigned rx, int scale, target_sreg disp,
int modify)
{
TCGv_i32 tmp;
}
static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_long disp,
+ unsigned rx, int scale, target_sreg disp,
int modify)
{
TCGv_i64 tmp;
}
static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
- target_long disp, int modify, TCGMemOp mop)
+ target_sreg disp, int modify, TCGMemOp mop)
{
nullify_over(ctx);
- do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
+ do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
return nullify_end(ctx, DISAS_NEXT);
}
static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_long disp,
+ unsigned rx, int scale, target_sreg disp,
int modify)
{
TCGv_i32 tmp;
}
static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
- unsigned rx, int scale, target_long disp,
+ unsigned rx, int scale, target_sreg disp,
int modify)
{
TCGv_i64 tmp;
/* Emit an unconditional branch to a direct target, which may or may not
have already had nullification handled. */
-static DisasJumpType do_dbranch(DisasContext *ctx, target_ulong dest,
+static DisasJumpType do_dbranch(DisasContext *ctx, target_ureg dest,
unsigned link, bool is_n)
{
if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
/* Emit a conditional branch to a direct target. If the branch itself
is nullified, we should have already used nullify_over. */
-static DisasJumpType do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
+static DisasJumpType do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
DisasCond *cond)
{
- target_ulong dest = iaoq_dest(ctx, disp);
+ target_ureg dest = iaoq_dest(ctx, disp);
TCGLabel *taken = NULL;
TCGCond c = cond->c;
bool n;
taken = gen_new_label();
cond_prep(cond);
- tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
+ tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
cond_free(cond);
/* Not taken: Condition not satisfied; nullify on backward branches. */
/* Emit an unconditional branch to an indirect target. This handles
nullification of the branch itself. */
-static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
+static DisasJumpType do_ibranch(DisasContext *ctx, TCGv_reg dest,
unsigned link, bool is_n)
{
- TCGv a0, a1, next, tmp;
+ TCGv_reg a0, a1, next, tmp;
TCGCond c;
assert(ctx->null_lab == NULL);
copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
}
next = get_temp(ctx);
- tcg_gen_mov_tl(next, dest);
+ tcg_gen_mov_reg(next, dest);
ctx->iaoq_n = -1;
ctx->iaoq_n_var = next;
if (is_n) {
/* We do have to handle the non-local temporary, DEST, before
branching. Since IAOQ_F is not really live at this point, we
can simply store DEST optimistically. Similarly with IAOQ_B. */
- tcg_gen_mov_tl(cpu_iaoq_f, dest);
- tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);
+ tcg_gen_mov_reg(cpu_iaoq_f, dest);
+ tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
nullify_over(ctx);
if (link != 0) {
- tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
+ tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
}
tcg_gen_lookup_and_goto_ptr();
return nullify_end(ctx, DISAS_NEXT);
next = get_temp(ctx);
copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
- tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
+ tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
ctx->iaoq_n = -1;
ctx->iaoq_n_var = next;
if (link != 0) {
- tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
+ tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
}
if (is_n) {
/* The branch nullifies the next insn, which means the state of N
after the branch is the inverse of the state of N that applied
to the branch. */
- tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
+ tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
cond_free(&ctx->null_cond);
ctx->null_cond = cond_make_n();
ctx->psw_n_nonzero = true;
case TCG_COND_NEVER:
break;
case TCG_COND_ALWAYS:
- tcg_gen_movi_tl(cpu_psw_n, 0);
+ tcg_gen_movi_reg(cpu_psw_n, 0);
goto do_sigill;
default:
/* Since this is always the first (and only) insn within the
return DISAS_NORETURN;
case 0xe0: /* SET_THREAD_POINTER */
- tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
- tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
- tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
+ tcg_gen_mov_reg(cpu_cr27, cpu_gr[26]);
+ tcg_gen_mov_reg(cpu_iaoq_f, cpu_gr[31]);
+ tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
return DISAS_IAQ_N_UPDATED;
case 0x100: /* SYSCALL */
const DisasInsn *di)
{
unsigned rt = extract32(insn, 0, 5);
- TCGv tmp = dest_gpr(ctx, rt);
- tcg_gen_movi_tl(tmp, ctx->iaoq_f);
+ TCGv_reg tmp = dest_gpr(ctx, rt);
+ tcg_gen_movi_reg(tmp, ctx->iaoq_f);
save_gpr(ctx, rt, tmp);
cond_free(&ctx->null_cond);
const DisasInsn *di)
{
unsigned rt = extract32(insn, 0, 5);
- TCGv tmp = dest_gpr(ctx, rt);
+ TCGv_reg tmp = dest_gpr(ctx, rt);
/* ??? We don't implement space registers. */
- tcg_gen_movi_tl(tmp, 0);
+ tcg_gen_movi_reg(tmp, 0);
save_gpr(ctx, rt, tmp);
cond_free(&ctx->null_cond);
{
unsigned rt = extract32(insn, 0, 5);
unsigned ctl = extract32(insn, 21, 5);
- TCGv tmp;
+ TCGv_reg tmp;
switch (ctl) {
case 11: /* SAR */
if (extract32(insn, 14, 1) == 0) {
/* MFSAR without ,W masks low 5 bits. */
tmp = dest_gpr(ctx, rt);
- tcg_gen_andi_tl(tmp, cpu_sar, 31);
+ tcg_gen_andi_reg(tmp, cpu_sar, 31);
save_gpr(ctx, rt, tmp);
break;
}
{
unsigned rin = extract32(insn, 16, 5);
unsigned ctl = extract32(insn, 21, 5);
- TCGv tmp;
+ TCGv_reg tmp;
if (ctl == 11) { /* SAR */
tmp = tcg_temp_new();
- tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
+ tcg_gen_andi_reg(tmp, load_gpr(ctx, rin), TARGET_REGISTER_BITS - 1);
save_or_nullify(ctx, cpu_sar, tmp);
tcg_temp_free(tmp);
} else {
const DisasInsn *di)
{
unsigned rin = extract32(insn, 16, 5);
- TCGv tmp = tcg_temp_new();
+ TCGv_reg tmp = tcg_temp_new();
- tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
- tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
+ tcg_gen_not_reg(tmp, load_gpr(ctx, rin));
+ tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
save_or_nullify(ctx, cpu_sar, tmp);
tcg_temp_free(tmp);
const DisasInsn *di)
{
unsigned rt = extract32(insn, 0, 5);
- TCGv dest = dest_gpr(ctx, rt);
+ TCGv_reg dest = dest_gpr(ctx, rt);
/* Since we don't implement space registers, this returns zero. */
- tcg_gen_movi_tl(dest, 0);
+ tcg_gen_movi_reg(dest, 0);
save_gpr(ctx, rt, dest);
cond_free(&ctx->null_cond);
{
unsigned rb = extract32(insn, 21, 5);
unsigned rx = extract32(insn, 16, 5);
- TCGv dest = dest_gpr(ctx, rb);
- TCGv src1 = load_gpr(ctx, rb);
- TCGv src2 = load_gpr(ctx, rx);
+ TCGv_reg dest = dest_gpr(ctx, rb);
+ TCGv_reg src1 = load_gpr(ctx, rb);
+ TCGv_reg src2 = load_gpr(ctx, rx);
/* The only thing we need to do is the base register modification. */
- tcg_gen_add_tl(dest, src1, src2);
+ tcg_gen_add_reg(dest, src1, src2);
save_gpr(ctx, rb, dest);
cond_free(&ctx->null_cond);
unsigned rt = extract32(insn, 0, 5);
unsigned rb = extract32(insn, 21, 5);
unsigned is_write = extract32(insn, 6, 1);
- TCGv dest;
+ TCGv_reg dest;
nullify_over(ctx);
unsigned ext = extract32(insn, 8, 4);
unsigned shift = extract32(insn, 6, 2);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2;
+ TCGv_reg tcg_r1, tcg_r2;
bool is_c = false;
bool is_l = false;
bool is_tc = false;
unsigned cf = extract32(insn, 12, 4);
unsigned ext = extract32(insn, 6, 6);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2;
+ TCGv_reg tcg_r1, tcg_r2;
bool is_b = false;
bool is_tc = false;
bool is_tsv = false;
unsigned r1 = extract32(insn, 16, 5);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2;
+ TCGv_reg tcg_r1, tcg_r2;
DisasJumpType ret;
if (cf) {
unsigned rt = extract32(insn, 0, 5);
if (r1 == 0) {
- TCGv dest = dest_gpr(ctx, rt);
- tcg_gen_movi_tl(dest, 0);
+ TCGv_reg dest = dest_gpr(ctx, rt);
+ tcg_gen_movi_reg(dest, 0);
save_gpr(ctx, rt, dest);
} else {
save_gpr(ctx, rt, cpu_gr[r1]);
unsigned r1 = extract32(insn, 16, 5);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2;
+ TCGv_reg tcg_r1, tcg_r2;
DisasJumpType ret;
if (cf) {
unsigned r1 = extract32(insn, 16, 5);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2;
+ TCGv_reg tcg_r1, tcg_r2;
DisasJumpType ret;
if (cf) {
}
tcg_r1 = load_gpr(ctx, r1);
tcg_r2 = load_gpr(ctx, r2);
- ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
+ ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_reg);
return nullify_end(ctx, ret);
}
unsigned cf = extract32(insn, 12, 4);
unsigned is_tc = extract32(insn, 6, 1);
unsigned rt = extract32(insn, 0, 5);
- TCGv tcg_r1, tcg_r2, tmp;
+ TCGv_reg tcg_r1, tcg_r2, tmp;
DisasJumpType ret;
if (cf) {
tcg_r1 = load_gpr(ctx, r1);
tcg_r2 = load_gpr(ctx, r2);
tmp = get_temp(ctx);
- tcg_gen_not_tl(tmp, tcg_r2);
- ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
+ tcg_gen_not_reg(tmp, tcg_r2);
+ ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_reg);
return nullify_end(ctx, ret);
}
unsigned cf = extract32(insn, 12, 4);
unsigned is_i = extract32(insn, 6, 1);
unsigned rt = extract32(insn, 0, 5);
- TCGv tmp;
+ TCGv_reg tmp;
DisasJumpType ret;
nullify_over(ctx);
tmp = get_temp(ctx);
- tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
+ tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
if (!is_i) {
- tcg_gen_not_tl(tmp, tmp);
+ tcg_gen_not_reg(tmp, tmp);
}
- tcg_gen_andi_tl(tmp, tmp, 0x11111111);
- tcg_gen_muli_tl(tmp, tmp, 6);
+ tcg_gen_andi_reg(tmp, tmp, 0x11111111);
+ tcg_gen_muli_reg(tmp, tmp, 6);
ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
- is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
+ is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
return nullify_end(ctx, ret);
}
unsigned r1 = extract32(insn, 16, 5);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 0, 5);
- TCGv dest, add1, add2, addc, zero, in1, in2;
+ TCGv_reg dest, add1, add2, addc, zero, in1, in2;
nullify_over(ctx);
add2 = tcg_temp_new();
addc = tcg_temp_new();
dest = tcg_temp_new();
- zero = tcg_const_tl(0);
+ zero = tcg_const_reg(0);
/* Form R1 << 1 | PSW[CB]{8}. */
- tcg_gen_add_tl(add1, in1, in1);
- tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);
+ tcg_gen_add_reg(add1, in1, in1);
+ tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
/* Add or subtract R2, depending on PSW[V]. Proper computation of
carry{8} requires that we subtract via + ~R2 + 1, as described in
the manual. By extracting and masking V, we can produce the
proper inputs to the addition without movcond. */
- tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
- tcg_gen_xor_tl(add2, in2, addc);
- tcg_gen_andi_tl(addc, addc, 1);
+ tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
+ tcg_gen_xor_reg(add2, in2, addc);
+ tcg_gen_andi_reg(addc, addc, 1);
/* ??? This is only correct for 32-bit. */
tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
save_gpr(ctx, rt, dest);
/* Write back PSW[CB]. */
- tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
- tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);
+ tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
+ tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
/* Write back PSW[V] for the division step. */
- tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
- tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);
+ tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
+ tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
/* Install the new nullification. */
if (cf) {
- TCGv sv = NULL;
+ TCGv_reg sv = NULL;
if (cf >> 1 == 6) {
/* ??? The lshift is supposed to contribute to overflow. */
sv = do_add_sv(ctx, dest, add1, add2);
static const DisasInsn table_arith_log[] = {
{ 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */
{ 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
- { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_tl },
- { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_tl },
- { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_tl },
- { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_tl },
+ { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_reg },
+ { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_reg },
+ { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_reg },
+ { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_reg },
{ 0x08000880u, 0xfc000fe0u, trans_cmpclr },
{ 0x08000380u, 0xfc000fe0u, trans_uxor },
{ 0x08000980u, 0xfc000fa0u, trans_uaddcm },
static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
{
- target_long im = low_sextract(insn, 0, 11);
+ target_sreg im = low_sextract(insn, 0, 11);
unsigned e1 = extract32(insn, 11, 1);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 16, 5);
unsigned r2 = extract32(insn, 21, 5);
unsigned o1 = extract32(insn, 26, 1);
- TCGv tcg_im, tcg_r2;
+ TCGv_reg tcg_im, tcg_r2;
DisasJumpType ret;
if (cf) {
static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
{
- target_long im = low_sextract(insn, 0, 11);
+ target_sreg im = low_sextract(insn, 0, 11);
unsigned e1 = extract32(insn, 11, 1);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 16, 5);
unsigned r2 = extract32(insn, 21, 5);
- TCGv tcg_im, tcg_r2;
+ TCGv_reg tcg_im, tcg_r2;
DisasJumpType ret;
if (cf) {
static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
{
- target_long im = low_sextract(insn, 0, 11);
+ target_sreg im = low_sextract(insn, 0, 11);
unsigned cf = extract32(insn, 12, 4);
unsigned rt = extract32(insn, 16, 5);
unsigned r2 = extract32(insn, 21, 5);
- TCGv tcg_im, tcg_r2;
+ TCGv_reg tcg_im, tcg_r2;
DisasJumpType ret;
if (cf) {
unsigned rx = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
- TCGv zero, addr, base, dest;
+ TCGv_reg zero, addr, base, dest;
int modify, disp = 0, scale = 0;
nullify_over(ctx);
addr = tcg_temp_new();
base = load_gpr(ctx, rb);
if (rx) {
- tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
- tcg_gen_add_tl(addr, addr, base);
+ tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
+ tcg_gen_add_reg(addr, addr, base);
} else {
- tcg_gen_addi_tl(addr, base, disp);
+ tcg_gen_addi_reg(addr, base, disp);
}
- zero = tcg_const_tl(0);
- tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
- zero, MMU_USER_IDX, mop);
+ zero = tcg_const_reg(0);
+ tcg_gen_atomic_xchg_reg(dest, (modify <= 0 ? addr : base),
+ zero, MMU_USER_IDX, mop);
if (modify) {
save_gpr(ctx, rb, addr);
}
static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
- target_long disp = low_sextract(insn, 0, 5);
+ target_sreg disp = low_sextract(insn, 0, 5);
unsigned m = extract32(insn, 5, 1);
unsigned a = extract32(insn, 13, 1);
unsigned rt = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
- TCGv addr, val;
+ TCGv_reg addr, val;
nullify_over(ctx);
addr = tcg_temp_new();
if (m || disp == 0) {
- tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
+ tcg_gen_mov_reg(addr, load_gpr(ctx, rb));
} else {
- tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
+ tcg_gen_addi_reg(addr, load_gpr(ctx, rb), disp);
}
val = load_gpr(ctx, rt);
}
if (m) {
- tcg_gen_addi_tl(addr, addr, disp);
- tcg_gen_andi_tl(addr, addr, ~3);
+ tcg_gen_addi_reg(addr, addr, disp);
+ tcg_gen_andi_reg(addr, addr, ~3);
save_gpr(ctx, rb, addr);
}
tcg_temp_free(addr);
static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
{
unsigned rt = extract32(insn, 21, 5);
- target_long i = assemble_21(insn);
- TCGv tcg_rt = dest_gpr(ctx, rt);
+ target_sreg i = assemble_21(insn);
+ TCGv_reg tcg_rt = dest_gpr(ctx, rt);
- tcg_gen_movi_tl(tcg_rt, i);
+ tcg_gen_movi_reg(tcg_rt, i);
save_gpr(ctx, rt, tcg_rt);
cond_free(&ctx->null_cond);
static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
{
unsigned rt = extract32(insn, 21, 5);
- target_long i = assemble_21(insn);
- TCGv tcg_rt = load_gpr(ctx, rt);
- TCGv tcg_r1 = dest_gpr(ctx, 1);
+ target_sreg i = assemble_21(insn);
+ TCGv_reg tcg_rt = load_gpr(ctx, rt);
+ TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
- tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
+ tcg_gen_addi_reg(tcg_r1, tcg_rt, i);
save_gpr(ctx, 1, tcg_r1);
cond_free(&ctx->null_cond);
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
- target_long i = assemble_16(insn);
- TCGv tcg_rt = dest_gpr(ctx, rt);
+ target_sreg i = assemble_16(insn);
+ TCGv_reg tcg_rt = dest_gpr(ctx, rt);
/* Special case rb == 0, for the LDI pseudo-op.
The COPY pseudo-op is handled for free within tcg_gen_addi_reg. */
if (rb == 0) {
- tcg_gen_movi_tl(tcg_rt, i);
+ tcg_gen_movi_reg(tcg_rt, i);
} else {
- tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
+ tcg_gen_addi_reg(tcg_rt, cpu_gr[rb], i);
}
save_gpr(ctx, rt, tcg_rt);
cond_free(&ctx->null_cond);
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
- target_long i = assemble_16(insn);
+ target_sreg i = assemble_16(insn);
return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
}
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
- target_long i = assemble_16a(insn);
+ target_sreg i = assemble_16a(insn);
unsigned ext2 = extract32(insn, 1, 2);
switch (ext2) {
static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
{
- target_long i = assemble_16a(insn);
+ target_sreg i = assemble_16a(insn);
unsigned t1 = extract32(insn, 1, 1);
unsigned a = extract32(insn, 2, 1);
unsigned t0 = extract32(insn, 16, 5);
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
- target_long i = assemble_16(insn);
+ target_sreg i = assemble_16(insn);
return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
}
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
- target_long i = assemble_16a(insn);
+ target_sreg i = assemble_16a(insn);
unsigned ext2 = extract32(insn, 1, 2);
switch (ext2) {
static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
{
- target_long i = assemble_16a(insn);
+ target_sreg i = assemble_16a(insn);
unsigned t1 = extract32(insn, 1, 1);
unsigned a = extract32(insn, 2, 1);
unsigned t0 = extract32(insn, 16, 5);
static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
bool is_true, bool is_imm, bool is_dw)
{
- target_long disp = assemble_12(insn) * 4;
+ target_sreg disp = assemble_12(insn) * 4;
unsigned n = extract32(insn, 1, 1);
unsigned c = extract32(insn, 13, 3);
unsigned r = extract32(insn, 21, 5);
unsigned cf = c * 2 + !is_true;
- TCGv dest, in1, in2, sv;
+ TCGv_reg dest, in1, in2, sv;
DisasCond cond;
nullify_over(ctx);
in2 = load_gpr(ctx, r);
dest = get_temp(ctx);
- tcg_gen_sub_tl(dest, in1, in2);
+ tcg_gen_sub_reg(dest, in1, in2);
sv = NULL;
if (c == 6) {
static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
bool is_true, bool is_imm)
{
- target_long disp = assemble_12(insn) * 4;
+ target_sreg disp = assemble_12(insn) * 4;
unsigned n = extract32(insn, 1, 1);
unsigned c = extract32(insn, 13, 3);
unsigned r = extract32(insn, 21, 5);
unsigned cf = c * 2 + !is_true;
- TCGv dest, in1, in2, sv, cb_msb;
+ TCGv_reg dest, in1, in2, sv, cb_msb;
DisasCond cond;
nullify_over(ctx);
switch (c) {
default:
- tcg_gen_add_tl(dest, in1, in2);
+ tcg_gen_add_reg(dest, in1, in2);
break;
case 4: case 5:
cb_msb = get_temp(ctx);
- tcg_gen_movi_tl(cb_msb, 0);
- tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
+ tcg_gen_movi_reg(cb_msb, 0);
+ tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
break;
case 6:
- tcg_gen_add_tl(dest, in1, in2);
+ tcg_gen_add_reg(dest, in1, in2);
sv = do_add_sv(ctx, dest, in1, in2);
break;
}
static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
{
- target_long disp = assemble_12(insn) * 4;
+ target_sreg disp = assemble_12(insn) * 4;
unsigned n = extract32(insn, 1, 1);
unsigned c = extract32(insn, 15, 1);
unsigned r = extract32(insn, 16, 5);
unsigned p = extract32(insn, 21, 5);
unsigned i = extract32(insn, 26, 1);
- TCGv tmp, tcg_r;
+ TCGv_reg tmp, tcg_r;
DisasCond cond;
nullify_over(ctx);
tmp = tcg_temp_new();
tcg_r = load_gpr(ctx, r);
if (i) {
- tcg_gen_shli_tl(tmp, tcg_r, p);
+ tcg_gen_shli_reg(tmp, tcg_r, p);
} else {
- tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
+ tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
}
cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
{
- target_long disp = assemble_12(insn) * 4;
+ target_sreg disp = assemble_12(insn) * 4;
unsigned n = extract32(insn, 1, 1);
unsigned c = extract32(insn, 13, 3);
unsigned t = extract32(insn, 16, 5);
unsigned r = extract32(insn, 21, 5);
- TCGv dest;
+ TCGv_reg dest;
DisasCond cond;
nullify_over(ctx);
dest = dest_gpr(ctx, r);
if (is_imm) {
- tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
+ tcg_gen_movi_reg(dest, low_sextract(t, 0, 5));
} else if (t == 0) {
- tcg_gen_movi_tl(dest, 0);
+ tcg_gen_movi_reg(dest, 0);
} else {
- tcg_gen_mov_tl(dest, cpu_gr[t]);
+ tcg_gen_mov_reg(dest, cpu_gr[t]);
}
cond = do_sed_cond(c, dest);
unsigned c = extract32(insn, 13, 3);
unsigned r1 = extract32(insn, 16, 5);
unsigned r2 = extract32(insn, 21, 5);
- TCGv dest;
+ TCGv_reg dest;
if (c) {
nullify_over(ctx);
dest = dest_gpr(ctx, rt);
if (r1 == 0) {
- tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
- tcg_gen_shr_tl(dest, dest, cpu_sar);
+ tcg_gen_ext32u_reg(dest, load_gpr(ctx, r2));
+ tcg_gen_shr_reg(dest, dest, cpu_sar);
} else if (r1 == r2) {
TCGv_i32 t32 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
+ tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, r2));
tcg_gen_rotr_i32(t32, t32, cpu_sar);
- tcg_gen_extu_i32_tl(dest, t32);
+ tcg_gen_extu_i32_reg(dest, t32);
tcg_temp_free_i32(t32);
} else {
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 s = tcg_temp_new_i64();
- tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
- tcg_gen_extu_tl_i64(s, cpu_sar);
+ tcg_gen_concat_reg_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
+ tcg_gen_extu_reg_i64(s, cpu_sar);
tcg_gen_shr_i64(t, t, s);
- tcg_gen_trunc_i64_tl(dest, t);
+ tcg_gen_trunc_i64_reg(dest, t);
tcg_temp_free_i64(t);
tcg_temp_free_i64(s);
unsigned r1 = extract32(insn, 16, 5);
unsigned r2 = extract32(insn, 21, 5);
unsigned sa = 31 - cpos;
- TCGv dest, t2;
+ TCGv_reg dest, t2;
if (c) {
nullify_over(ctx);
t2 = load_gpr(ctx, r2);
if (r1 == r2) {
TCGv_i32 t32 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t32, t2);
+ tcg_gen_trunc_reg_i32(t32, t2);
tcg_gen_rotri_i32(t32, t32, sa);
- tcg_gen_extu_i32_tl(dest, t32);
+ tcg_gen_extu_i32_reg(dest, t32);
tcg_temp_free_i32(t32);
} else if (r1 == 0) {
- tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
+ tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
} else {
- TCGv t0 = tcg_temp_new();
- tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
- tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
+ TCGv_reg t0 = tcg_temp_new();
+ tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
+ tcg_gen_deposit_reg(dest, t0, cpu_gr[r1], 32 - sa, sa);
tcg_temp_free(t0);
}
save_gpr(ctx, rt, dest);
unsigned rt = extract32(insn, 16, 5);
unsigned rr = extract32(insn, 21, 5);
unsigned len = 32 - clen;
- TCGv dest, src, tmp;
+ TCGv_reg dest, src, tmp;
if (c) {
nullify_over(ctx);
tmp = tcg_temp_new();
/* Recall that SAR is using big-endian bit numbering. */
- tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
+ tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
if (is_se) {
- tcg_gen_sar_tl(dest, src, tmp);
- tcg_gen_sextract_tl(dest, dest, 0, len);
+ tcg_gen_sar_reg(dest, src, tmp);
+ tcg_gen_sextract_reg(dest, dest, 0, len);
} else {
- tcg_gen_shr_tl(dest, src, tmp);
- tcg_gen_extract_tl(dest, dest, 0, len);
+ tcg_gen_shr_reg(dest, src, tmp);
+ tcg_gen_extract_reg(dest, dest, 0, len);
}
tcg_temp_free(tmp);
save_gpr(ctx, rt, dest);
unsigned rr = extract32(insn, 21, 5);
unsigned len = 32 - clen;
unsigned cpos = 31 - pos;
- TCGv dest, src;
+ TCGv_reg dest, src;
if (c) {
nullify_over(ctx);
dest = dest_gpr(ctx, rt);
src = load_gpr(ctx, rr);
if (is_se) {
- tcg_gen_sextract_tl(dest, src, cpos, len);
+ tcg_gen_sextract_reg(dest, src, cpos, len);
} else {
- tcg_gen_extract_tl(dest, src, cpos, len);
+ tcg_gen_extract_reg(dest, src, cpos, len);
}
save_gpr(ctx, rt, dest);
unsigned cpos = extract32(insn, 5, 5);
unsigned nz = extract32(insn, 10, 1);
unsigned c = extract32(insn, 13, 3);
- target_long val = low_sextract(insn, 16, 5);
+ target_sreg val = low_sextract(insn, 16, 5);
unsigned rt = extract32(insn, 21, 5);
unsigned len = 32 - clen;
- target_long mask0, mask1;
- TCGv dest;
+ target_sreg mask0, mask1;
+ TCGv_reg dest;
if (c) {
nullify_over(ctx);
mask1 = deposit64(-1, cpos, len, val);
if (nz) {
- TCGv src = load_gpr(ctx, rt);
+ TCGv_reg src = load_gpr(ctx, rt);
if (mask1 != -1) {
- tcg_gen_andi_tl(dest, src, mask1);
+ tcg_gen_andi_reg(dest, src, mask1);
src = dest;
}
- tcg_gen_ori_tl(dest, src, mask0);
+ tcg_gen_ori_reg(dest, src, mask0);
} else {
- tcg_gen_movi_tl(dest, mask0);
+ tcg_gen_movi_reg(dest, mask0);
}
save_gpr(ctx, rt, dest);
unsigned rt = extract32(insn, 21, 5);
unsigned rs = nz ? rt : 0;
unsigned len = 32 - clen;
- TCGv dest, val;
+ TCGv_reg dest, val;
if (c) {
nullify_over(ctx);
dest = dest_gpr(ctx, rt);
val = load_gpr(ctx, rr);
if (rs == 0) {
- tcg_gen_deposit_z_tl(dest, val, cpos, len);
+ tcg_gen_deposit_z_reg(dest, val, cpos, len);
} else {
- tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
+ tcg_gen_deposit_reg(dest, cpu_gr[rs], val, cpos, len);
}
save_gpr(ctx, rt, dest);
unsigned rt = extract32(insn, 21, 5);
unsigned rs = nz ? rt : 0;
unsigned len = 32 - clen;
- TCGv val, mask, tmp, shift, dest;
+ TCGv_reg val, mask, tmp, shift, dest;
unsigned msb = 1U << (len - 1);
if (c) {
tmp = tcg_temp_new();
/* Convert big-endian bit numbering in SAR to left-shift. */
- tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);
+ tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
- mask = tcg_const_tl(msb + (msb - 1));
- tcg_gen_and_tl(tmp, val, mask);
+ mask = tcg_const_reg(msb + (msb - 1));
+ tcg_gen_and_reg(tmp, val, mask);
if (rs) {
- tcg_gen_shl_tl(mask, mask, shift);
- tcg_gen_shl_tl(tmp, tmp, shift);
- tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
- tcg_gen_or_tl(dest, dest, tmp);
+ tcg_gen_shl_reg(mask, mask, shift);
+ tcg_gen_shl_reg(tmp, tmp, shift);
+ tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
+ tcg_gen_or_reg(dest, dest, tmp);
} else {
- tcg_gen_shl_tl(dest, tmp, shift);
+ tcg_gen_shl_reg(dest, tmp, shift);
}
tcg_temp_free(shift);
tcg_temp_free(mask);
{
unsigned n = extract32(insn, 1, 1);
unsigned b = extract32(insn, 21, 5);
- target_long disp = assemble_17(insn);
+ target_sreg disp = assemble_17(insn);
/* unsigned s = low_uextract(insn, 13, 3); */
/* ??? It seems like there should be a good way of using
if (b == 0) {
return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
} else {
- TCGv tmp = get_temp(ctx);
- tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
+ TCGv_reg tmp = get_temp(ctx);
+ tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
}
}
{
unsigned n = extract32(insn, 1, 1);
unsigned link = extract32(insn, 21, 5);
- target_long disp = assemble_17(insn);
+ target_sreg disp = assemble_17(insn);
return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
}
const DisasInsn *di)
{
unsigned n = extract32(insn, 1, 1);
- target_long disp = assemble_22(insn);
+ target_sreg disp = assemble_22(insn);
return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
}
unsigned n = extract32(insn, 1, 1);
unsigned rx = extract32(insn, 16, 5);
unsigned link = extract32(insn, 21, 5);
- TCGv tmp = get_temp(ctx);
+ TCGv_reg tmp = get_temp(ctx);
- tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
- tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
+ tcg_gen_shli_reg(tmp, load_gpr(ctx, rx), 3);
+ tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
return do_ibranch(ctx, tmp, link, n);
}
unsigned n = extract32(insn, 1, 1);
unsigned rx = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
- TCGv dest;
+ TCGv_reg dest;
if (rx == 0) {
dest = load_gpr(ctx, rb);
} else {
dest = get_temp(ctx);
- tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
- tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
+ tcg_gen_shli_reg(dest, load_gpr(ctx, rx), 3);
+ tcg_gen_add_reg(dest, dest, load_gpr(ctx, rb));
}
return do_ibranch(ctx, dest, 0, n);
}
{
unsigned y = extract32(insn, 13, 3);
unsigned cbit = (y ^ 1) - 1;
- TCGv t;
+ TCGv_reg t;
nullify_over(ctx);
t = tcg_temp_new();
- tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
- tcg_gen_extract_tl(t, t, 21 - cbit, 1);
+ tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
+ tcg_gen_extract_reg(t, t, 21 - cbit, 1);
ctx->null_cond = cond_make_0(TCG_COND_NE, t);
tcg_temp_free(t);
unsigned c = extract32(insn, 0, 5);
int mask;
bool inv = false;
- TCGv t;
+ TCGv_reg t;
nullify_over(ctx);
t = tcg_temp_new();
- tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
+ tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
switch (c) {
case 0: /* simple */
- tcg_gen_andi_tl(t, t, 0x4000000);
+ tcg_gen_andi_reg(t, t, 0x4000000);
ctx->null_cond = cond_make_0(TCG_COND_NE, t);
goto done;
case 2: /* rej */
return gen_illegal(ctx);
}
if (inv) {
- TCGv c = load_const(ctx, mask);
- tcg_gen_or_tl(t, t, c);
+ TCGv_reg c = load_const(ctx, mask);
+ tcg_gen_or_reg(t, t, c);
ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
} else {
- tcg_gen_andi_tl(t, t, mask);
+ tcg_gen_andi_reg(t, t, mask);
ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
}
done:
if (ctx->iaoq_b == -1) {
ctx->iaoq_n = -1;
ctx->iaoq_n_var = get_temp(ctx);
- tcg_gen_addi_tl(ctx->iaoq_n_var, cpu_iaoq_b, 4);
+ tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
} else {
ctx->iaoq_n = ctx->iaoq_b + 4;
ctx->iaoq_n_var = NULL;
return;
}
if (ctx->iaoq_f == -1) {
- tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
+ tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
nullify_save(ctx);
ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
} else if (ctx->iaoq_b == -1) {
- tcg_gen_mov_tl(cpu_iaoq_b, ctx->iaoq_n_var);
+ tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
}
}
static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
- TranslationBlock *tb = dcbase->tb;
- target_ulong pc = tb->pc;
+ target_ureg pc = dcbase->pc_first;
#ifdef CONFIG_USER_ONLY
switch (pc) {
#endif
qemu_log("IN: %s\n", lookup_symbol(pc));
- log_target_disas(cs, pc, tb->size);
+ log_target_disas(cs, pc, dcbase->tb->size);
}
static const TranslatorOps hppa_tr_ops = {