/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-ldst.h"
#include "qemu/compiler.h"

/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;
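
/*
 * Write a 64-bit value into a pair of host registers; used on hosts with
 * TCG_TARGET_REG_BITS == 32, where a 64-bit value occupies two registers.
 */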
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once. The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   s = signed ldst offset
 */
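
/*
 * For example, tci_args_rrr() unpacks three 4-bit register fields from
 * bits [8,12), [12,16) and [16,20) of a 32-bit insn whose opcode occupies
 * bits [0,8); wider immediates simply continue from the next free bit.
 */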
static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
        g_assert_not_reached();

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
        g_assert_not_reached();
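
/*
 * Perform a guest load for the interpreter: dispatch on the MemOp
 * (size, signedness, byte order) packed in @oi. With CONFIG_SOFTMMU the
 * per-endian ld helpers do the TLB lookup; for user-only emulation the
 * guest address is translated with g2h() and checked against the
 * alignment mask before a direct host access.
 */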
static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
                            MemOpIdx oi, const void *tb_ptr)
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
        return helper_ret_ldub_mmu(env, taddr, oi, ra);
        return helper_ret_ldsb_mmu(env, taddr, oi, ra);
        return helper_le_lduw_mmu(env, taddr, oi, ra);
        return helper_le_ldsw_mmu(env, taddr, oi, ra);
        return helper_le_ldul_mmu(env, taddr, oi, ra);
        return helper_le_ldsl_mmu(env, taddr, oi, ra);
        return helper_le_ldq_mmu(env, taddr, oi, ra);
        return helper_be_lduw_mmu(env, taddr, oi, ra);
        return helper_be_ldsw_mmu(env, taddr, oi, ra);
        return helper_be_ldul_mmu(env, taddr, oi, ra);
        return helper_be_ldsl_mmu(env, taddr, oi, ra);
        return helper_be_ldq_mmu(env, taddr, oi, ra);
        g_assert_not_reached();
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_ld(env, taddr);
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
        ret = lduw_le_p(haddr);
        ret = ldsw_le_p(haddr);
        ret = (uint32_t)ldl_le_p(haddr);
        ret = (int32_t)ldl_le_p(haddr);
        ret = ldq_le_p(haddr);
        ret = lduw_be_p(haddr);
        ret = ldsw_be_p(haddr);
        ret = (uint32_t)ldl_be_p(haddr);
        ret = (int32_t)ldl_be_p(haddr);
        ret = ldq_be_p(haddr);
        g_assert_not_reached();
    clear_helper_retaddr();
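
/*
 * Perform a guest store for the interpreter. The layout mirrors
 * tci_qemu_ld() above: softmmu builds go through the per-endian st
 * helpers, user-only builds translate with g2h() and use the st*_p
 * accessors after the alignment check.
 */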
static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SIZE)) {
        helper_ret_stb_mmu(env, taddr, val, oi, ra);
        helper_le_stw_mmu(env, taddr, val, oi, ra);
        helper_le_stl_mmu(env, taddr, val, oi, ra);
        helper_le_stq_mmu(env, taddr, val, oi, ra);
        helper_be_stw_mmu(env, taddr, val, oi, ra);
        helper_be_stl_mmu(env, taddr, val, oi, ra);
        helper_be_stq_mmu(env, taddr, val, oi, ra);
        g_assert_not_reached();
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_st(env, taddr);
    switch (mop & (MO_BSWAP | MO_SIZE)) {
        stw_le_p(haddr, val);
        stl_le_p(haddr, val);
        stq_le_p(haddr, val);
        stw_be_p(haddr, val);
        stl_be_p(haddr, val);
        stq_be_p(haddr, val);
        g_assert_not_reached();
    clear_helper_retaddr();

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
#endif
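
/*
 * For example, CASE_32_64(add) expands to both "case INDEX_op_add_i64:"
 * and "case INDEX_op_add_i32:" on a 64-bit host, but only to the _i32
 * case on a 32-bit host, so one handler below serves both widths.
 */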
/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];
    void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)];
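
    /*
     * Set up the fixed registers: the CPU env pointer lives in TCG_AREG0
     * and the interpreter's private stack frame in TCG_REG_CALL_STACK.
     */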
    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    /* Other call_slots entries initialized at first use (see below). */
    call_slots[0] = NULL;
        TCGReg r0, r1, r2, r3, r4, r5;

        opc = extract32(insn, 0, 8);

            /*
             * Set up the ffi_avalue array once, delayed until now
             * because many TB's do not make any calls. In tcg_gen_callN,
             * we arranged for every real argument to be "left-aligned"
             * in each 64-bit slot.
             */
            if (unlikely(call_slots[0] == NULL)) {
                for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) {
                    call_slots[i] = &stack[i];

            tci_args_nl(insn, tb_ptr, &len, &ptr);

            /* Helper functions may need to access the "return address" */
            tci_tb_ptr = (uintptr_t)tb_ptr;

            ffi_call(pptr[1], pptr[0], stack, call_slots);

            /* Any result winds up "left-aligned" in the stack[0] slot. */
            case 1: /* uint32_t */
                /*
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 4) {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
            case 2: /* uint64_t */
                if (TCG_TARGET_REG_BITS == 32) {
                    tci_write_reg64(regs, TCG_REG_R1, TCG_REG_R0, stack[0]);
                    regs[TCG_REG_R0] = stack[0];
                g_assert_not_reached();

            tci_args_l(insn, tb_ptr, &ptr);

        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];

            tci_args_rr(insn, &r0, &r1);
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;

            /* Load/store operations (32 bit). */

            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
        case INDEX_op_ld_i32:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
        case INDEX_op_st_i32:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            /* Arithmetic operations (mixed 32/64 bit). */

            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
#endif /* TCG_TARGET_REG_BITS == 64 */
            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;
        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
        case INDEX_op_qemu_ld_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
        case INDEX_op_qemu_st_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                    tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                    tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                    taddr = tci_uint64(regs[r3], regs[r2]);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            /* Ensure ordering for all kinds */
            g_assert_not_reached();
/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}
/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;

    /* TCI is always the host, so we don't need to load indirect. */

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));

        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            g_assert_not_reached();

        /* tcg_out_nop_fill uses zeros */
            info->fprintf_func(info->stream, "align");

        info->fprintf_func(info->stream, "illegal opcode %d", op);

    return sizeof(insn);