2 * Tiny Code Interpreter for QEMU
4 * Copyright (c) 2009, 2011, 2016 Stefan Weil
6 * This program is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "exec/cpu_ldst.h"
22 #include "tcg/tcg-op.h"
23 #include "tcg/tcg-ldst.h"
/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

/* Current TB pointer, published so helpers can see a "return address". */
__thread uintptr_t tci_tb_ptr;
39 static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
40 uint32_t low_index, uint64_t value)
42 regs[low_index] = (uint32_t)value;
43 regs[high_index] = value >> 32;
/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}
/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */
68 static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
70 int diff = sextract32(insn, 12, 20);
71 *l0 = diff ? (void *)tb_ptr + diff : NULL;
74 static void tci_args_r(uint32_t insn, TCGReg *r0)
76 *r0 = extract32(insn, 8, 4);
/* Decode a call-return-length immediate plus an in-TB pointer operand. */
static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}
86 static void tci_args_rl(uint32_t insn, const void *tb_ptr,
87 TCGReg *r0, void **l1)
89 *r0 = extract32(insn, 8, 4);
90 *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
93 static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
95 *r0 = extract32(insn, 8, 4);
96 *r1 = extract32(insn, 12, 4);
99 static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
101 *r0 = extract32(insn, 8, 4);
102 *i1 = sextract32(insn, 12, 20);
105 static void tci_args_rrm(uint32_t insn, TCGReg *r0,
106 TCGReg *r1, MemOpIdx *m2)
108 *r0 = extract32(insn, 8, 4);
109 *r1 = extract32(insn, 12, 4);
110 *m2 = extract32(insn, 20, 12);
113 static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
115 *r0 = extract32(insn, 8, 4);
116 *r1 = extract32(insn, 12, 4);
117 *r2 = extract32(insn, 16, 4);
120 static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
122 *r0 = extract32(insn, 8, 4);
123 *r1 = extract32(insn, 12, 4);
124 *i2 = sextract32(insn, 16, 16);
127 static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
128 uint8_t *i2, uint8_t *i3)
130 *r0 = extract32(insn, 8, 4);
131 *r1 = extract32(insn, 12, 4);
132 *i2 = extract32(insn, 16, 6);
133 *i3 = extract32(insn, 22, 6);
136 static void tci_args_rrrc(uint32_t insn,
137 TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
139 *r0 = extract32(insn, 8, 4);
140 *r1 = extract32(insn, 12, 4);
141 *r2 = extract32(insn, 16, 4);
142 *c3 = extract32(insn, 20, 4);
145 static void tci_args_rrrm(uint32_t insn,
146 TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
148 *r0 = extract32(insn, 8, 4);
149 *r1 = extract32(insn, 12, 4);
150 *r2 = extract32(insn, 16, 4);
151 *m3 = extract32(insn, 20, 12);
154 static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
155 TCGReg *r2, uint8_t *i3, uint8_t *i4)
157 *r0 = extract32(insn, 8, 4);
158 *r1 = extract32(insn, 12, 4);
159 *r2 = extract32(insn, 16, 4);
160 *i3 = extract32(insn, 20, 6);
161 *i4 = extract32(insn, 26, 6);
164 static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
165 TCGReg *r2, TCGReg *r3, TCGReg *r4)
167 *r0 = extract32(insn, 8, 4);
168 *r1 = extract32(insn, 12, 4);
169 *r2 = extract32(insn, 16, 4);
170 *r3 = extract32(insn, 20, 4);
171 *r4 = extract32(insn, 24, 4);
174 static void tci_args_rrrr(uint32_t insn,
175 TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
177 *r0 = extract32(insn, 8, 4);
178 *r1 = extract32(insn, 12, 4);
179 *r2 = extract32(insn, 16, 4);
180 *r3 = extract32(insn, 20, 4);
183 static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
184 TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
186 *r0 = extract32(insn, 8, 4);
187 *r1 = extract32(insn, 12, 4);
188 *r2 = extract32(insn, 16, 4);
189 *r3 = extract32(insn, 20, 4);
190 *r4 = extract32(insn, 24, 4);
191 *c5 = extract32(insn, 28, 4);
194 static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
195 TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
197 *r0 = extract32(insn, 8, 4);
198 *r1 = extract32(insn, 12, 4);
199 *r2 = extract32(insn, 16, 4);
200 *r3 = extract32(insn, 20, 4);
201 *r4 = extract32(insn, 24, 4);
202 *r5 = extract32(insn, 28, 4);
205 static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
242 g_assert_not_reached();
247 static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
284 g_assert_not_reached();
289 static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
290 MemOpIdx oi, const void *tb_ptr)
292 MemOp mop = get_memop(oi);
293 uintptr_t ra = (uintptr_t)tb_ptr;
295 switch (mop & MO_SSIZE) {
297 return helper_ldub_mmu(env, taddr, oi, ra);
299 return helper_ldsb_mmu(env, taddr, oi, ra);
301 return helper_lduw_mmu(env, taddr, oi, ra);
303 return helper_ldsw_mmu(env, taddr, oi, ra);
305 return helper_ldul_mmu(env, taddr, oi, ra);
307 return helper_ldsl_mmu(env, taddr, oi, ra);
309 return helper_ldq_mmu(env, taddr, oi, ra);
311 g_assert_not_reached();
315 static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
316 MemOpIdx oi, const void *tb_ptr)
318 MemOp mop = get_memop(oi);
319 uintptr_t ra = (uintptr_t)tb_ptr;
321 switch (mop & MO_SIZE) {
323 helper_stb_mmu(env, taddr, val, oi, ra);
326 helper_stw_mmu(env, taddr, val, oi, ra);
329 helper_stl_mmu(env, taddr, val, oi, ra);
332 helper_stq_mmu(env, taddr, val, oi, ra);
335 g_assert_not_reached();
/*
 * On 64-bit hosts CASE_32_64 expands to case labels for both operand
 * widths; on 32-bit hosts only the _i32 label exists and CASE_64 expands
 * to nothing.
 */
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                       \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                       \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
351 /* Interpret pseudo code in tb. */
353 * Disable CFI checks.
354 * One possible operation in the pseudo code is a call to binary code.
355 * Therefore, disable CFI checks in the interpreter function
/*
 * Interpret the TCI bytecode starting at @v_tb_ptr; returns the value
 * handed to INDEX_op_exit_tb (see the 'return (uintptr_t)ptr' below).
 *
 * NOTE(review): this region was damaged in extraction — many short lines
 * (braces, 'break;', local declarations, several 'case' labels and
 * '#else'/'#endif' directives) are missing, each remaining line carries a
 * stray leading line-number token, and "&regs" was mangled to "®s" in the
 * memcpy/mulu64/muls64 calls below.  Text is preserved byte-for-byte;
 * restore from upstream qemu tcg/tci.c before building.
 */
357 uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
358 const void *v_tb_ptr)
360 const uint32_t *tb_ptr = v_tb_ptr;
361 tcg_target_ulong regs[TCG_TARGET_NB_REGS];
362 uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
365 regs[TCG_AREG0] = (tcg_target_ulong)env;
366 regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
372 TCGReg r0, r1, r2, r3, r4, r5;
385 opc = extract32(insn, 0, 8);
/* Call to an out-of-line helper via libffi; argument slots are 8 bytes. */
390 void *call_slots[MAX_CALL_IARGS];
395 tci_args_nl(insn, tb_ptr, &len, &ptr);
396 func = ((void **)ptr)[0];
397 cif = ((void **)ptr)[1];
400 for (i = s = 0; i < n; ++i) {
401 ffi_type *t = cif->arg_types[i];
402 call_slots[i] = &stack[s];
403 s += DIV_ROUND_UP(t->size, 8);
406 /* Helper functions may need to access the "return address" */
407 tci_tb_ptr = (uintptr_t)tb_ptr;
408 ffi_call(cif, func, stack, call_slots);
/* Unpack the helper's return value back into TCG register(s). */
414 case 1: /* uint32_t */
416 * The result winds up "left-aligned" in the stack[0] slot.
417 * Note that libffi has an odd special case in that it will
418 * always widen an integral result to ffi_arg.
420 if (sizeof(ffi_arg) == 8) {
421 regs[TCG_REG_R0] = (uint32_t)stack[0];
423 regs[TCG_REG_R0] = *(uint32_t *)stack;
426 case 2: /* uint64_t */
428 * For TCG_TARGET_REG_BITS == 32, the register pair
429 * must stay in host memory order.
/* NOTE(review): "®s" below is mojibake for "&regs". */
431 memcpy(®s[TCG_REG_R0], stack, 8);
434 memcpy(®s[TCG_REG_R0], stack, 16);
437 g_assert_not_reached();
442 tci_args_l(insn, tb_ptr, &ptr);
/* Conditional-set and conditional-move operations. */
445 case INDEX_op_setcond_i32:
446 tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
447 regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
449 case INDEX_op_movcond_i32:
450 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
451 tmp32 = tci_compare32(regs[r1], regs[r2], condition);
452 regs[r0] = regs[tmp32 ? r3 : r4];
454 #if TCG_TARGET_REG_BITS == 32
455 case INDEX_op_setcond2_i32:
456 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
457 T1 = tci_uint64(regs[r2], regs[r1]);
458 T2 = tci_uint64(regs[r4], regs[r3]);
459 regs[r0] = tci_compare64(T1, T2, condition);
461 #elif TCG_TARGET_REG_BITS == 64
462 case INDEX_op_setcond_i64:
463 tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
464 regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
466 case INDEX_op_movcond_i64:
467 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
468 tmp32 = tci_compare64(regs[r1], regs[r2], condition);
469 regs[r0] = regs[tmp32 ? r3 : r4];
473 tci_args_rr(insn, &r0, &r1);
476 case INDEX_op_tci_movi:
477 tci_args_ri(insn, &r0, &t1);
480 case INDEX_op_tci_movl:
481 tci_args_rl(insn, tb_ptr, &r0, &ptr);
482 regs[r0] = *(tcg_target_ulong *)ptr;
485 /* Load/store operations (32 bit). */
488 tci_args_rrs(insn, &r0, &r1, &ofs);
489 ptr = (void *)(regs[r1] + ofs);
490 regs[r0] = *(uint8_t *)ptr;
493 tci_args_rrs(insn, &r0, &r1, &ofs);
494 ptr = (void *)(regs[r1] + ofs);
495 regs[r0] = *(int8_t *)ptr;
498 tci_args_rrs(insn, &r0, &r1, &ofs);
499 ptr = (void *)(regs[r1] + ofs);
500 regs[r0] = *(uint16_t *)ptr;
503 tci_args_rrs(insn, &r0, &r1, &ofs);
504 ptr = (void *)(regs[r1] + ofs);
505 regs[r0] = *(int16_t *)ptr;
507 case INDEX_op_ld_i32:
509 tci_args_rrs(insn, &r0, &r1, &ofs);
510 ptr = (void *)(regs[r1] + ofs);
511 regs[r0] = *(uint32_t *)ptr;
514 tci_args_rrs(insn, &r0, &r1, &ofs);
515 ptr = (void *)(regs[r1] + ofs);
516 *(uint8_t *)ptr = regs[r0];
519 tci_args_rrs(insn, &r0, &r1, &ofs);
520 ptr = (void *)(regs[r1] + ofs);
521 *(uint16_t *)ptr = regs[r0];
523 case INDEX_op_st_i32:
525 tci_args_rrs(insn, &r0, &r1, &ofs);
526 ptr = (void *)(regs[r1] + ofs);
527 *(uint32_t *)ptr = regs[r0];
530 /* Arithmetic operations (mixed 32/64 bit). */
533 tci_args_rrr(insn, &r0, &r1, &r2);
534 regs[r0] = regs[r1] + regs[r2];
537 tci_args_rrr(insn, &r0, &r1, &r2);
538 regs[r0] = regs[r1] - regs[r2];
541 tci_args_rrr(insn, &r0, &r1, &r2);
542 regs[r0] = regs[r1] * regs[r2];
545 tci_args_rrr(insn, &r0, &r1, &r2);
546 regs[r0] = regs[r1] & regs[r2];
549 tci_args_rrr(insn, &r0, &r1, &r2);
550 regs[r0] = regs[r1] | regs[r2];
553 tci_args_rrr(insn, &r0, &r1, &r2);
554 regs[r0] = regs[r1] ^ regs[r2];
556 #if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
558 tci_args_rrr(insn, &r0, &r1, &r2);
559 regs[r0] = regs[r1] & ~regs[r2];
562 #if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
564 tci_args_rrr(insn, &r0, &r1, &r2);
565 regs[r0] = regs[r1] | ~regs[r2];
568 #if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
570 tci_args_rrr(insn, &r0, &r1, &r2);
571 regs[r0] = ~(regs[r1] ^ regs[r2]);
574 #if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
576 tci_args_rrr(insn, &r0, &r1, &r2);
577 regs[r0] = ~(regs[r1] & regs[r2]);
580 #if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
582 tci_args_rrr(insn, &r0, &r1, &r2);
583 regs[r0] = ~(regs[r1] | regs[r2]);
587 /* Arithmetic operations (32 bit). */
589 case INDEX_op_div_i32:
590 tci_args_rrr(insn, &r0, &r1, &r2);
591 regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
593 case INDEX_op_divu_i32:
594 tci_args_rrr(insn, &r0, &r1, &r2);
595 regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
597 case INDEX_op_rem_i32:
598 tci_args_rrr(insn, &r0, &r1, &r2);
599 regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
601 case INDEX_op_remu_i32:
602 tci_args_rrr(insn, &r0, &r1, &r2);
603 regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
605 #if TCG_TARGET_HAS_clz_i32
606 case INDEX_op_clz_i32:
607 tci_args_rrr(insn, &r0, &r1, &r2);
/* clz/ctz of zero take the fallback value from regs[r2]. */
609 regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
612 #if TCG_TARGET_HAS_ctz_i32
613 case INDEX_op_ctz_i32:
614 tci_args_rrr(insn, &r0, &r1, &r2);
616 regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
619 #if TCG_TARGET_HAS_ctpop_i32
620 case INDEX_op_ctpop_i32:
621 tci_args_rr(insn, &r0, &r1);
622 regs[r0] = ctpop32(regs[r1]);
626 /* Shift/rotate operations (32 bit). */
628 case INDEX_op_shl_i32:
629 tci_args_rrr(insn, &r0, &r1, &r2);
630 regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
632 case INDEX_op_shr_i32:
633 tci_args_rrr(insn, &r0, &r1, &r2);
634 regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
636 case INDEX_op_sar_i32:
637 tci_args_rrr(insn, &r0, &r1, &r2);
638 regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
640 #if TCG_TARGET_HAS_rot_i32
641 case INDEX_op_rotl_i32:
642 tci_args_rrr(insn, &r0, &r1, &r2);
643 regs[r0] = rol32(regs[r1], regs[r2] & 31);
645 case INDEX_op_rotr_i32:
646 tci_args_rrr(insn, &r0, &r1, &r2);
647 regs[r0] = ror32(regs[r1], regs[r2] & 31);
650 #if TCG_TARGET_HAS_deposit_i32
651 case INDEX_op_deposit_i32:
652 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
653 regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
656 #if TCG_TARGET_HAS_extract_i32
657 case INDEX_op_extract_i32:
658 tci_args_rrbb(insn, &r0, &r1, &pos, &len);
659 regs[r0] = extract32(regs[r1], pos, len);
662 #if TCG_TARGET_HAS_sextract_i32
663 case INDEX_op_sextract_i32:
664 tci_args_rrbb(insn, &r0, &r1, &pos, &len);
665 regs[r0] = sextract32(regs[r1], pos, len);
668 case INDEX_op_brcond_i32:
669 tci_args_rl(insn, tb_ptr, &r0, &ptr);
670 if ((uint32_t)regs[r0]) {
674 #if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
675 case INDEX_op_add2_i32:
676 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
677 T1 = tci_uint64(regs[r3], regs[r2]);
678 T2 = tci_uint64(regs[r5], regs[r4]);
679 tci_write_reg64(regs, r1, r0, T1 + T2);
682 #if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
683 case INDEX_op_sub2_i32:
684 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
685 T1 = tci_uint64(regs[r3], regs[r2]);
686 T2 = tci_uint64(regs[r5], regs[r4]);
687 tci_write_reg64(regs, r1, r0, T1 - T2);
690 #if TCG_TARGET_HAS_mulu2_i32
691 case INDEX_op_mulu2_i32:
692 tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
693 tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
694 tci_write_reg64(regs, r1, r0, tmp64);
697 #if TCG_TARGET_HAS_muls2_i32
698 case INDEX_op_muls2_i32:
699 tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
700 tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
701 tci_write_reg64(regs, r1, r0, tmp64);
704 #if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
706 tci_args_rr(insn, &r0, &r1);
707 regs[r0] = (int8_t)regs[r1];
710 #if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
711 TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
713 tci_args_rr(insn, &r0, &r1);
714 regs[r0] = (int16_t)regs[r1];
717 #if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
719 tci_args_rr(insn, &r0, &r1);
720 regs[r0] = (uint8_t)regs[r1];
723 #if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
725 tci_args_rr(insn, &r0, &r1);
726 regs[r0] = (uint16_t)regs[r1];
729 #if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
731 tci_args_rr(insn, &r0, &r1);
732 regs[r0] = bswap16(regs[r1]);
735 #if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
737 tci_args_rr(insn, &r0, &r1);
738 regs[r0] = bswap32(regs[r1]);
741 #if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
743 tci_args_rr(insn, &r0, &r1);
744 regs[r0] = ~regs[r1];
747 #if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
749 tci_args_rr(insn, &r0, &r1);
750 regs[r0] = -regs[r1];
753 #if TCG_TARGET_REG_BITS == 64
754 /* Load/store operations (64 bit). */
756 case INDEX_op_ld32s_i64:
757 tci_args_rrs(insn, &r0, &r1, &ofs);
758 ptr = (void *)(regs[r1] + ofs);
759 regs[r0] = *(int32_t *)ptr;
761 case INDEX_op_ld_i64:
762 tci_args_rrs(insn, &r0, &r1, &ofs);
763 ptr = (void *)(regs[r1] + ofs);
764 regs[r0] = *(uint64_t *)ptr;
766 case INDEX_op_st_i64:
767 tci_args_rrs(insn, &r0, &r1, &ofs);
768 ptr = (void *)(regs[r1] + ofs);
769 *(uint64_t *)ptr = regs[r0];
772 /* Arithmetic operations (64 bit). */
774 case INDEX_op_div_i64:
775 tci_args_rrr(insn, &r0, &r1, &r2);
776 regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
778 case INDEX_op_divu_i64:
779 tci_args_rrr(insn, &r0, &r1, &r2);
780 regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
782 case INDEX_op_rem_i64:
783 tci_args_rrr(insn, &r0, &r1, &r2);
784 regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
786 case INDEX_op_remu_i64:
787 tci_args_rrr(insn, &r0, &r1, &r2);
788 regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
790 #if TCG_TARGET_HAS_clz_i64
791 case INDEX_op_clz_i64:
792 tci_args_rrr(insn, &r0, &r1, &r2);
793 regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
796 #if TCG_TARGET_HAS_ctz_i64
797 case INDEX_op_ctz_i64:
798 tci_args_rrr(insn, &r0, &r1, &r2);
799 regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
802 #if TCG_TARGET_HAS_ctpop_i64
803 case INDEX_op_ctpop_i64:
804 tci_args_rr(insn, &r0, &r1);
805 regs[r0] = ctpop64(regs[r1]);
808 #if TCG_TARGET_HAS_mulu2_i64
809 case INDEX_op_mulu2_i64:
810 tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
/* NOTE(review): "®s" below is mojibake for "&regs". */
811 mulu64(®s[r0], ®s[r1], regs[r2], regs[r3]);
814 #if TCG_TARGET_HAS_muls2_i64
815 case INDEX_op_muls2_i64:
816 tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
817 muls64(®s[r0], ®s[r1], regs[r2], regs[r3]);
820 #if TCG_TARGET_HAS_add2_i64
821 case INDEX_op_add2_i64:
822 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
823 T1 = regs[r2] + regs[r4];
824 T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
/* NOTE(review): guard reads TCG_TARGET_HAS_add2_i64 but the case is sub2
 * — presumably should be TCG_TARGET_HAS_sub2_i64; verify upstream. */
829 #if TCG_TARGET_HAS_add2_i64
830 case INDEX_op_sub2_i64:
831 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
832 T1 = regs[r2] - regs[r4];
833 T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
839 /* Shift/rotate operations (64 bit). */
841 case INDEX_op_shl_i64:
842 tci_args_rrr(insn, &r0, &r1, &r2);
843 regs[r0] = regs[r1] << (regs[r2] & 63);
845 case INDEX_op_shr_i64:
846 tci_args_rrr(insn, &r0, &r1, &r2);
847 regs[r0] = regs[r1] >> (regs[r2] & 63);
849 case INDEX_op_sar_i64:
850 tci_args_rrr(insn, &r0, &r1, &r2);
851 regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
853 #if TCG_TARGET_HAS_rot_i64
854 case INDEX_op_rotl_i64:
855 tci_args_rrr(insn, &r0, &r1, &r2);
856 regs[r0] = rol64(regs[r1], regs[r2] & 63);
858 case INDEX_op_rotr_i64:
859 tci_args_rrr(insn, &r0, &r1, &r2);
860 regs[r0] = ror64(regs[r1], regs[r2] & 63);
863 #if TCG_TARGET_HAS_deposit_i64
864 case INDEX_op_deposit_i64:
865 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
866 regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
869 #if TCG_TARGET_HAS_extract_i64
870 case INDEX_op_extract_i64:
871 tci_args_rrbb(insn, &r0, &r1, &pos, &len);
872 regs[r0] = extract64(regs[r1], pos, len);
875 #if TCG_TARGET_HAS_sextract_i64
876 case INDEX_op_sextract_i64:
877 tci_args_rrbb(insn, &r0, &r1, &pos, &len);
878 regs[r0] = sextract64(regs[r1], pos, len);
881 case INDEX_op_brcond_i64:
882 tci_args_rl(insn, tb_ptr, &r0, &ptr);
887 case INDEX_op_ext32s_i64:
888 case INDEX_op_ext_i32_i64:
889 tci_args_rr(insn, &r0, &r1);
890 regs[r0] = (int32_t)regs[r1];
892 case INDEX_op_ext32u_i64:
893 case INDEX_op_extu_i32_i64:
894 tci_args_rr(insn, &r0, &r1);
895 regs[r0] = (uint32_t)regs[r1];
897 #if TCG_TARGET_HAS_bswap64_i64
898 case INDEX_op_bswap64_i64:
899 tci_args_rr(insn, &r0, &r1);
900 regs[r0] = bswap64(regs[r1]);
903 #endif /* TCG_TARGET_REG_BITS == 64 */
905 /* QEMU specific operations. */
907 case INDEX_op_exit_tb:
908 tci_args_l(insn, tb_ptr, &ptr);
909 return (uintptr_t)ptr;
911 case INDEX_op_goto_tb:
912 tci_args_l(insn, tb_ptr, &ptr);
913 tb_ptr = *(void **)ptr;
916 case INDEX_op_goto_ptr:
917 tci_args_r(insn, &r0);
918 ptr = (void *)regs[r0];
/* Guest memory access: operand count depends on host/guest word sizes. */
925 case INDEX_op_qemu_ld_i32:
926 if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
927 tci_args_rrm(insn, &r0, &r1, &oi);
930 tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
931 taddr = tci_uint64(regs[r2], regs[r1]);
933 tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
937 case INDEX_op_qemu_ld_i64:
938 if (TCG_TARGET_REG_BITS == 64) {
939 tci_args_rrm(insn, &r0, &r1, &oi);
941 } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
942 tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
945 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
946 taddr = tci_uint64(regs[r3], regs[r2]);
949 tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
950 if (TCG_TARGET_REG_BITS == 32) {
951 tci_write_reg64(regs, r1, r0, tmp64);
957 case INDEX_op_qemu_st_i32:
958 if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
959 tci_args_rrm(insn, &r0, &r1, &oi);
962 tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
963 taddr = tci_uint64(regs[r2], regs[r1]);
966 tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
969 case INDEX_op_qemu_st_i64:
970 if (TCG_TARGET_REG_BITS == 64) {
971 tci_args_rrm(insn, &r0, &r1, &oi);
975 if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
976 tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
979 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
980 taddr = tci_uint64(regs[r3], regs[r2]);
983 tmp64 = tci_uint64(regs[r1], regs[r0]);
985 tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
989 /* Ensure ordering for all kinds */
993 g_assert_not_reached();
999 * Disassembler that matches the interpreter
1002 static const char *str_r(TCGReg r)
1004 static const char regs[TCG_TARGET_NB_REGS][4] = {
1005 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
1006 "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
1009 QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
1010 QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);
1012 assert((unsigned)r < TCG_TARGET_NB_REGS);
1016 static const char *str_c(TCGCond c)
1018 static const char cond[16][8] = {
1019 [TCG_COND_NEVER] = "never",
1020 [TCG_COND_ALWAYS] = "always",
1021 [TCG_COND_EQ] = "eq",
1022 [TCG_COND_NE] = "ne",
1023 [TCG_COND_LT] = "lt",
1024 [TCG_COND_GE] = "ge",
1025 [TCG_COND_LE] = "le",
1026 [TCG_COND_GT] = "gt",
1027 [TCG_COND_LTU] = "ltu",
1028 [TCG_COND_GEU] = "geu",
1029 [TCG_COND_LEU] = "leu",
1030 [TCG_COND_GTU] = "gtu",
1033 assert((unsigned)c < ARRAY_SIZE(cond));
1034 assert(cond[c][0] != 0);
1038 /* Disassemble TCI bytecode. */
/*
 * Disassemble one TCI instruction at @addr to info->stream; returns the
 * number of bytes consumed (sizeof(insn), see the final return).
 *
 * NOTE(review): this region was damaged in extraction — short lines
 * (braces, 'break;', local declarations, some 'case' labels) are missing
 * and each remaining line carries a stray leading line-number token.
 * Text is preserved byte-for-byte; restore from upstream before building.
 */
1039 int print_insn_tci(bfd_vma addr, disassemble_info *info)
1041 const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
1042 const TCGOpDef *def;
1043 const char *op_name;
1046 TCGReg r0, r1, r2, r3, r4, r5;
1047 tcg_target_ulong i1;
1054 /* TCI is always the host, so we don't need to load indirect. */
1057 info->fprintf_func(info->stream, "%08x  ", insn);
1059 op = extract32(insn, 0, 8);
1060 def = &tcg_op_defs[op];
1061 op_name = def->name;
/* Dispatch on opcode to print the operands in the right format. */
1065 case INDEX_op_exit_tb:
1066 case INDEX_op_goto_tb:
1067 tci_args_l(insn, tb_ptr, &ptr);
1068 info->fprintf_func(info->stream, "%-12s  %p", op_name, ptr);
1071 case INDEX_op_goto_ptr:
1072 tci_args_r(insn, &r0);
1073 info->fprintf_func(info->stream, "%-12s  %s", op_name, str_r(r0));
1077 tci_args_nl(insn, tb_ptr, &len, &ptr);
1078 info->fprintf_func(info->stream, "%-12s  %d, %p", op_name, len, ptr);
1081 case INDEX_op_brcond_i32:
1082 case INDEX_op_brcond_i64:
1083 tci_args_rl(insn, tb_ptr, &r0, &ptr);
1084 info->fprintf_func(info->stream, "%-12s  %s, 0, ne, %p",
1085 op_name, str_r(r0), ptr);
1088 case INDEX_op_setcond_i32:
1089 case INDEX_op_setcond_i64:
1090 tci_args_rrrc(insn, &r0, &r1, &r2, &c);
1091 info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %s",
1092 op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
1095 case INDEX_op_tci_movi:
1096 tci_args_ri(insn, &r0, &i1);
1097 info->fprintf_func(info->stream, "%-12s  %s, 0x%" TCG_PRIlx,
1098 op_name, str_r(r0), i1);
1101 case INDEX_op_tci_movl:
1102 tci_args_rl(insn, tb_ptr, &r0, &ptr);
1103 info->fprintf_func(info->stream, "%-12s  %s, %p",
1104 op_name, str_r(r0), ptr);
1107 case INDEX_op_ld8u_i32:
1108 case INDEX_op_ld8u_i64:
1109 case INDEX_op_ld8s_i32:
1110 case INDEX_op_ld8s_i64:
1111 case INDEX_op_ld16u_i32:
1112 case INDEX_op_ld16u_i64:
1113 case INDEX_op_ld16s_i32:
1114 case INDEX_op_ld16s_i64:
1115 case INDEX_op_ld32u_i64:
1116 case INDEX_op_ld32s_i64:
1117 case INDEX_op_ld_i32:
1118 case INDEX_op_ld_i64:
1119 case INDEX_op_st8_i32:
1120 case INDEX_op_st8_i64:
1121 case INDEX_op_st16_i32:
1122 case INDEX_op_st16_i64:
1123 case INDEX_op_st32_i64:
1124 case INDEX_op_st_i32:
1125 case INDEX_op_st_i64:
1126 tci_args_rrs(insn, &r0, &r1, &s2);
1127 info->fprintf_func(info->stream, "%-12s  %s, %s, %d",
1128 op_name, str_r(r0), str_r(r1), s2);
1131 case INDEX_op_mov_i32:
1132 case INDEX_op_mov_i64:
1133 case INDEX_op_ext8s_i32:
1134 case INDEX_op_ext8s_i64:
1135 case INDEX_op_ext8u_i32:
1136 case INDEX_op_ext8u_i64:
1137 case INDEX_op_ext16s_i32:
1138 case INDEX_op_ext16s_i64:
1139 case INDEX_op_ext16u_i32:
1140 case INDEX_op_ext32s_i64:
1141 case INDEX_op_ext32u_i64:
1142 case INDEX_op_ext_i32_i64:
1143 case INDEX_op_extu_i32_i64:
1144 case INDEX_op_bswap16_i32:
1145 case INDEX_op_bswap16_i64:
1146 case INDEX_op_bswap32_i32:
1147 case INDEX_op_bswap32_i64:
1148 case INDEX_op_bswap64_i64:
1149 case INDEX_op_not_i32:
1150 case INDEX_op_not_i64:
1151 case INDEX_op_neg_i32:
1152 case INDEX_op_neg_i64:
1153 case INDEX_op_ctpop_i32:
1154 case INDEX_op_ctpop_i64:
1155 tci_args_rr(insn, &r0, &r1);
1156 info->fprintf_func(info->stream, "%-12s  %s, %s",
1157 op_name, str_r(r0), str_r(r1));
1160 case INDEX_op_add_i32:
1161 case INDEX_op_add_i64:
1162 case INDEX_op_sub_i32:
1163 case INDEX_op_sub_i64:
1164 case INDEX_op_mul_i32:
1165 case INDEX_op_mul_i64:
1166 case INDEX_op_and_i32:
1167 case INDEX_op_and_i64:
1168 case INDEX_op_or_i32:
1169 case INDEX_op_or_i64:
1170 case INDEX_op_xor_i32:
1171 case INDEX_op_xor_i64:
1172 case INDEX_op_andc_i32:
1173 case INDEX_op_andc_i64:
1174 case INDEX_op_orc_i32:
1175 case INDEX_op_orc_i64:
1176 case INDEX_op_eqv_i32:
1177 case INDEX_op_eqv_i64:
1178 case INDEX_op_nand_i32:
1179 case INDEX_op_nand_i64:
1180 case INDEX_op_nor_i32:
1181 case INDEX_op_nor_i64:
1182 case INDEX_op_div_i32:
1183 case INDEX_op_div_i64:
1184 case INDEX_op_rem_i32:
1185 case INDEX_op_rem_i64:
1186 case INDEX_op_divu_i32:
1187 case INDEX_op_divu_i64:
1188 case INDEX_op_remu_i32:
1189 case INDEX_op_remu_i64:
1190 case INDEX_op_shl_i32:
1191 case INDEX_op_shl_i64:
1192 case INDEX_op_shr_i32:
1193 case INDEX_op_shr_i64:
1194 case INDEX_op_sar_i32:
1195 case INDEX_op_sar_i64:
1196 case INDEX_op_rotl_i32:
1197 case INDEX_op_rotl_i64:
1198 case INDEX_op_rotr_i32:
1199 case INDEX_op_rotr_i64:
1200 case INDEX_op_clz_i32:
1201 case INDEX_op_clz_i64:
1202 case INDEX_op_ctz_i32:
1203 case INDEX_op_ctz_i64:
1204 tci_args_rrr(insn, &r0, &r1, &r2);
1205 info->fprintf_func(info->stream, "%-12s  %s, %s, %s",
1206 op_name, str_r(r0), str_r(r1), str_r(r2));
1209 case INDEX_op_deposit_i32:
1210 case INDEX_op_deposit_i64:
1211 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
1212 info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %d, %d",
1213 op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
1216 case INDEX_op_extract_i32:
1217 case INDEX_op_extract_i64:
1218 case INDEX_op_sextract_i32:
1219 case INDEX_op_sextract_i64:
1220 tci_args_rrbb(insn, &r0, &r1, &pos, &len);
1221 info->fprintf_func(info->stream, "%-12s  %s,%s,%d,%d",
1222 op_name, str_r(r0), str_r(r1), pos, len);
1225 case INDEX_op_movcond_i32:
1226 case INDEX_op_movcond_i64:
1227 case INDEX_op_setcond2_i32:
1228 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
1229 info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %s, %s, %s",
1230 op_name, str_r(r0), str_r(r1), str_r(r2),
1231 str_r(r3), str_r(r4), str_c(c));
1234 case INDEX_op_mulu2_i32:
1235 case INDEX_op_mulu2_i64:
1236 case INDEX_op_muls2_i32:
1237 case INDEX_op_muls2_i64:
1238 tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
1239 info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %s",
1240 op_name, str_r(r0), str_r(r1),
1241 str_r(r2), str_r(r3));
1244 case INDEX_op_add2_i32:
1245 case INDEX_op_add2_i64:
1246 case INDEX_op_sub2_i32:
1247 case INDEX_op_sub2_i64:
1248 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
1249 info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %s, %s, %s",
1250 op_name, str_r(r0), str_r(r1), str_r(r2),
1251 str_r(r3), str_r(r4), str_r(r5));
/* qemu_ld/st: operand count depends on host/guest word widths. */
1254 case INDEX_op_qemu_ld_i64:
1255 case INDEX_op_qemu_st_i64:
1256 len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
1258 case INDEX_op_qemu_ld_i32:
1259 case INDEX_op_qemu_st_i32:
1262 len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
1265 tci_args_rrm(insn, &r0, &r1, &oi);
1266 info->fprintf_func(info->stream, "%-12s  %s, %s, %x",
1267 op_name, str_r(r0), str_r(r1), oi);
1270 tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
1271 info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %x",
1272 op_name, str_r(r0), str_r(r1), str_r(r2), oi);
1275 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
1276 info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %s, %s",
1277 op_name, str_r(r0), str_r(r1),
1278 str_r(r2), str_r(r3), str_r(r4));
1281 g_assert_not_reached();
1286 /* tcg_out_nop_fill uses zeros */
1288 info->fprintf_func(info->stream, "align");
1294 info->fprintf_func(info->stream, "illegal opcode %d", op);
1298 return sizeof(insn);