/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
 *
 * Based on tcg/riscv/tcg-target.c.inc
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "../tcg-ldst.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r21", /* reserved in the LP64* ABI, hence no ABI name */

static const int tcg_target_reg_alloc_order[] = {
    /* Registers preserved across calls */
    /* TCG_REG_S0 reserved for TCG_AREG0 */

    /* Registers (potentially) clobbered across calls */

    /* Argument registers, opposite order of allocation. */

static const int tcg_target_call_iarg_regs[] = {

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#ifndef CONFIG_SOFTMMU
#define USE_GUEST_BASE     (guest_base != 0)
#define TCG_GUEST_BASE_REG TCG_REG_S1
#endif

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_S32   0x400
#define TCG_CT_CONST_U12   0x800
#define TCG_CT_CONST_C12   0x1000
#define TCG_CT_CONST_WSZ   0x2000

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
/*
 * For softmmu, we need to avoid conflicts with the first 5
 * argument registers to call the helper.  Some of these are
 * also used for the tlb lookup.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS  MAKE_64BIT_MASK(TCG_REG_A0, 5)
#else
#define SOFTMMU_RESERVE_REGS  0
#endif
static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    return sextract64(val, pos, len);
}
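/*
 * Illustration: sextreg(0xfff, 0, 12) == -1 while sextreg(0x7ff, 0, 12)
 * == 0x7ff; bit pos+len-1 of the extracted field acts as its sign bit.
 */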
/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return true;
    }
    return false;
}
/*
 * Relocation records defined in the LoongArch ELF psABI v1.00 are way too
 * complicated: a whopping stack machine is needed to stuff the fields, and
 * at the very least one SOP_PUSH and one SOP_POP (of the correct format)
 * are required.
 *
 * Hence, define our own simpler relocation types.  Numbers are chosen so as
 * not to collide with potential future additions to the true ELF relocation
 * type space.
 */

/* Field Sk16, shifted right by 2; suitable for conditional jumps */
#define R_LOONGARCH_BR_SK16     256
/* Field Sd10k16, shifted right by 2; suitable for B and BL */
#define R_LOONGARCH_BR_SD10K16  257
static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 16)) {
        *src_rw = deposit64(*src_rw, 10, 16, offset);
        return true;
    }
    return false;
}
static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
                             const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 26)) {
        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
        *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */
        return true;
    }
    return false;
}
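/*
 * Illustration: for a pre-shifted 26-bit offset such as 0x2abcdef, bits
 * [25:16] (0x2ab) land in the d10 slot at bit 0 of the instruction word
 * and bits [15:0] (0xcdef) land in the k16 slot at bit 10 -- the two
 * halves are stored swapped relative to their numeric significance.
 */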
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_LOONGARCH_BR_SK16:
        return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
    case R_LOONGARCH_BR_SD10K16:
        return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#include "tcg-insn-defs.c.inc"

/*
 * TCG intrinsics
 */
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Baseline LoongArch only has the full barrier, unfortunately. */
    tcg_out_opc_dbar(s, 0);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        /*
         * Conventional register-register move used in LoongArch is
         * `or dst, src, zero`.
         */
        tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
/* Loads a 32-bit immediate into rd, sign-extended. */
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
{
    tcg_target_long lo = sextreg(val, 0, 12);
    tcg_target_long hi12 = sextreg(val, 12, 20);

    /* Single-instruction cases. */
    if (hi12 == 0) {
        /* val fits in uimm12: ori rd, zero, val */
        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
        return;
    }
    if (hi12 == sextreg(lo, 12, 20)) {
        /* val fits in simm12: addi.w rd, zero, val */
        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
        return;
    }

    /* High bits must be set; load with lu12i.w + optional ori. */
    tcg_out_opc_lu12i_w(s, rd, hi12);
    if (lo != 0) {
        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
    }
}
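/*
 * For example, val = 0x12345678 has hi12 = 0x12345 and lo = 0x678 and is
 * emitted as "lu12i.w rd, 0x12345; ori rd, rd, 0x678", while val = -5
 * satisfies the simm12 check and needs only "addi.w rd, zero, -5".
 */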
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    /*
     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
     * with dedicated instructions for filling the respective bitfields
     * below:
     *
     *        6                   5                   4               3
     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
     * +-----------------------+---------------------------------------+...
     * |          hi52         |                  hi32                 |
     * +-----------------------+---------------------------------------+...
     *       3                   2                   1
     *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------------+-------------------------+
     *    |                 hi12                |            lo           |
     * ...+-------------------------------------+-------------------------+
     *
     * Check if val belongs to one of the several fast cases, before falling
     * back to the slow path.
     */
    intptr_t pc_offset;
    tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
    tcg_target_long hi12, hi32, hi52;

    /* Value fits in signed i32. */
    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
        tcg_out_movi_i32(s, rd, val);
        return;
    }

    /* PC-relative cases. */
    pc_offset = tcg_pcrel_diff(s, (void *)val);
    if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) {
        /* Single pcaddu2i. */
        tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
        return;
    }

    if (pc_offset == (int32_t)pc_offset) {
        /* Offset within 32 bits; load with pcalau12i + ori. */
        val_lo = sextreg(val, 0, 12);
        val_hi = val >> 12;
        pc_hi = (val - pc_offset) >> 12;
        offset_hi = val_hi - pc_hi;

        tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20));
        tcg_out_opc_pcalau12i(s, rd, offset_hi);
        if (val_lo != 0) {
            tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff);
        }
        return;
    }

    hi12 = sextreg(val, 12, 20);
    hi32 = sextreg(val, 32, 20);
    hi52 = sextreg(val, 52, 12);

    /* Single cu52i.d case. */
    if ((hi52 != 0) && (ctz64(val) >= 52)) {
        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
        return;
    }

    /* Slow path.  Initialize the low 32 bits, then concat high bits. */
    tcg_out_movi_i32(s, rd, val);

    /* Load hi32 and hi52 explicitly when they are unexpected values. */
    if (hi32 != sextreg(hi12, 20, 20)) {
        tcg_out_opc_cu32i_d(s, rd, hi32);
    }

    if (hi52 != sextreg(hi32, 20, 12)) {
        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
    }
}
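/*
 * Worked example of the slow path: val = 0x0123456789abc000 becomes
 *   lu12i.w  rd, 0x89abc      # rd = 0xffffffff89abc000
 *   cu32i.d  rd, 0x34567      # rd = 0x0003456789abc000
 *   cu52i.d  rd, rd, 0x012    # rd = 0x0123456789abc000
 * i.e. each step installs one of the bitfields pictured above.
 */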
static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
                         TCGReg rs, tcg_target_long imm)
{
    tcg_target_long lo12 = sextreg(imm, 0, 12);
    tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);

    /*
     * Note that there's a hole in between hi16 and lo12:
     *
     *       3                   2                   1                   0
     *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------+-------+-----------------------+
     *    |             hi16              |       |          lo12         |
     * ...+-------------------------------+-------+-----------------------+
     *
     * For bits within that hole, it's more efficient to use LU12I and ADD.
     */
    if (imm == (hi16 << 16) + lo12) {
        if (hi16) {
            tcg_out_opc_addu16i_d(s, rd, rs, hi16);
            rs = rd;
        }
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_addi_w(s, rd, rs, lo12);
        } else if (lo12) {
            tcg_out_opc_addi_d(s, rd, rs, lo12);
        } else if (rd != rs) {
            tcg_out_mov(s, type, rd, rs);
        }
    } else {
        tcg_out_movi(s, type, TCG_REG_TMP0, imm);
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
        } else {
            tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
        }
    }
}
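/*
 * Worked example of the hole: imm = 0x9000 lives entirely in bits 15..12,
 * so lo12 = 0, hi16 = 0, and (hi16 << 16) + lo12 != imm -- we fall back to
 * materializing the constant in TMP0 followed by a register add.  By
 * contrast, imm = 0x120456 splits cleanly as hi16 = 0x12, lo12 = 0x456 and
 * is emitted as addu16i.d + addi.
 */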
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_andi(s, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_b(s, ret, arg);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_h(s, ret, arg);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_addi_w(s, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}
static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
                           TCGReg a0, TCGReg a1, TCGReg a2,
                           bool c2, bool is_32bit)
{
    if (c2) {
        /*
         * Fast path: semantics already satisfied due to constraint and
         * insn behavior, single instruction is enough.
         */
        tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
        /* all clz/ctz insns belong to DJ-format */
        tcg_out32(s, encode_dj_insn(opc, a0, a1));
        return;
    }

    tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
    /* a0 = a1 ? REG_TMP0 : a2 */
    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
    tcg_out_opc_masknez(s, a0, a2, a1);
    tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
}
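/*
 * Illustration of the branchless select above: maskeqz keeps TMP0 only
 * when a1 != 0 (else 0), masknez keeps a2 only when a1 == 0 (else 0), so
 * or-ing the two yields clz/ctz(a1) for non-zero inputs and the
 * caller-supplied default a2 otherwise.
 */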
#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)
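/*
 * The helper below returns a register number with these two flag bits
 * possibly or-ed in: register numbers occupy the low 5 bits (there are 32
 * GPRs), so e.g. (TCG_REG_TMP0 | SETCOND_NEZ) encodes "TMP0 holds a
 * zero/non-zero value rather than a 0/1 boolean".
 */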
static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT.  Watch out for wrap around for LEU.
         * We don't need to care for this for LE because the constant input
         * is still constrained to int32_t, and INT32_MAX+1 is representable
         * in the 64-bit temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            arg2 += 1;
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_xor(s, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else if (arg2 >= 0 && arg2 <= 0xfff) {
            tcg_out_opc_xori(s, ret, arg1, arg2);
        } else {
            tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
        }
        break;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        if (c2) {
            if (arg2 >= -0x800 && arg2 <= 0x7ff) {
                if (cond == TCG_COND_LT) {
                    tcg_out_opc_slti(s, ret, arg1, arg2);
                } else {
                    tcg_out_opc_sltui(s, ret, arg1, arg2);
                }
                break;
            }
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
            arg2 = TCG_REG_TMP0;
        }
        if (cond == TCG_COND_LT) {
            tcg_out_opc_slt(s, ret, arg1, arg2);
        } else {
            tcg_out_opc_sltu(s, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
    }

    return ret | flags;
}
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_xori(s, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_sltui(s, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, tcg_target_long c2, bool const2,
                            TCGReg v1, TCGReg v2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
    TCGReg t;

    /* Standardize the test below to t != 0. */
    if (tmpflags & SETCOND_INV) {
        t = v1, v1 = v2, v2 = t;
    }

    t = tmpflags & ~SETCOND_FLAGS;
    if (v1 == TCG_REG_ZERO) {
        tcg_out_opc_masknez(s, ret, v2, t);
    } else if (v2 == TCG_REG_ZERO) {
        tcg_out_opc_maskeqz(s, ret, v1, t);
    } else {
        tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
        tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
        tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
    }
}
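/*
 * Illustration: for movcond ret, c1, c2, v1, v2 the comparison result
 * lands in TMP0 as t; masknez/maskeqz then select v2 or v1 into the two
 * scratch registers and the final or merges them -- a branchless select
 * in at most three instructions after the comparison.
 */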
/*
 * Branch helpers
 */

static const struct {
    LoongArchInsn op;
    bool swap;
} tcg_brcond_to_loongarch[] = {
    [TCG_COND_EQ]  = { OPC_BEQ,  false },
    [TCG_COND_NE]  = { OPC_BNE,  false },
    [TCG_COND_LT]  = { OPC_BGT,  true  },
    [TCG_COND_GE]  = { OPC_BLE,  true  },
    [TCG_COND_LE]  = { OPC_BLE,  false },
    [TCG_COND_GT]  = { OPC_BGT,  false },
    [TCG_COND_LTU] = { OPC_BGTU, true  },
    [TCG_COND_GEU] = { OPC_BLEU, true  },
    [TCG_COND_LEU] = { OPC_BLEU, false },
    [TCG_COND_GTU] = { OPC_BGTU, false }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_loongarch[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    /* all conditional branch insns belong to DJSk16-format */
    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);

    tcg_debug_assert((offset & 3) == 0);
    if (offset == sextreg(offset, 0, 28)) {
        /* short jump: signed 28-bit offset, i.e. +/- 128 MiB */
        if (tail) {
            tcg_out_opc_b(s, offset >> 2);
        } else {
            tcg_out_opc_bl(s, offset >> 2);
        }
    } else if (offset == sextreg(offset, 0, 38)) {
        /* long jump: signed 38-bit offset, i.e. +/- 128 GiB */
        tcg_target_long lo = sextreg(offset, 0, 18);
        tcg_target_long hi = offset - lo;
        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    } else {
        /* far jump: load the full 64-bit absolute address */
        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
        tcg_target_long hi = (tcg_target_long)arg - lo;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    }
}
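/*
 * Range check for the three cases above: B/BL encode a 26-bit immediate
 * that is shifted left by 2, giving 2^27 bytes of reach in either
 * direction; pcaddu18i + jirl extends that to a signed 38-bit byte offset
 * (+/- 2^37); anything farther falls back to materializing the absolute
 * address.
 */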
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

/*
 * Load/store helpers
 */

static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_LD_B:
    case OPC_LD_BU:
    case OPC_LD_H:
    case OPC_LD_HU:
    case OPC_LD_W:
    case OPC_LD_WU:
    case OPC_LD_D:
    case OPC_ST_B:
    case OPC_ST_H:
    case OPC_ST_W:
    case OPC_ST_D:
        tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
        break;
    default:
        g_assert_not_reached();
    }
}
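/*
 * E.g. a load at offset 0x12345 from addr: imm12 = sextreg(0x12345, 0, 12)
 * = 0x345, so TMP2 = addr + 0x12000 is formed first and the access becomes
 * "ld.* data, TMP2, 0x345".
 */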
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}
/*
 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
 */

#if defined(CONFIG_SOFTMMU)
/*
 * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     MemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[4] = {
    [MO_8]  = helper_ret_ldub_mmu,
    [MO_16] = helper_le_lduw_mmu,
    [MO_32] = helper_le_ldul_mmu,
    [MO_64] = helper_le_ldq_mmu,
};

/*
 * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, MemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[4] = {
    [MO_8]  = helper_ret_stb_mmu,
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
};

/* We expect to use a 12-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_b(s, 0);
    return reloc_br_sd10k16(s->code_ptr - 1, target);
}

/*
 * Emits common code for TLB addend lookup, that eventually loads the
 * addend in TCG_REG_TMP2.
 */
static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, MemOpIdx oi,
                             tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

    tcg_out_opc_srli_d(s, TCG_REG_TMP2, addrl,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
                       : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address. */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
    tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addrl);

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - addend in TCG_REG_TMP2, ready for use. */
}
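/*
 * Rough shape of the fast path emitted above:
 *   srli.d  TMP2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS
 *   and     TMP2, TMP2, mask        # index into the TLB
 *   add.d   TMP2, TMP2, table       # &tlb_entry
 *   ld.*    TMP0, TMP2, addr_read/addr_write
 *   ld.d    TMP2, TMP2, addend
 *   bne     TMP0, masked_addr, slow_path
 * leaving host_addr = guest_addr + TMP2 for the actual access.
 */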
static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
                                TCGType type,
                                TCGReg datalo, TCGReg addrlo,
                                void *raddr, tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = type;
    label->datalo_reg = datalo;
    label->datahi_reg = 0; /* unused */
    label->addrlo_reg = addrlo;
    label->addrhi_reg = 0; /* unused */
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr[0];
}
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp size = opc & MO_SIZE;

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, (tcg_target_long)l->raddr);

    tcg_out_call_int(s, qemu_ld_helpers[size], false);

    tcg_out_movext(s, l->type, l->datalo_reg,
                   TCG_TYPE_REG, opc & MO_SSIZE, TCG_REG_A0);
    return tcg_out_goto(s, l->raddr);
}
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp size = opc & MO_SIZE;

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
    tcg_out_movext(s, size == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, TCG_REG_A2,
                   l->type, size, l->datalo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (tcg_target_long)l->raddr);

    tcg_out_call_int(s, qemu_st_helpers[size], false);

    return tcg_out_goto(s, l->raddr);
}
#else

/*
 * Alignment helpers for user-mode emulation
 */

static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
                                   unsigned a_bits)
{
    TCGLabelQemuLdst *l = new_ldst_label(s);

    l->is_ld = is_ld;
    l->addrlo_reg = addr_reg;

    /*
     * Without micro-architecture details, we don't know which of bstrpick or
     * andi is faster, so use bstrpick as it's not constrained by imm field
     * width.  (Not to say alignments >= 2^12 are going to happen any time
     * soon.)
     */
    tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);

    l->label_ptr[0] = s->code_ptr;
    tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);

    l->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
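/*
 * E.g. for a 4-byte access, a_bits = 2: bstrpick.d extracts address bits
 * [1:0], and any non-zero result branches off to the slow path that raises
 * the unaligned-access exception.
 */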
static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);

    /* tail call, with the return address back inline. */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
    tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
                                                : helper_unaligned_st), true);
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

#endif /* CONFIG_SOFTMMU */
/*
 * `ext32u` the address register into the given temp register if the guest
 * address is 32 bits wide (TARGET_LONG_BITS == 32); otherwise this is a
 * no-op.
 *
 * Returns the address register ready for use with TLB addend.
 */
static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
                                          TCGReg addr, TCGReg tmp)
{
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, tmp, addr);
        return tmp;
    }
    return addr;
}
static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
                                    TCGReg rk, MemOp opc, TCGType type)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_ldx_bu(s, rd, rj, rk);
        break;
    case MO_SB:
        tcg_out_opc_ldx_b(s, rd, rj, rk);
        break;
    case MO_UW:
        tcg_out_opc_ldx_hu(s, rd, rj, rk);
        break;
    case MO_SW:
        tcg_out_opc_ldx_h(s, rd, rj, rk);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_ldx_wu(s, rd, rj, rk);
            break;
        }
        /* fallthrough */
    case MO_SL:
        tcg_out_opc_ldx_w(s, rd, rj, rk);
        break;
    case MO_UQ:
        tcg_out_opc_ldx_d(s, rd, rj, rk);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
{
    TCGReg addr_regl;
    TCGReg data_regl;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    addr_regl = *args++;
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1);
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type);
    add_qemu_ldst_label(s, 1, oi, type,
                        data_regl, addr_regl,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, true, addr_regl, a_bits);
    }
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type);
#endif
}
static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
                                    TCGReg rj, TCGReg rk, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_opc_stx_b(s, data, rj, rk);
        break;
    case MO_16:
        tcg_out_opc_stx_h(s, data, rj, rk);
        break;
    case MO_32:
        tcg_out_opc_stx_w(s, data, rj, rk);
        break;
    case MO_64:
        tcg_out_opc_stx_d(s, data, rj, rk);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGType type)
{
    TCGReg addr_regl;
    TCGReg data_regl;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    addr_regl = *args++;
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
    add_qemu_ldst_label(s, 0, oi, type,
                        data_regl, addr_regl,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, false, addr_regl, a_bits);
    }
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc);
#endif
}
/*
 * Entry-points
 */

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr. */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}
static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /*
     * Direct branch, or load indirect address, to be patched
     * by tb_target_set_jmp_target.  Check indirect load offset
     * in range early, regardless of direct branch distance,
     * via assert within tcg_out_opc_pcaddu2i.
     */
    uintptr_t i_addr = get_jmp_target_addr(s, which);
    intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);

    set_jmp_insn_offset(s, which);
    tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);

    /* Finish the load and indirect branch. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t d_addr = tb->jmp_target_addr[n];
    ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
    tcg_insn_unit insn;

    /* Either directly branch, or load slot address for indirect branch. */
    if (d_disp == sextreg(d_disp, 0, 26)) {
        insn = encode_sd10k16_insn(OPC_B, d_disp);
    } else {
        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
        intptr_t i_disp = i_addr - jmp_rx;
        insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
    }

    qatomic_set((tcg_insn_unit *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
                      0);
        tcg_out_opc_b(s, 0);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_srai_d(s, a0, a1, 32);
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
        break;

    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
        } else {
            tcg_out_opc_nor(s, a0, a1, a2);
        }
        break;
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_andi(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_andn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_ori(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_orn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_andi(s, a0, a1, a2);
        } else {
            tcg_out_opc_and(s, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
        } else {
            tcg_out_opc_or(s, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_xori(s, a0, a1, a2);
        } else {
            tcg_out_opc_xor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_extract_i32:
        tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
        break;
    case INDEX_op_extract_i64:
        tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;
    case INDEX_op_deposit_i64:
        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_opc_revb_2h(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap32_i32:
        /* All 32-bit values are computed sign-extended in the register. */
        a2 = TCG_BSWAP_OS;
        /* fallthrough */
    case INDEX_op_bswap32_i64:
        tcg_out_opc_revb_2w(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_opc_revb_d(s, a0, a1);
        break;

    case INDEX_op_clz_i32:
        tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_clz_i64:
        tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_ctz_i32:
        tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
        break;
    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sll_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sll_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_srl_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_srl_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sra_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sra_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i32:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
        }
        break;
    case INDEX_op_rotl_i64:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
        }
        break;

    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_rotr_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_rotr_d(s, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
        } else {
            tcg_out_opc_add_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
        } else {
            tcg_out_opc_add_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_mul_w(s, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_mul_d(s, a0, a1, a2);
        break;

    case INDEX_op_mulsh_i32:
        tcg_out_opc_mulh_w(s, a0, a1, a2);
        break;
    case INDEX_op_mulsh_i64:
        tcg_out_opc_mulh_d(s, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
        tcg_out_opc_mulh_wu(s, a0, a1, a2);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_opc_mulh_du(s, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_div_w(s, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_div_d(s, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_div_wu(s, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_div_du(s, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_mod_w(s, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_mod_d(s, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_mod_wu(s, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_mod_du(s, a0, a1, a2);
        break;
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
        break;

    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, TCG_TYPE_I64);
        break;
    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(LZ, L);

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
        return C_O1_I1(r, r);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, L);

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        /*
         * LoongArch insns for these ops don't have reg-imm forms, but we
         * can express using andi/ori if ~constant satisfies
         * TCG_CT_CONST_U12, i.e. is within [0, 0xfff].
         */
        return C_O1_I2(r, r, rC);
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_add_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rJ);

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        /* LoongArch reg-imm bitops have their imms ZERO-extended */
        return C_O1_I2(r, r, rU);

    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rW);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        /* Must deposit into the same register as input */
        return C_O1_I2(r, 0, rZ);

    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, rZ, ri);
    case INDEX_op_sub_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rZ, rZ);

    default:
        g_assert_not_reached();
    }
}
static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,     /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_RA,     /* should be last for ABI compliance */
};

/* Stack frame parameters. */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
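/*
 * Worked sizing, assuming the usual tcg-internal values of
 * TCG_STATIC_CALL_ARGS_SIZE (128) and CPU_TEMP_BUF_NLONGS (128):
 * SAVE_SIZE = 11 * 8 = 88, TEMP_SIZE = 1024, so FRAME_SIZE =
 * align16(128 + 1024 + 88) = 1248, comfortably below the 0x7ff limit
 * asserted below.
 */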
/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    if (USE_GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
}
static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = ALL_GENERAL_REGS;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
}
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_LOONGARCH

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        FRAME_SIZE >> 7
    },
    .fde_reg_ofs = {
        0x80 + 23, 11,                  /* DW_CFA_offset, s0, -88 */
        0x80 + 24, 10,                  /* DW_CFA_offset, s1, -80 */
        0x80 + 25, 9,                   /* DW_CFA_offset, s2, -72 */
        0x80 + 26, 8,                   /* DW_CFA_offset, s3, -64 */
        0x80 + 27, 7,                   /* DW_CFA_offset, s4, -56 */
        0x80 + 28, 6,                   /* DW_CFA_offset, s5, -48 */
        0x80 + 29, 5,                   /* DW_CFA_offset, s6, -40 */
        0x80 + 30, 4,                   /* DW_CFA_offset, s7, -32 */
        0x80 + 31, 3,                   /* DW_CFA_offset, s8, -24 */
        0x80 + 22, 2,                   /* DW_CFA_offset, s9, -16 */
        0x80 + 1,  1,                   /* DW_CFA_offset, ra, -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}