/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode.  */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
    "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%o6", "%o7",
    "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
    "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%i6", "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

#ifdef CONFIG_SOFTMMU
/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
#define ALL_QLDST_REGS    (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0, TCG_REG_L1, TCG_REG_L2, TCG_REG_L3,
    TCG_REG_L4, TCG_REG_L5, TCG_REG_L6, TCG_REG_L7,

    TCG_REG_I0, TCG_REG_I1, TCG_REG_I2, TCG_REG_I3,
    TCG_REG_I4, TCG_REG_I5,

    TCG_REG_G2, TCG_REG_G3, TCG_REG_G4, TCG_REG_G5,

    TCG_REG_O0, TCG_REG_O1, TCG_REG_O2, TCG_REG_O3,
    TCG_REG_O4, TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0, TCG_REG_O1, TCG_REG_O2,
    TCG_REG_O3, TCG_REG_O4, TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
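
/*
 * Worked example (added for illustration, not in the original source):
 * "add %g2, %g3, %g1" assembles as
 *   ARITH_ADD | INSN_RD(TCG_REG_G1) | INSN_RS1(TCG_REG_G2) | INSN_RS2(TCG_REG_G3)
 *   = (2 << 30) | (1 << 25) | (0x00 << 19) | (2 << 14) | 3 = 0x82008003.
 * The immediate form instead sets bit 13 via INSN_IMM13, so
 * "add %g2, 5, %g1" is 0x82000000 | (2 << 14) | (1 << 13) | 5 = 0x8200a005.
 */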

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
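
/*
 * Illustrative note (not in the original source): on this big-endian
 * host the *_LE forms are the alternate-space instructions above with
 * ASI_PRIMARY_LITTLE placed in the asi field, e.g.
 * LDUW_LE == LDUWA | (0x88 << 5), so a single load performs the swap.
 */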

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64
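
/*
 * Illustrative values (not from the original source): a signed 13-bit
 * field holds -4096..4095, so check_fit_i64(4095, 13) and
 * check_fit_i64(-4096, 13) are true while check_fit_i64(4096, 13) is not.
 */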

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    }
    return 0;
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (check_fit_i32(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        /* A 32-bit constant zero-extended to 64 bits.  */
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}
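
/*
 * Worked example (illustrative): arg = 0x12345678 does not fit in 13
 * bits, so we emit "sethi %hi(0x12345678), ret", loading bits 31:10
 * (0x12345400), then OR in the low 10 bits 0x278 to finish the value.
 */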

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_imm32(s, ret, arg);
        return;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 32-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_movi_imm32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}
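
/*
 * Worked example (illustrative): arg = 0x0012345600000000 has
 * lsb = ctz64(arg) = 33 and test = arg >> 33 = 0x91a2b, which fits in
 * 21 bits, so the "shifted" path emits just two instructions:
 * "sethi %hi(0x246acc00), ret" (i.e. test << 10), then
 * "sllx ret, 23, ret" to move it up the remaining lsb - 10 bits.
 */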

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T2);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
}
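
/*
 * Illustrative note (not in the original source): the SLL/SRL pair
 * works because the 32-bit shifts on SPARC v9 zero-extend their 32-bit
 * result, so shifting left then right by 16 leaves exactly the low
 * 16 bits of the input in rd.
 */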

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
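
/*
 * Illustrative note (not in the original source): the 32-bit SDIV/UDIV
 * divide the 64-bit quantity formed by Y (high word) and rs1 (low word),
 * which is why Y must first be loaded with zero (unsigned) or with the
 * sign word of rs1 (signed).
 */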

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}
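
/*
 * Worked example (illustrative): for LTU, after SUBCC sets the carry,
 * "addc %g0, 0, ret" computes 0 + 0 + C = C, i.e. 1 exactly when
 * c1 < c2 unsigned.  For GEU, "subc %g0, -1, ret" computes
 * 0 - (-1) - C = 1 - C, i.e. 1 exactly when the carry is clear.
 */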

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    /* Be careful not to clobber %o7 for a tail call.  */
    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue,
                     tail_call ? TCG_REG_G2 : TCG_REG_O7);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}
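
/*
 * Illustrative mapping (assumed, matching the comment above): the low
 * four TCG_MO_* bits line up with the MEMBAR mmask bits, e.g.
 * TCG_MO_LD_LD (0x01) <-> #LoadLoad and TCG_MO_ST_ST (0x08) <->
 * #StoreStore, so the mask can be passed through untranslated.
 */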

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];

static void emit_extend(TCGContext *s, TCGReg r, int op)
{
    /* Emit zero extend of 8, 16 or 32 bit data as
     * required by the MO_* value op; do nothing for 64 bit.
     */
    switch (op & MO_SIZE) {
    case MO_8:
        tcg_out_ext8u(s, r, r);
        break;
    case MO_16:
        tcg_out_ext16u(s, r, r);
        break;
    case MO_32:
        tcg_out_ext32u(s, r, r);
        break;
    case MO_64:
        break;
    }
}

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEUQ] = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEUQ] = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEUQ] = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEUQ] = helper_be_stq_mmu,
    };

    int i;

    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }

    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        emit_extend(s, TCG_REG_O2, i);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);

        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#else
static const tcg_insn_unit *qemu_unalign_ld_trampoline;
static const tcg_insn_unit *qemu_unalign_st_trampoline;

static void build_trampolines(TCGContext *s)
{
    for (int ld = 0; ld < 2; ++ld) {
        void *helper;

        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }

        if (ld) {
            helper = helper_unaligned_ld;
            qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        } else {
            helper = helper_unaligned_st;
            qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        }

        /* Tail call.  */
        tcg_out_jmpl_const(s, helper, true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer. Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

    build_trampolines(s);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;

    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, r0, addr);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */
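
/*
 * Worked example (illustrative, assuming TARGET_PAGE_BITS = 12 and a
 * 32-byte CPUTLBEntry, i.e. CPU_TLB_ENTRY_BITS = 5): the SRL above
 * shifts the address right by 7, so after ANDing with the mask the
 * result is ((addr >> 12) & (n_entries - 1)) * 32, a byte offset that
 * indexes the CPUTLBEntry array directly.
 */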

static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB] = LDUB,
    [MO_SB] = LDSB,
    [MO_UB | MO_LE] = LDUB,
    [MO_SB | MO_LE] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEUQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEUQ] = LDX_LE,
};

static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB] = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEUQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEUQ] = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, bool is_64)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss.  */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);

    /* We let the helper sign-extend SB and SW, but leave SL for here.  */
    if (is_64 && (memop & MO_SSIZE) == MO_SL) {
        tcg_out_ext32s(s, data, TCG_REG_O0);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_T1, addr);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight load in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: load by pieces of minimum alignment. */
        int ld_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            ld_opc = qemu_ld_opc[a_bits | MO_BE];
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                /* Shift by a_size * 8 bits, not a_size bytes.  */
                tcg_out_arithi(s, data, data, a_size * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else if (a_bits == 0) {
            ld_opc = LDUB;
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = LDSB;
                }
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else {
            ld_opc = qemu_ld_opc[a_bits | MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
                }
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}
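
/*
 * Worked example (illustrative): a 4-byte little-endian load with
 * 2-byte guaranteed alignment (a_bits = 1, s_bits = 2) expands to two
 * LDUH_LE loads at T1 and T1+2, with the second half shifted left
 * 16 bits and ORed into the result.
 */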

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss.  */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_T1, addr);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight store in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: store by pieces of minimum alignment. */
        int st_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            st_opc = qemu_st_opc[a_bits | MO_BE];
            for (i = 0; i < s_size; i += a_size) {
                TCGReg d = data;
                int shift = (s_size - a_size - i) * 8;
                if (shift) {
                    d = TCG_REG_T2;
                    tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
                }
                tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
            }
        } else if (a_bits == 0) {
            tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
            for (i = 1; i < s_size; i++) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
            }
        } else {
            /* Note that ST*A with immediate asi must use indexed address.  */
            st_opc = qemu_st_opc[a_bits + MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_imm13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1.  */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}
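
/*
 * Illustrative note (not in the original source): the delay slot of
 * RETURN executes after the register window has rotated back, so the
 * final case stages the high bits in %i0 (the caller's %o0) before the
 * RETURN and ORs the low bits into %o0 from the delay slot itself.
 */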

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Load link and indirect branch. */
    set_jmp_insn_offset(s, which);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        tcg_out_mov_delay(s, TCG_REG_TB, a0);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, s);
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(sZ, s);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
}

#define ELF_HOST_MACHINE  EM_SPARCV9

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,              /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 30,                             /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,                   /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },          /* DW_CFA_register o7, i7 */
};
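
/*
 * Illustrative decode (not in the original source): fde_def_cfa is
 * DW_CFA_def_cfa (0x0c) naming register 30 (%i6) with the ULEB128
 * encoding of 2047, the SPARC64 stack bias (0xff, 0x0f); fde_ret_save
 * is DW_CFA_register (0x09) mapping return column 15 (%o7) onto
 * register 31 (%i7).
 */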

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}