tcg/tcg-op-ldst.c
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op-common.h"
#include "tcg/tcg-mo.h"
#include "exec/translation-block.h"
#include "exec/plugin-gen.h"
#include "tcg-internal.h"


static void check_max_alignment(unsigned a_bits)
{
#if defined(CONFIG_SOFTMMU)
    /*
     * The requested alignment cannot overlap the TLB flags.
     * FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
     */
    tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
#endif
}

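/*
 * Canonicalize the MemOp for a 32-bit (is64 == 0) or 64-bit (is64 == 1)
 * access: normalize an exact-size alignment to MO_ALIGN, and drop flags
 * that have no effect here (byte swapping of a single byte, and sign
 * extension of a full-width value or of any store).
 */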
static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
    unsigned a_bits = get_alignment_bits(op);

    check_max_alignment(a_bits);

    /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
    if (a_bits == (op & MO_SIZE)) {
        op = (op & ~MO_AMASK) | MO_ALIGN;
    }

    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (is64) {
            op &= ~MO_SIGN;
            break;
        }
        /* fall through */
    default:
        g_assert_not_reached();
    }
    if (st) {
        op &= ~MO_SIGN;
    }
    return op;
}

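/*
 * Emit a load/store opcode.  Values and address are passed as TCGTemps
 * so one routine serves all sizes; on a 32-bit host a 64-bit guest
 * address occupies two adjacent temps (see TCGV_LOW/HIGH).
 */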
static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh,
                     TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) {
        if (vh) {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
        } else {
            tcg_gen_op3(opc, temp_arg(vl), temp_arg(addr), oi);
        }
    } else {
        /* See TCGV_LOW/HIGH. */
        TCGTemp *al = addr + HOST_BIG_ENDIAN;
        TCGTemp *ah = addr + !HOST_BIG_ENDIAN;

        if (vh) {
            tcg_gen_op5(opc, temp_arg(vl), temp_arg(vh),
                        temp_arg(al), temp_arg(ah), oi);
        } else {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(al), temp_arg(ah), oi);
        }
    }
}

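/* As gen_ldst, but further split a 64-bit value on a 32-bit host. */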
static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
        TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
        gen_ldst(opc, vl, vh, addr, oi);
    } else {
        gen_ldst(opc, tcgv_i64_temp(v), NULL, addr, oi);
    }
}

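/*
 * Emit a barrier when the guest memory model requires an ordering that
 * the host does not already provide.
 */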
static void tcg_gen_req_mo(TCGBar type)
{
    type &= tcg_ctx->guest_mo;
    type &= ~TCG_TARGET_DEFAULT_MO;
    if (type) {
        tcg_gen_mb(type | TCG_BAR_SC);
    }
}

/* Only required for loads, where value might overlap addr. */
static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        /* Save a copy of the vaddr for use after a load.  */
        TCGv_i64 temp = tcg_temp_ebb_new_i64();
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr));
        } else {
            tcg_gen_mov_i64(temp, temp_tcgv_i64(addr));
        }
        return temp;
    }
#endif
    return NULL;
}

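/*
 * Emit the plugin callback for a memory access, widening a 32-bit guest
 * address to the 64-bit value the plugin interface expects and reusing
 * @copy_addr if plugin_maybe_preserve_addr already produced one.
 */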
static void
plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi,
                         enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            if (!copy_addr) {
                copy_addr = tcg_temp_ebb_new_i64();
                tcg_gen_extu_i32_i64(copy_addr, temp_tcgv_i32(orig_addr));
            }
            plugin_gen_empty_mem_callback(copy_addr, info);
            tcg_temp_free_i64(copy_addr);
        } else {
            if (copy_addr) {
                plugin_gen_empty_mem_callback(copy_addr, info);
                tcg_temp_free_i64(copy_addr);
            } else {
                plugin_gen_empty_mem_callback(temp_tcgv_i64(orig_addr), info);
            }
        }
    }
#endif
}

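/*
 * Expand a guest load into a 32-bit value.  If the host cannot byte-swap
 * during the memory operation itself, load in host order and emit an
 * explicit bswap afterward.
 */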
static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SSIZE) == MO_SW) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_ld_a32_i32;
    } else {
        opc = INDEX_op_qemu_ld_a64_i32;
    }
    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN
                                           ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                                           : TCG_BSWAP_IZ | TCG_BSWAP_OZ));
            break;
        case MO_32:
            tcg_gen_bswap32_i32(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_ld_i32_int(val, addr, idx, memop);
}

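/*
 * Expand a guest store of a 32-bit value.  If the host cannot byte-swap
 * during the memory operation itself, swap into a scratch temp first.
 */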
static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i32 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 0, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i32();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i32(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st8_a32_i32;
        } else {
            opc = INDEX_op_qemu_st8_a64_i32;
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i32;
        } else {
            opc = INDEX_op_qemu_st_a64_i32;
        }
    }
    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i32(swap);
    }
}

void tcg_gen_qemu_st_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_st_i32_int(val, addr, idx, memop);
}

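/*
 * Expand a guest load into a 64-bit value.  On 32-bit hosts, loads
 * smaller than 64 bits reduce to the 32-bit expansion plus an
 * extension into the high half.
 */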
static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
    TCGOpcode opc;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 1, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_ld_a32_i64;
    } else {
        opc = INDEX_op_qemu_ld_a64_i64;
    }
    gen_ldst_i64(opc, val, addr, oi);
    plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        int flags = (orig_memop & MO_SIGN
                     ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                     : TCG_BSWAP_IZ | TCG_BSWAP_OZ);
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(val, val, flags);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(val, val, flags);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_ld_i64_int(val, addr, idx, memop);
}

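/*
 * Expand a guest store of a 64-bit value.  On 32-bit hosts, stores
 * smaller than 64 bits reduce to the 32-bit expansion.
 */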
static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i64 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 1, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i64();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(swap, val, 0);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_st_a32_i64;
    } else {
        opc = INDEX_op_qemu_st_a64_i64;
    }
    gen_ldst_i64(opc, val, addr, oi);
    plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i64(swap);
    }
}

void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_st_i64_int(val, addr, idx, memop);
}

/*
 * Return true if @mop, without knowledge of the pointer alignment,
 * does not require 16-byte atomicity, and it would be advantageous
 * to avoid a call to a helper function.
 */
static bool use_two_i64_for_i128(MemOp mop)
{
#ifdef CONFIG_SOFTMMU
    /* Two softmmu tlb lookups are larger than one function call. */
    return false;
#else
    /*
     * For user-only, two 64-bit operations may well be smaller than a call.
     * Determine if that would be legal for the requested atomicity.
     */
    switch (mop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
    case MO_ATOM_IFALIGN_PAIR:
        return true;
    case MO_ATOM_IFALIGN:
    case MO_ATOM_SUBALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_WITHIN16_PAIR:
        /* In a serialized context, no atomicity is required. */
        return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL);
    default:
        g_assert_not_reached();
    }
#endif
}

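/*
 * Split a 128-bit MemOp into a pair of 64-bit MemOps, distributing the
 * alignment constraint of the original across the two halves.
 */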
static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
{
    MemOp mop_1 = orig, mop_2;

    /* Reduce the size to 64-bit. */
    mop_1 = (mop_1 & ~MO_SIZE) | MO_64;

    /* Retain the alignment constraints of the original. */
    switch (orig & MO_AMASK) {
    case MO_UNALN:
    case MO_ALIGN_2:
    case MO_ALIGN_4:
        mop_2 = mop_1;
        break;
    case MO_ALIGN_8:
        /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        mop_2 = mop_1;
        break;
    case MO_ALIGN:
        /* Second has 8-byte alignment; first has 16-byte alignment. */
        mop_2 = mop_1;
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16;
        break;
    case MO_ALIGN_16:
    case MO_ALIGN_32:
    case MO_ALIGN_64:
        /* Second has 8-byte alignment; first retains original. */
        mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        break;
    default:
        g_assert_not_reached();
    }

    /* Use a byte ordering implemented by the host. */
    if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) {
        mop_1 &= ~MO_BSWAP;
        mop_2 &= ~MO_BSWAP;
    }

    ret[0] = mop_1;
    ret[1] = mop_2;
}

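/*
 * Helpers receive the guest address as a TCGv_i64; widen a 32-bit
 * address into a fresh temp, released again by maybe_free_addr64.
 */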
static TCGv_i64 maybe_extend_addr64(TCGTemp *addr)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        TCGv_i64 a64 = tcg_temp_ebb_new_i64();
        tcg_gen_extu_i32_i64(a64, temp_tcgv_i32(addr));
        return a64;
    }
    return temp_tcgv_i64(addr);
}

static void maybe_free_addr64(TCGv_i64 a64)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        tcg_temp_free_i64(a64);
    }
}

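/*
 * Expand a 128-bit guest load one of three ways: a native i128 host
 * opcode, two 64-bit loads when use_two_i64_for_i128 allows it, or a
 * call to the ld_i128 helper.
 */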
static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    const MemOpIdx orig_oi = make_memop_idx(memop, idx);
    TCGv_i64 ext_addr = NULL;
    TCGOpcode opc;

    check_max_alignment(get_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        bool need_bswap = false;
        MemOpIdx oi = orig_oi;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = TCGV128_HIGH(val);
            hi = TCGV128_LOW(val);
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_ld_a32_i128;
        } else {
            opc = INDEX_op_qemu_ld_a64_i128;
        }
        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_gen_bswap64_i64(lo, lo);
            tcg_gen_bswap64_i64(hi, hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y;
        bool need_bswap;

        canonicalize_memop_i128_as_i64(mop, memop);
        need_bswap = (mop[0] ^ memop) & MO_BSWAP;

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_ld_a32_i64;
        } else {
            opc = INDEX_op_qemu_ld_a64_i64;
        }

        /*
         * Since there are no global TCGv_i128, there is no visible state
         * changed if the second load faults.  Load directly into the two
         * subwords.
         */
        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));

        if (need_bswap) {
            tcg_gen_bswap64_i64(x, x);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
        tcg_temp_free_internal(addr_p8);

        if (need_bswap) {
            tcg_gen_bswap64_i64(y, y);
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_ld_i128(val, cpu_env, temp_tcgv_i64(addr),
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);
}

void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_ld_i128_int(val, addr, idx, memop);
}

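/*
 * Expand a 128-bit guest store along the same three paths: a native
 * i128 host opcode, two 64-bit stores, or the st_i128 helper.
 */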
static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    const MemOpIdx orig_oi = make_memop_idx(memop, idx);
    TCGv_i64 ext_addr = NULL;
    TCGOpcode opc;

    check_max_alignment(get_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        MemOpIdx oi = orig_oi;
        bool need_bswap = false;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = tcg_temp_ebb_new_i64();
            hi = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val));
            tcg_gen_bswap64_i64(hi, TCGV128_LOW(val));
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i128;
        } else {
            opc = INDEX_op_qemu_st_a64_i128;
        }
        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_temp_free_i64(lo);
            tcg_temp_free_i64(hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y, b = NULL;

        canonicalize_memop_i128_as_i64(mop, memop);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i64;
        } else {
            opc = INDEX_op_qemu_st_a64_i64;
        }

        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        if ((mop[0] ^ memop) & MO_BSWAP) {
            b = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(b, x);
            x = b;
        }

        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        if (b) {
            tcg_gen_bswap64_i64(b, y);
            gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx));
            tcg_temp_free_i64(b);
        } else {
            gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
        }
        tcg_temp_free_internal(addr_p8);
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_st_i128(cpu_env, temp_tcgv_i64(addr), val,
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_W);
}

void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_st_i128_int(val, addr, idx, memop);
}

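/* Sign- or zero-extend @val per the MO_SSIZE bits of @opc. */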
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    default:
        tcg_gen_mov_i32(ret, val);
        break;
    }
}

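/* As above, for a 64-bit value. */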
static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    default:
        tcg_gen_mov_i64(ret, val);
        break;
    }
}

typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv_i64,
                                   TCGv_i128, TCGv_i128, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i32);

#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif
#if HAVE_CMPXCHG128
# define WITH_ATOMIC128(X) X,
#else
# define WITH_ATOMIC128(X)
#endif

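/*
 * cmpxchg helpers, indexed by size and endianness; entries the host
 * cannot implement atomically are left NULL.
 */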
static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
    WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le)
    WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be)
};

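/*
 * Expand cmpxchg without any atomicity guarantee: load the old value,
 * select between @newv and the old value with movcond, and store the
 * result back unconditionally.
 */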
static void tcg_gen_nonatomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                              TCGv_i32 cmpv, TCGv_i32 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);
    tcg_temp_free_i32(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, t1, memop);
    } else {
        tcg_gen_mov_i32(retv, t1);
    }
    tcg_temp_free_i32(t1);
}

void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                       TCGv_i32 cmpv, TCGv_i32 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                           TCGv_i32 cmpv, TCGv_i32 newv,
                                           TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    memop = tcg_canonicalize_memop(memop, 0, 0);
    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, retv, memop);
    }
}

void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                    TCGv_i32 cmpv, TCGv_i32 newv,
                                    TCGArg idx, MemOp memop,
                                    TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_atomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                              TCGv_i64 cmpv, TCGv_i64 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i64 t1, t2;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_nonatomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                          TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
        return;
    }

    t1 = tcg_temp_ebb_new_i64();
    t2 = tcg_temp_ebb_new_i64();

    tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);
    tcg_temp_free_i64(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i64(retv, t1, memop);
    } else {
        tcg_gen_mov_i64(retv, t1);
    }
    tcg_temp_free_i64(t1);
}

void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                       TCGv_i64 cmpv, TCGv_i64 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                           TCGv_i64 cmpv, TCGv_i64 newv,
                                           TCGArg idx, MemOp memop)
{
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_cx_i64 gen;

        memop = tcg_canonicalize_memop(memop, 1, 0);
        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        if (gen) {
            MemOpIdx oi = make_memop_idx(memop, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(cpu_env);

        /*
         * Produce a result for a well-formed opcode stream.  This satisfies
         * liveness for set before used, which happens before this dead code
         * is removed.
         */
        tcg_gen_movi_i64(retv, 0);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_atomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                       TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
    } else {
        TCGv_i32 c32 = tcg_temp_ebb_new_i32();
        TCGv_i32 n32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32_int(r32, addr, c32, n32,
                                       idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}

void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                    TCGv_i64 cmpv, TCGv_i64 newv,
                                    TCGArg idx, MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_atomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

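/*
 * As above for a 128-bit value: compare the halves with xor/or so a
 * single movcond condition covers both, then write back unconditionally.
 * 32-bit hosts call out to a helper, as the inline expansion is too large.
 */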
static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                               TCGv_i128 cmpv, TCGv_i128 newv,
                                               TCGArg idx, MemOp memop)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* Inline expansion below is simply too large for 32-bit hosts. */
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);

        gen_helper_nonatomic_cmpxchgo(retv, cpu_env, a64, cmpv, newv,
                                      tcg_constant_i32(oi));
        maybe_free_addr64(a64);
    } else {
        TCGv_i128 oldv = tcg_temp_ebb_new_i128();
        TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
        TCGv_i64 z = tcg_constant_i64(0);

        tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop);

        /* Compare i128 */
        tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv));
        tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv));
        tcg_gen_or_i64(t0, t0, t1);

        /* tmpv = equal ? newv : oldv */
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z,
                            TCGV128_LOW(newv), TCGV128_LOW(oldv));
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z,
                            TCGV128_HIGH(newv), TCGV128_HIGH(oldv));

        /* Unconditional writeback. */
        tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop);
        tcg_gen_mov_i128(retv, oldv);

        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i128(tmpv);
        tcg_temp_free_i128(oldv);
    }
}

void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                        TCGv_i128 cmpv, TCGv_i128 newv,
                                        TCGArg idx, MemOp memop,
                                        TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                            TCGv_i128 cmpv, TCGv_i128 newv,
                                            TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i128 gen;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    if (gen) {
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);
        gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
        maybe_free_addr64(a64);
        return;
    }

    gen_helper_exit_atomic(cpu_env);

    /*
     * Produce a result for a well-formed opcode stream.  This satisfies
     * liveness for set before used, which happens before this dead code
     * is removed.
     */
    tcg_gen_movi_i64(TCGV128_LOW(retv), 0);
    tcg_gen_movi_i64(TCGV128_HIGH(retv), 0);
}

void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                     TCGv_i128 cmpv, TCGv_i128 newv,
                                     TCGArg idx, MemOp memop,
                                     TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_atomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

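/*
 * Expand a read-modify-write operation without any atomicity guarantee:
 * load the old value, apply @gen, store the result; return the old
 * (new_val == 0) or new (new_val == 1) value.
 */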
static void do_nonatomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop);
    tcg_gen_ext_i32(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void do_atomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(ret, cpu_env, a64, val, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}

static void do_nonatomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
    TCGv_i64 t2 = tcg_temp_ebb_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop);
    tcg_gen_ext_i64(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_op_i64 gen = table[memop & (MO_SIZE | MO_BSWAP)];

        if (gen) {
            MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(ret, cpu_env, a64, val, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(cpu_env);
        /*
         * Produce a result, so that we have a well-formed opcode stream
         * with respect to uses of the result in the (dead) code following.
         */
        tcg_gen_movi_i64(ret, 0);
    } else {
        TCGv_i32 v32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}

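/*
 * Instantiate the helper table and the i32/i64 front ends for one
 * read-modify-write operation.  NAME selects the helper, OP the inline
 * expansion, and NEW whether the result is the value after (1) or
 * before (0) the operation.
 */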
#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = {          \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr,       \
                                     TCGv_i32 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr,       \
                                     TCGv_i64 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}

GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
GEN_ATOMIC_HELPER(fetch_umax, umax, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
GEN_ATOMIC_HELPER(umax_fetch, umax, 1)

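/* For xchg: ignore the loaded value @a and store @b unchanged. */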
static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER