#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
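
/*
 * Illustrative sketch (not part of the original header): a typical use of
 * wmb() is ordering payload stores before the store that hands ownership
 * to a device.  "desc" and DESC_HW_OWNED are hypothetical driver names:
 *
 *	desc->addr = dma_handle;
 *	desc->len  = len;
 *	wmb();				// payload visible before ownership flips
 *	desc->status = DESC_HW_OWNED;
 */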

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *	0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	asm ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"r"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
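
/*
 * Illustrative sketch (not part of the original header): the generic
 * array_index_nospec() in linux/nospec.h uses this mask to clamp an index
 * under speculation, roughly:
 *
 *	if (index < size) {
 *		index &= array_index_mask_nospec(index, size);
 *		val = array[index];	// speculative OOB access is clamped to element 0
 *	}
 *
 * The cmp/sbb pair computes the mask without a branch the CPU could
 * mispredict: cmp sets the carry flag when index < size, and sbb of a
 * register with itself yields -CF, i.e. ~0UL on success and 0 otherwise.
 */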

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
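
/*
 * Illustrative sketch (not part of the original header): dma_rmb() orders
 * reads from DMA-coherent memory, e.g. a NIC rx ring where the descriptor
 * status must be read before the data fields are trusted ("desc" and
 * DESC_DD are hypothetical driver names):
 *
 *	if (desc->status & DESC_DD) {
 *		dma_rmb();		// status read before data fields
 *		len = desc->len;
 *	}
 */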

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	dma_rmb()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */
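
/*
 * Illustrative sketch (not part of the original header): smp_store_mb()
 * is a store followed by a full barrier, as in the classic sleep/wakeup
 * pattern where the task state store must be ordered before re-checking
 * the wakeup condition ("condition" is a hypothetical flag):
 *
 *	smp_store_mb(current->state, TASK_UNINTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 */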

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define smp_store_release(p, v)					\
do {								\
	compiletime_assert_atomic_type(*p);			\
	smp_mb();						\
	WRITE_ONCE(*p, v);					\
} while (0)

#define smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = READ_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	smp_mb();						\
	___p1;							\
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v)					\
do {								\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	WRITE_ONCE(*p, v);					\
} while (0)

#define smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = READ_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	___p1;							\
})

#endif
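
/*
 * Illustrative sketch (not part of the original header): smp_store_release()
 * pairs with smp_load_acquire() to publish data between CPUs; "data" and
 * "flag" are hypothetical shared variables:
 *
 *	CPU 0				CPU 1
 *	data = 42;			if (smp_load_acquire(&flag))
 *	smp_store_release(&flag, 1);		r = data;	// guaranteed 42
 *
 * On TSO x86 a compiler barrier() suffices; with PPRO_FENCE the macros
 * above fall back to a full smp_mb().
 */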

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()
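
/*
 * Illustrative sketch (not part of the original header): these are
 * compiler-only barriers on x86 because lock-prefixed RMW instructions
 * already order memory.  Generic code still writes, for example:
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();
 *	atomic_dec(&obj->ref_count);
 *
 * which on x86 compiles to a plain store, a barrier(), and a lock'ed dec.
 */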

#endif /* _ASM_X86_BARRIER_H */