
x86: Implement array_index_mask_nospec
[sagit-ice-cold/kernel_xiaomi_msm8998.git] arch/x86/include/asm/barrier.h
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()    asm volatile("mfence":::"memory")
#define rmb()   asm volatile("lfence":::"memory")
#define wmb()   asm volatile("sfence" ::: "memory")
#endif

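/*
 * Illustrative only (not part of this header): a minimal sketch of why a
 * write barrier matters when handing data to a device. The descriptor
 * layout and field names below are hypothetical (coherent DMA memory that
 * the device polls).
 */
#if 0
struct example_desc {
	u64 addr;
	u32 len;
	u32 valid;	/* device starts the transfer once this becomes 1 */
};

static void example_post_desc(struct example_desc *d, u64 addr, u32 len)
{
	d->addr = addr;
	d->len  = len;
	wmb();		/* descriptor fields visible before the valid flag */
	d->valid = 1;
}
#endif
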
/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *      bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	/*
	 * cmp sets CF when index < size (unsigned compare); sbb then
	 * computes mask - mask - CF, i.e. ~0UL when in bounds and 0 when
	 * out of bounds, without a branch the CPU could speculate past.
	 */
	asm ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"r"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec

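/*
 * Illustrative only (not part of this header): how the mask is typically
 * consumed, in the style of array_index_nospec() from linux/nospec.h.
 * The table and nr_entries names below are hypothetical.
 */
#if 0
static unsigned long example_table_lookup(const unsigned long *table,
					  unsigned long nr_entries,
					  unsigned long index)
{
	if (index >= nr_entries)
		return 0;
	/*
	 * Even if the branch above is mispredicted, index & mask is forced
	 * to 0, so no attacker-controlled out-of-bounds load can occur
	 * under speculation.
	 */
	index &= array_index_mask_nospec(index, nr_entries);
	return table[index];
}
#endif
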
#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()       rmb()
#else
#define dma_rmb()       barrier()
#endif
#define dma_wmb()       barrier()

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       dma_rmb()
#define smp_wmb()       barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */

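/*
 * Illustrative only (not part of this header): smp_store_mb() pairs a store
 * with a full barrier, which matters in store-then-load handshakes such as
 * the sleep/wakeup sketch below. The flag names are hypothetical.
 */
#if 0
static int example_may_sleep(int *want_sleep, int *work_pending)
{
	smp_store_mb(*want_sleep, 1);	/* publish intent, then full barrier */
	if (READ_ONCE(*work_pending))	/* load cannot move before the store */
		return 0;		/* new work raced in, do not sleep */
	return 1;			/* safe to sleep */
}
#endif
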
#define read_barrier_depends()          do { } while (0)
#define smp_read_barrier_depends()      do { } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define smp_store_release(p, v)                                         \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        smp_mb();                                                       \
        WRITE_ONCE(*p, v);                                              \
} while (0)

#define smp_load_acquire(p)                                             \
({                                                                      \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
        smp_mb();                                                       \
        ___p1;                                                          \
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v)                                         \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
} while (0)

#define smp_load_acquire(p)                                             \
({                                                                      \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        ___p1;                                                          \
})

#endif

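/*
 * Illustrative only (not part of this header): a minimal publish/consume
 * sketch using the acquire/release pair defined above. On regular TSO x86
 * these compile down to plain loads and stores plus a compiler barrier.
 * The example_data/example_ready variables below are hypothetical.
 */
#if 0
static int example_data;
static int example_ready;

static void example_publish(int v)
{
	example_data = v;			/* prepare the payload */
	smp_store_release(&example_ready, 1);	/* payload visible before the flag */
}

static int example_consume(void)
{
	if (smp_load_acquire(&example_ready))	/* flag read ordered before payload read */
		return example_data;
	return -1;
}
#endif
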
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic()  barrier()

#endif /* _ASM_X86_BARRIER_H */