commit 6a62733deb71b498ec241a314711901f0fff39b8
[uclinux-h8/linux.git] arch/h8300/include/asm/atomic.h

#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__

#include <linux/types.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)          (*(volatile int *)&(v)->counter)
#define atomic_set(v, i)        (((v)->counter) = i)
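
/*
 * Example of the "resource counting" use mentioned above (an illustrative
 * sketch only, not part of this header; free_the_object() stands in for
 * whatever cleanup the caller actually needs):
 *
 *      static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *      void get_ref(void)
 *      {
 *              atomic_inc(&refcnt);
 *      }
 *
 *      void put_ref(void)
 *      {
 *              if (atomic_dec_and_test(&refcnt))
 *                      free_the_object();
 *      }
 */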

#include <linux/kernel.h>

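/*
 * The H8/300 is uniprocessor, so the read-modify-write sequences below are
 * made atomic by masking interrupts for their duration: "stc ccr,..." saves
 * the condition code register, "orc #0x80,ccr" sets the I bit (blocking
 * interrupts), and the trailing "ldc ...,ccr" restores the saved state.
 */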
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned short ccr;
        int ret;

        __asm__ __volatile__ (
                "stc ccr,%w2\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %1,%0\n\t"
                "add.l %3,%0\n\t"
                "mov.l %0,%1\n\t"
                "ldc %w2,ccr"
                : "=r"(ret), "+m"(v->counter), "=r"(ccr)
                : "ri"(i));
        return ret;
}

#define atomic_add(i, v) atomic_add_return(i, v)
#define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned short ccr;
        int ret;

        __asm__ __volatile__ (
                "stc ccr,%w2\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %1,%0\n\t"
                "sub.l %3,%0\n\t"
                "mov.l %0,%1\n\t"
                "ldc %w2,ccr"
                : "=r"(ret), "+m"(v->counter), "=r"(ccr)
                : "ri"(i));
        return ret;
}

#define atomic_sub(i, v) atomic_sub_return(i, v)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

static inline int atomic_inc_return(atomic_t *v)
{
        unsigned short ccr;
        int ret;

        __asm__ __volatile__ (
                "stc ccr,%w2\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %1,%0\n\t"
                "inc.l #1,%0\n\t"
                "mov.l %0,%1\n\t"
                "ldc %w2,ccr"
                : "=r"(ret), "+m"(v->counter), "=r"(ccr));
        return ret;
}

#define atomic_inc(v) atomic_inc_return(v)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
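
/*
 * For instance (illustrative only): a completion counter initialised to
 * -(nr_workers) and incremented by each worker as it finishes; the last
 * worker to call atomic_inc_and_test() is the one that sees it return true.
 */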

static inline int atomic_dec_return(atomic_t *v)
{
        unsigned short ccr;
        int ret;

        __asm__ __volatile__ (
                "stc ccr,%w2\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %1,%0\n\t"
                "dec.l #1,%0\n\t"
                "mov.l %0,%1\n\t"
                "ldc %w2,ccr"
                : "=r"(ret), "+m"(v->counter), "=r"(ccr));
        return ret;
}

#define atomic_dec(v) atomic_dec_return(v)

static inline int atomic_dec_and_test(atomic_t *v)
{
        unsigned short ccr;
        int ret;

        __asm__ __volatile__ (
                "stc ccr,%w2\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %1,%0\n\t"
                "dec.l #1,%0\n\t"
                "mov.l %0,%1\n\t"
                "ldc %w2,ccr"
                : "=r"(ret), "+m"(v->counter), "=r"(ccr));
        return ret == 0;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned short ccr;

        __asm__ __volatile__ (
                "stc ccr,%w2\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %1,%0\n\t"
                "cmp.l %3,%0\n\t"
                "bne 1f\n\t"
                "mov.l %4,%1\n"
                "1:\tldc %w2,ccr"
                : "=r"(ret), "+m"(v->counter), "=r"(ccr)
                : "g"(old), "r"(new));
        return ret;
}
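
/*
 * A typical compare-and-swap retry loop built on atomic_cmpxchg() (an
 * illustrative sketch only; add_if_positive() is a hypothetical helper,
 * not part of this header):
 *
 *      static int add_if_positive(atomic_t *v, int delta)
 *      {
 *              int old, new;
 *
 *              do {
 *                      old = atomic_read(v);
 *                      if (old <= 0)
 *                              return 0;
 *                      new = old + delta;
 *              } while (atomic_cmpxchg(v, old, new) != old);
 *              return 1;
 *      }
 */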

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int ret;
        unsigned char ccr;

        __asm__ __volatile__ (
                "stc ccr,%w2\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %1,%0\n\t"
                "cmp.l %4,%0\n\t"
                "beq 1f\n\t"
                "add.l %0,%3\n\t"
                "mov.l %3,%1\n"
                "1:\tldc %w2,ccr"
                : "=r"(ret), "+m"(v->counter), "=r"(ccr), "+r"(a)
                : "ri"(u));
        return ret;
}
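
/*
 * __atomic_add_unless() returns the old value of @v and only performs the
 * addition when that old value differs from @u; the generic
 * atomic_add_unless() and atomic_inc_not_zero() wrappers in
 * <linux/atomic.h> are built on top of it.
 */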
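
/*
 * atomic_clear_mask()/atomic_set_mask() operate on a plain unsigned long
 * word rather than an atomic_t: they atomically clear or set the bits in
 * @mask under the same interrupt-masking scheme as the helpers above.
 */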
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
        unsigned char ccr;
        unsigned long tmp;

        __asm__ __volatile__(
                "stc ccr,%w2\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %0,%1\n\t"
                "and.l %3,%1\n\t"
                "mov.l %1,%0\n\t"
                "ldc %w2,ccr"
                : "+m"(*v), "=r"(tmp), "=r"(ccr)
                : "g"(~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
        unsigned char ccr;
        unsigned long tmp;

        __asm__ __volatile__(
                "stc ccr,%w2\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %0,%1\n\t"
                "or.l %3,%1\n\t"
                "mov.l %1,%0\n\t"
                "ldc %w2,ccr"
                : "+m"(*v), "=r"(tmp), "=r"(ccr)
                : "g"(mask));
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#endif /* __ARCH_H8300_ATOMIC__ */