/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);
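
/*
 * For reference, the out-of-line fallbacks above follow a simple
 * irq-save/modify/irq-restore pattern.  A minimal sketch of the shape of
 * __mips_set_bit (illustrative only; the real definition lives in
 * bitops.c):
 *
 *	void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
 *	{
 *		volatile unsigned long *a = &addr[nr >> SZLONG_LOG];
 *		unsigned long mask = 1UL << (nr & SZLONG_MASK);
 *		unsigned long flags;
 *
 *		raw_local_irq_save(flags);
 *		*a |= mask;
 *		raw_local_irq_restore(flags);
 *	}
 */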
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (!kernel_uses_llsc) {
		__mips_set_bit(nr, addr);
		return;
	}

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
		return;
	}

	do {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
		"	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} while (unlikely(!temp));
}
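
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * header's API): treat an array of unsigned long as a bitmap and set
 * one flag atomically.
 */
static inline void example_set_feature_flag(volatile unsigned long *bitmap,
					    unsigned long feature)
{
	/* Atomic read-modify-write; safe against concurrent set/clear. */
	set_bit(feature, bitmap);
}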
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (!kernel_uses_llsc) {
		__mips_clear_bit(nr, addr);
		return;
	}

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
		return;
	}

	do {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
		"	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
	} while (unlikely(!temp));
}
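
/*
 * Illustrative sketch (hypothetical caller): clear_bit() has no implied
 * barrier, so unlock-style users order their prior stores explicitly.
 */
static inline void example_clear_flag_ordered(volatile unsigned long *bitmap,
					      unsigned long flag)
{
	smp_mb__before_atomic();	/* order earlier stores first */
	clear_bit(flag, bitmap);
}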
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (!kernel_uses_llsc) {
		__mips_change_bit(nr, addr);
		return;
	}

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
		return;
	}

	do {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
		"	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} while (unlikely(!temp));
}
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long res, temp;

	smp_mb__before_llsc();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_set_bit(nr, addr);
	} else if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	}

	smp_llsc_mb();

	return res != 0;
}
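
/*
 * Illustrative sketch (hypothetical caller): the classic "only one
 * winner" pattern; the thread that flips the bit from 0 to 1 returns
 * true, every other concurrent caller gets false.
 */
static inline bool example_claim_once(volatile unsigned long *state,
				      unsigned long bit_nr)
{
	return !test_and_set_bit(bit_nr, state);
}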
/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long res, temp;

	if (!kernel_uses_llsc) {
		res = __mips_test_and_set_bit_lock(nr, addr);
	} else if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	}

	smp_llsc_mb();

	return res != 0;
}
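
/*
 * Illustrative sketch (hypothetical caller): a minimal bit lock built
 * from the acquire/release pair test_and_set_bit_lock() and
 * clear_bit_unlock() defined above.
 */
static inline void example_bit_lock(volatile unsigned long *word)
{
	while (test_and_set_bit_lock(0, word))
		;	/* spin; a real lock would call cpu_relax() */
}

static inline void example_bit_unlock(volatile unsigned long *word)
{
	clear_bit_unlock(0, word);	/* release semantics */
}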
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long res, temp;

	smp_mb__before_llsc();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_clear_bit(nr, addr);
	} else if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
	} else {
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	}

	smp_llsc_mb();

	return res != 0;
}
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long res, temp;

	smp_mb__before_llsc();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_change_bit(nr, addr);
	} else if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	}

	smp_llsc_mb();

	return res != 0;
}
#include <asm-generic/bitops/non-atomic.h>
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation.  It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}
/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant 1
 * bit in a word.  Returns -1 if no 1 bit exists.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;

	return num;
}
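
/*
 * Worked example (values only): __fls(0x40000000) == 30.  On a CLZ-capable
 * CPU, clz counts 1 leading zero, so 31 - 1 == 30; the fallback narrows
 * num from 31 by skipping the empty top 16/8/4/2/1-bit halves.
 */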
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
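
/*
 * Why this works (illustrative): in two's complement, word & -word
 * isolates the lowest set bit, e.g. 0b0110 & -0b0110 == 0b0010, so the
 * most significant bit of the result is also its least significant one.
 */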
/*
 * fls - find last bit set.
 * @x: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#include <asm-generic/bitops/fls64.h>
/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
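
/* Example values: ffs(0) == 0, ffs(1) == 1, ffs(8) == 4 -- 1-based, unlike __ffs(). */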
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */