--- /dev/null
+#ifndef __ALSA_IATOMIC__
+#define __ALSA_IATOMIC__
+
+#ifdef __i386__
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting, etc.
+ */
+
+#define LOCK "lock ; "
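+
+/*
+ * The "lock" prefix asserts a bus/cache lock around the read-modify-
+ * write instruction that follows, which is what makes these operations
+ * atomic with respect to other processors.
+ */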
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v,i) (((v)->counter) = (i))
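+
+/*
+ * Illustrative usage sketch (not part of this header):
+ *
+ *	static atomic_t count = ATOMIC_INIT(0);
+ *
+ *	atomic_set(&count, 10);
+ *	if (atomic_read(&count) > 0)
+ *		...
+ */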
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v. Note that the guaranteed useful range
+ * of an atomic_t is only 24 bits.
+ */
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "addl %1,%0"
+ :"=m" (v->counter)
+ :"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic_sub - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+static __inline__ void atomic_sub(int i, atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "subl %1,%0"
+ :"=m" (v->counter)
+ :"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ LOCK "subl %2,%0; sete %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"ir" (i), "m" (v->counter) : "memory");
+ return c;
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "incl %0"
+ :"=m" (v->counter)
+ :"m" (v->counter));
+}
+
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "decl %0"
+ :"=m" (v->counter)
+ :"m" (v->counter));
+}
+
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ LOCK "decl %0; sete %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"m" (v->counter) : "memory");
+ return c != 0;
+}
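+
+/*
+ * A common pattern built on atomic_dec_and_test() is reference
+ * counting, where the caller that drops the count to zero releases
+ * the object. Illustrative sketch; release_object() is hypothetical:
+ *
+ *	static atomic_t refcount = ATOMIC_INIT(1);
+ *
+ *	void put_object(void)
+ *	{
+ *		if (atomic_dec_and_test(&refcount))
+ *			release_object();
+ *	}
+ */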
+
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+static __inline__ int atomic_inc_and_test(atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ LOCK "incl %0; sete %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"m" (v->counter) : "memory");
+ return c != 0;
+}
+
+/**
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+static __inline__ int atomic_add_negative(int i, atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ LOCK "addl %2,%0; sets %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"ir" (i), "m" (v->counter) : "memory");
+ return c;
+}
+
+/* These are x86-specific, used by some header files */
+#define atomic_clear_mask(mask, addr) \
+__asm__ __volatile__(LOCK "andl %0,%1" \
+: : "r" (~(mask)),"m" (*addr) : "memory")
+
+#define atomic_set_mask(mask, addr) \
+__asm__ __volatile__(LOCK "orl %0,%1" \
+: : "r" (mask),"m" (*addr) : "memory")
+
+#endif /* __i386__ */
+
+#ifdef __ia64__
+
+/*
+ * On IA-64, the counter must always be volatile to ensure that the
+ * memory accesses are ordered.
+ */
+typedef struct { volatile __s32 counter; } atomic_t;
+
+#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+static __inline__ int
+ia64_atomic_add (int i, atomic_t *v)
+{
+ __s32 old, new;
+ CMPXCHG_BUGCHECK_DECL
+
+ do {
+ CMPXCHG_BUGCHECK(v);
+ old = atomic_read(v);
+ new = old + i;
+ } while (ia64_cmpxchg("acq", v, old, old + i, sizeof(atomic_t)) != old);
+ return new;
+}
+
+static __inline__ int
+ia64_atomic_sub (int i, atomic_t *v)
+{
+ __s32 old, new;
+ CMPXCHG_BUGCHECK_DECL
+
+ do {
+ CMPXCHG_BUGCHECK(v);
+ old = atomic_read(v);
+ new = old - i;
+ } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
+ return new;
+}
+
+/*
+ * Atomically add I to V and return TRUE if the resulting value is
+ * negative.
+ */
+static __inline__ int
+atomic_add_negative (int i, atomic_t *v)
+{
+ return ia64_atomic_add(i, v) < 0;
+}
+
+#define atomic_add_return(i,v) \
+ ((__builtin_constant_p(i) && \
+ ( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
+ || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
+ ? ia64_fetch_and_add(i, &(v)->counter) \
+ : ia64_atomic_add(i, v))
+
+#define atomic_sub_return(i,v) \
+ ((__builtin_constant_p(i) && \
+ ( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
+ || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
+ ? ia64_fetch_and_add(-(i), &(v)->counter) \
+ : ia64_atomic_sub(i, v))
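+
+/*
+ * The constant checks above mirror the increments that the ia64
+ * fetchadd instruction can encode directly (+/-1, 4, 8, 16); any
+ * other value falls back to the cmpxchg loop.
+ */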
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
+
+#define atomic_add(i,v) atomic_add_return((i), (v))
+#define atomic_sub(i,v) atomic_sub_return((i), (v))
+#define atomic_inc(v) atomic_add(1, (v))
+#define atomic_dec(v) atomic_sub(1, (v))
+
+#endif /* __ia64__ */
+
+#ifdef __alpha__
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting, etc.
+ *
+ * But use these as seldom as possible since they are much slower
+ * than regular operations.
+ */
+
+
+/*
+ * Counter is volatile to make sure gcc doesn't try to be clever
+ * and move things around on us. We need to use _exactly_ the address
+ * the user gave us, not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) ((v)->counter = (i))
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+
+static __inline__ void atomic_add(int i, atomic_t * v)
+{
+ unsigned long temp;
+ __asm__ __volatile__(
+ "1: ldl_l %0,%1\n"
+ " addl %0,%2,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (v->counter)
+ :"Ir" (i), "m" (v->counter));
+}
+
+static __inline__ void atomic_sub(int i, atomic_t * v)
+{
+ unsigned long temp;
+ __asm__ __volatile__(
+ "1: ldl_l %0,%1\n"
+ " subl %0,%2,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (v->counter)
+ :"Ir" (i), "m" (v->counter));
+}
+
+/*
+ * Same as above, but return the result value
+ */
+static __inline__ long atomic_add_return(int i, atomic_t * v)
+{
+ long temp, result;
+ __asm__ __volatile__(
+ "1: ldl_l %0,%1\n"
+ " addl %0,%3,%2\n"
+ " addl %0,%3,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,2f\n"
+ " mb\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result)
+ :"Ir" (i), "m" (v->counter) : "memory");
+ return result;
+}
+
+static __inline__ long atomic_sub_return(int i, atomic_t * v)
+{
+ long temp, result;
+ __asm__ __volatile__(
+ "1: ldl_l %0,%1\n"
+ " subl %0,%3,%2\n"
+ " subl %0,%3,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,2f\n"
+ " mb\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result)
+ :"Ir" (i), "m" (v->counter) : "memory");
+ return result;
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic_inc_return(v) atomic_add_return(1,(v))
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+#define atomic_inc(v) atomic_add(1,(v))
+#define atomic_dec(v) atomic_sub(1,(v))
+
+#endif /* __alpha__ */
+
+#ifdef __ppc__
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
+extern void atomic_set_mask(unsigned long mask, unsigned long *addr);
+
+#define SMP_ISYNC "\n\tisync"
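+
+/*
+ * The isync issued after a successful stwcx. acts as an acquire
+ * barrier, so the *_return variants below also order subsequent
+ * memory accesses.
+ */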
+
+static __inline__ void atomic_add(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_add\n\
+ add %0,%2,%0\n\
+ stwcx. %0,0,%3\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_add_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_add_return\n\
+ add %0,%1,%0\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ SMP_ISYNC
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ void atomic_sub(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_sub\n\
+ subf %0,%2,%0\n\
+ stwcx. %0,0,%3\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_sub_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_sub_return\n\
+ subf %0,%1,%0\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ SMP_ISYNC
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_inc\n\
+ addic %0,%0,1\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_inc_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1 # atomic_inc_return\n\
+ addic %0,%0,1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ SMP_ISYNC
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_dec\n\
+ addic %0,%0,-1\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_dec_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1 # atomic_dec_return\n\
+ addic %0,%0,-1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ SMP_ISYNC
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ int atomic_dec_if_positive(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
+ addic. %0,%0,-1\n\
+ blt- 2f\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ SMP_ISYNC
+ "\n\
+2:" : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
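+
+/*
+ * Illustrative sketch: atomic_dec_if_positive() can implement a
+ * try-acquire on a counted resource (slots is hypothetical):
+ *
+ *	if (atomic_dec_if_positive(&slots) < 0)
+ *		return 0;	counter was 0 and is left unchanged
+ */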
+
+#endif /* __ppc__ */
+
+#ifdef __mips__
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+/*
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/*
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v,i) ((v)->counter = (i))
+
+#ifndef CONFIG_CPU_HAS_LLSC
+
+/*
+ * The MIPS I implementation is only atomic with respect to
+ * interrupts. R3000 based multiprocessor machines are rare anyway ...
+ *
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v. Note that the guaranteed useful range
+ * of an atomic_t is only 24 bits.
+ */
+extern __inline__ void atomic_add(int i, atomic_t * v)
+{
+	unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ v->counter += i;
+ restore_flags(flags);
+}
+
+/*
+ * atomic_sub - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+extern __inline__ void atomic_sub(int i, atomic_t * v)
+{
+	unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ v->counter -= i;
+ restore_flags(flags);
+}
+
+extern __inline__ int atomic_add_return(int i, atomic_t * v)
+{
+	int temp;
+	unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ temp = v->counter;
+ temp += i;
+ v->counter = temp;
+ restore_flags(flags);
+
+ return temp;
+}
+
+extern __inline__ int atomic_sub_return(int i, atomic_t * v)
+{
+	int temp;
+	unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ temp = v->counter;
+ temp -= i;
+ v->counter = temp;
+ restore_flags(flags);
+
+ return temp;
+}
+
+#else
+
+/*
+ * ... while for MIPS II and better we can use the ll/sc instructions.
+ * This implementation is SMP safe ...
+ */
+
+/*
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v. Note that the guaranteed useful range
+ * of an atomic_t is only 24 bits.
+ */
+extern __inline__ void atomic_add(int i, atomic_t * v)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ "1: ll %0, %1 # atomic_add\n"
+ " addu %0, %2 \n"
+ " sc %0, %1 \n"
+ " beqz %0, 1b \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+}
+
+/*
+ * atomic_sub - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+extern __inline__ void atomic_sub(int i, atomic_t * v)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ "1: ll %0, %1 # atomic_sub\n"
+ " subu %0, %2 \n"
+ " sc %0, %1 \n"
+ " beqz %0, 1b \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+}
+
+/*
+ * Same as above, but return the result value
+ */
+extern __inline__ int atomic_add_return(int i, atomic_t * v)
+{
+ unsigned long temp, result;
+
+ __asm__ __volatile__(
+ ".set push # atomic_add_return\n"
+ ".set noreorder \n"
+ "1: ll %1, %2 \n"
+ " addu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " beqz %0, 1b \n"
+ " addu %0, %1, %3 \n"
+ ".set pop \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+
+ return result;
+}
+
+extern __inline__ int atomic_sub_return(int i, atomic_t * v)
+{
+ unsigned long temp, result;
+
+ __asm__ __volatile__(
+ ".set push \n"
+ ".set noreorder # atomic_sub_return\n"
+ "1: ll %1, %2 \n"
+ " subu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " beqz %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ ".set pop \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+
+ return result;
+}
+#endif
+
+#define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic_inc_return(v) atomic_add_return(1,(v))
+
+/*
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+/*
+ * atomic_dec_and_test - decrement by 1 and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+/*
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_inc(v) atomic_add(1,(v))
+
+/*
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_dec(v) atomic_sub(1,(v))
+
+/*
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ *
+ * Currently not implemented for MIPS.
+ */
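+
+/*
+ * One possible definition in terms of atomic_add_return(), shown only
+ * as an untested sketch:
+ *
+ * #define atomic_add_negative(i,v)	(atomic_add_return((i), (v)) < 0)
+ */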
+
+#endif /* __mips__ */
+
+#ifdef __arm__
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+static __inline__ void atomic_add(int i, volatile atomic_t *v)
+{
+ unsigned long flags;
+
+ __save_flags_cli(flags);
+ v->counter += i;
+ __restore_flags(flags);
+}
+
+static __inline__ void atomic_sub(int i, volatile atomic_t *v)
+{
+ unsigned long flags;
+
+ __save_flags_cli(flags);
+ v->counter -= i;
+ __restore_flags(flags);
+}
+
+static __inline__ void atomic_inc(volatile atomic_t *v)
+{
+ unsigned long flags;
+
+ __save_flags_cli(flags);
+ v->counter += 1;
+ __restore_flags(flags);
+}
+
+static __inline__ void atomic_dec(volatile atomic_t *v)
+{
+ unsigned long flags;
+
+ __save_flags_cli(flags);
+ v->counter -= 1;
+ __restore_flags(flags);
+}
+
+static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
+{
+ unsigned long flags;
+ int result;
+
+ __save_flags_cli(flags);
+ v->counter -= 1;
+ result = (v->counter == 0);
+ __restore_flags(flags);
+
+ return result;
+}
+
+static __inline__ int atomic_add_negative(int i, volatile atomic_t *v)
+{
+ unsigned long flags;
+ int result;
+
+ __save_flags_cli(flags);
+ v->counter += i;
+ result = (v->counter < 0);
+ __restore_flags(flags);
+
+ return result;
+}
+
+static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned long flags;
+
+ __save_flags_cli(flags);
+ *addr &= ~mask;
+ __restore_flags(flags);
+}
+
+#endif /* __arm__ */
+
+#endif /* __ALSA_IATOMIC__ */