Assembly optimization
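
Replace the local_irq_save()/local_irq_restore() critical sections in the
atomic helpers with inline assembly: each operation saves CCR, masks
interrupts by setting the I bit with orc #0x80, performs the
read-modify-write directly, and restores the saved CCR with ldc.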
[uclinux-h8/linux.git] arch/h8300/include/asm/atomic.h
index af42b6c..6a62733 100644
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-       unsigned long flags;
+       unsigned short ccr;
        int ret;
 
-       local_irq_save(flags);
-       ret = v->counter += i;
-       local_irq_restore(flags);
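+       /* Save CCR, set the I bit (orc #0x80) to mask interrupts,
+        * do the load/add/store, then restore the saved CCR. */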
+       __asm__ __volatile__ (
+               "stc ccr,%w2\n\t"
+               "orc #0x80,ccr\n\t"
+               "mov.l %1,%0\n\t"
+               "add.l %3,%0\n\t"
+               "mov.l %0,%1\n\t"
+               "ldc %w2,ccr"
+               : "=r"(ret), "+m"(v->counter), "=r"(ccr)
+               : "ri"(i));
        return ret;
 }
 
@@ -32,12 +40,18 @@ static inline int atomic_add_return(int i, atomic_t *v)
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
-       unsigned long flags;
+       unsigned short ccr;
        int ret;
 
-       local_irq_save(flags);
-       ret = v->counter -= i;
-       local_irq_restore(flags);
+       __asm__ __volatile__ (
+               "stc ccr,%w2\n\t"
+               "orc #0x80,ccr\n\t"
+               "mov.l %1,%0\n\t"
+               "sub.l %3,%0\n\t"
+               "mov.l %0,%1\n\t"
+               "ldc %w2,ccr"
+               : "=r"(ret), "+m"(v->counter), "=r"(ccr)
+               : "ri"(i));
        return ret;
 }
 
@@ -46,13 +60,17 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 static inline int atomic_inc_return(atomic_t *v)
 {
-       unsigned long flags;
+       unsigned short ccr;
        int ret;
 
-       local_irq_save(flags);
-       v->counter++;
-       ret = v->counter;
-       local_irq_restore(flags);
+       __asm__ __volatile__ (
+               "stc ccr,%w2\n\t"
+               "orc #0x80,ccr\n\t"
+               "mov.l %1,%0\n\t"
+               "inc.l #1,%0\n\t"
+               "mov.l %0,%1\n\t"
+               "ldc %w2,ccr"
+               : "=r"(ret), "+m"(v->counter), "=r"(ccr));
        return ret;
 }
 
@@ -70,13 +88,17 @@ static inline int atomic_inc_return(atomic_t *v)
 
 static inline int atomic_dec_return(atomic_t *v)
 {
-       unsigned long flags;
+       unsigned short ccr;
        int ret;
 
-       local_irq_save(flags);
-       --v->counter;
-       ret = v->counter;
-       local_irq_restore(flags);
+       __asm__ __volatile__ (
+               "stc ccr,%w2\n\t"
+               "orc #0x80,ccr\n\t"
+               "mov.l %1,%0\n\t"
+               "dec.l #1,%0\n\t"
+               "mov.l %0,%1\n\t"
+               "ldc %w2,ccr"
+               : "=r"(ret), "+m"(v->counter), "=r"(ccr));
        return ret;
 }
 
@@ -84,39 +106,58 @@ static inline int atomic_dec_return(atomic_t *v)
 
 static inline int atomic_dec_and_test(atomic_t *v)
 {
-       unsigned long flags;
+       unsigned short ccr;
        int ret;
 
-       local_irq_save(flags);
-       --v->counter;
-       ret = v->counter;
-       local_irq_restore(flags);
+       __asm__ __volatile__ (
+               "stc ccr,%w2\n\t"
+               "orc #0x80,ccr\n\t"
+               "mov.l %1,%0\n\t"
+               "dec.l #1,%0\n\t"
+               "mov.l %0,%1\n\t"
+               "ldc %w2,ccr"
+               : "=r"(ret), "+m"(v->counter), "=r"(ccr));
        return ret == 0;
 }
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        int ret;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       ret = v->counter;
-       if (likely(ret == old))
-               v->counter = new;
-       local_irq_restore(flags);
+       unsigned short ccr;
+
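+       /* With interrupts masked via CCR: load the counter, compare
+        * it with 'old', and store 'new' only if they match. */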
+       __asm__ __volatile__ (
+               "stc ccr,%w2\n\t"
+               "orc #0x80,ccr\n\t"
+               "mov.l %1,%0\n\t"
+               "cmp.l %3,%0\n\t"
+               "bne 1f\n\t"
+               "mov.l %4,%1\n"
+               "1:\tldc %w2,ccr"
+               : "=r"(ret), "+m"(v->counter), "=r"(ccr)
+               : "g"(old), "r"(new));
        return ret;
 }
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
        int ret;
-       unsigned long flags;
+       unsigned char ccr;
 
-       local_irq_save(flags);
-       ret = v->counter;
-       if (ret != u)
-               v->counter += a;
-       local_irq_restore(flags);
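+       /* With interrupts masked via CCR: load the counter and,
+        * unless it equals 'u', add 'a' and store the sum back. */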
+       __asm__ __volatile__ (
+               "stc ccr,%w2\n\t"
+               "orc #0x80,ccr\n\t"
+               "mov.l %1,%0\n\t"
+               "cmp.l %4,%0\n\t"
+               "beq 1f\n\t"
+               "add.l %0,%3\n\t"
+               "mov.l %3,%1\n"
+               "1:\tldc %w2,ccr"
+               : "=r"(ret), "+m"(v->counter), "=r"(ccr), "+r"(a)
+               : "ri"(u));
        return ret;
 }
 
@@ -125,14 +166,15 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
        unsigned char ccr;
        unsigned long tmp;
 
-       __asm__ __volatile__("stc ccr,%w3\n\t"
-                            "orc #0x80,ccr\n\t"
-                            "mov.l %0,%1\n\t"
-                            "and.l %2,%1\n\t"
-                            "mov.l %1,%0\n\t"
-                            "ldc %w3,ccr"
-                            : "=m"(*v), "=r"(tmp)
-                            : "g"(~(mask)), "r"(ccr));
+       __asm__ __volatile__(
+               "stc ccr,%w2\n\t"
+               "orc #0x80,ccr\n\t"
+               "mov.l %0,%1\n\t"
+               "and.l %3,%1\n\t"
+               "mov.l %1,%0\n\t"
+               "ldc %w2,ccr"
+               : "+m"(*v), "=&r"(tmp), "=&r"(ccr)
+               : "ri"(~(mask)));
 }
 
 static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
@@ -140,14 +182,15 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
        unsigned char ccr;
        unsigned long tmp;
 
-       __asm__ __volatile__("stc ccr,%w3\n\t"
-                            "orc #0x80,ccr\n\t"
-                            "mov.l %0,%1\n\t"
-                            "or.l %2,%1\n\t"
-                            "mov.l %1,%0\n\t"
-                            "ldc %w3,ccr"
-                            : "=m"(*v), "=r"(tmp)
-                            : "g"(~(mask)), "r"(ccr));
+       __asm__ __volatile__(
+               "stc ccr,%w2\n\t"
+               "orc #0x80,ccr\n\t"
+               "mov.l %0,%1\n\t"
+               "or.l %3,%1\n\t"
+               "mov.l %1,%0\n\t"
+               "ldc %w2,ccr"
+               : "+m"(*v), "=&r"(tmp), "=&r"(ccr)
+               : "ri"(mask));
 }
 
 /* Atomic operations are already serializing */