
Merge tag 'sound-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 15 Nov 2012 19:21:28 +0000 (11:21 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 15 Nov 2012 19:21:28 +0000 (11:21 -0800)
Pull sound fixes from Takashi Iwai:
 "The only large LOC is seen in WM5102 driver, just writing a bunch of
  register updates, but the actual code change is small.  Other than
  that, all small fixes suitable for rc6."

* tag 'sound-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound:
  ALSA: usb-audio: Fix mutex deadlock at disconnection
  ALSA: fm801: precedence bug in snd_fm801_tea575x_get_pins()
  ALSA: es1968: precedence bug in snd_es1968_tea575x_get_pins()
  ALSA: hda - Add a missing quirk entry for iMac 9,1
  ASoC: core: Double control update err for snd_soc_put_volsw_sx
  ASoC: dapm: Use card_list during DAPM shutdown
  ASoC: cs42l52: fix the return value of cs42l52_set_fmt()
  ASoC: bells: Correct type in sub speaker DAI name for WM5102
  ASoC: wm8978: pll incorrectly configured when codec is master
  ASoC: mxs-saif: Fix channel swap for 24-bit format
  ASoC: bells: Select WM1250-EV1 Springbank audio I/O module
  ASoC: bells: Add missing select of WM0010
  ASoC: mxs-saif: Add MODULE_ALIAS
  ASoC: wm5102: Write register value corrections after SYSCLK is enabled

22 files changed:
arch/mips/cavium-octeon/executive/cvmx-l2c.c
arch/mips/include/asm/bitops.h
arch/mips/include/asm/compat.h
arch/mips/include/asm/io.h
arch/mips/include/asm/irqflags.h
arch/mips/include/asm/thread_info.h
arch/mips/lib/Makefile
arch/mips/lib/bitops.c [new file with mode: 0644]
arch/mips/lib/mips-atomic.c [new file with mode: 0644]
arch/mips/mti-malta/malta-platform.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/x86.c
drivers/clk/ux500/u8500_clk.c
drivers/leds/ledtrig-cpu.c
kernel/futex.c
scripts/kconfig/expr.h
scripts/kconfig/list.h [new file with mode: 0644]
scripts/kconfig/lkc_proto.h
scripts/kconfig/mconf.c
scripts/kconfig/menu.c
security/device_cgroup.c
tools/power/x86/turbostat/turbostat.c

diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index d38246e..9f883bf 100644 (file)
@@ -30,6 +30,7 @@
  * measurement, and debugging facilities.
  */
 
+#include <linux/irqflags.h>
 #include <asm/octeon/cvmx.h>
 #include <asm/octeon/cvmx-l2c.h>
 #include <asm/octeon/cvmx-spinlock.h>
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 82ad35c..46ac73a 100644 (file)
@@ -14,7 +14,6 @@
 #endif
 
 #include <linux/compiler.h>
-#include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h>             /* sigh ... */
 #define smp_mb__before_clear_bit()     smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()      smp_llsc_mb()
 
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+                           volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+                                volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+                             volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+                              volatile unsigned long *addr);
+
+
 /*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -57,7 +74,7 @@
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long temp;
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
                        : "=&r" (temp), "+m" (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               *a |= mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               __mips_set_bit(nr, addr);
 }
 
 /*
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long temp;
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
                        : "=&r" (temp), "+m" (*m)
                        : "ir" (~(1UL << bit)));
                } while (unlikely(!temp));
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               *a &= ~mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               __mips_clear_bit(nr, addr);
 }
 
 /*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
  */
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
                        : "=&r" (temp), "+m" (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               *a ^= mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               __mips_change_bit(nr, addr);
 }
 
 /*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 static inline int test_and_set_bit(unsigned long nr,
        volatile unsigned long *addr)
 {
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long res;
 
        smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               res = (mask & *a);
-               *a |= mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               res = __mips_test_and_set_bit(nr, addr);
 
        smp_llsc_mb();
 
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
 static inline int test_and_set_bit_lock(unsigned long nr,
        volatile unsigned long *addr)
 {
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long res;
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               res = (mask & *a);
-               *a |= mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               res = __mips_test_and_set_bit_lock(nr, addr);
 
        smp_llsc_mb();
 
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 static inline int test_and_clear_bit(unsigned long nr,
        volatile unsigned long *addr)
 {
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long res;
 
        smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               res = (mask & *a);
-               *a &= ~mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               res = __mips_test_and_clear_bit(nr, addr);
 
        smp_llsc_mb();
 
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 static inline int test_and_change_bit(unsigned long nr,
        volatile unsigned long *addr)
 {
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long res;
 
        smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               res = (mask & *a);
-               *a ^= mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               res = __mips_test_and_change_bit(nr, addr);
 
        smp_llsc_mb();
 
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 58277e0..3c5d146 100644 (file)
@@ -290,7 +290,7 @@ struct compat_shmid64_ds {
 
 static inline int is_compat_task(void)
 {
-       return test_thread_flag(TIF_32BIT);
+       return test_thread_flag(TIF_32BIT_ADDR);
 }
 
 #endif /* _ASM_COMPAT_H */
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 29d9c23..ff2e034 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/irqflags.h>
 
 #include <asm/addrspace.h>
 #include <asm/bug.h>
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 309cbcd..9f3384c 100644 (file)
 #include <linux/compiler.h>
 #include <asm/hazards.h>
 
-__asm__(
-       "       .macro  arch_local_irq_enable                           \n"
-       "       .set    push                                            \n"
-       "       .set    reorder                                         \n"
-       "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    $1, $2, 1       # SMTC - clear TCStatus.IXMT    \n"
-       "       ori     $1, 0x400                                       \n"
-       "       xori    $1, 0x400                                       \n"
-       "       mtc0    $1, $2, 1                                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
-       "       ei                                                      \n"
-#else
-       "       mfc0    $1,$12                                          \n"
-       "       ori     $1,0x1f                                         \n"
-       "       xori    $1,0x1e                                         \n"
-       "       mtc0    $1,$12                                          \n"
-#endif
-       "       irq_enable_hazard                                       \n"
-       "       .set    pop                                             \n"
-       "       .endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of call overhead on each local_irq_enable()
-        */
-       smtc_ipi_replay();
-#endif
-       __asm__ __volatile__(
-               "arch_local_irq_enable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
 __asm__(
        "       .macro  arch_local_irq_disable\n"
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    $1, $2, 1                                       \n"
-       "       ori     $1, 0x400                                       \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1, $2, 1                                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
        "       di                                                      \n"
-#else
-       "       mfc0    $1,$12                                          \n"
-       "       ori     $1,0x1f                                         \n"
-       "       xori    $1,0x1f                                         \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1,$12                                          \n"
-#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
                : "memory");
 }
 
-__asm__(
-       "       .macro  arch_local_save_flags flags                     \n"
-       "       .set    push                                            \n"
-       "       .set    reorder                                         \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\flags, $2, 1                                  \n"
-#else
-       "       mfc0    \\flags, $12                                    \n"
-#endif
-       "       .set    pop                                             \n"
-       "       .endm                                                   \n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-       unsigned long flags;
-       asm volatile("arch_local_save_flags %0" : "=r" (flags));
-       return flags;
-}
 
 __asm__(
        "       .macro  arch_local_irq_save result                      \n"
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\result, $2, 1                                 \n"
-       "       ori     $1, \\result, 0x400                             \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1, $2, 1                                       \n"
-       "       andi    \\result, \\result, 0x400                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
        "       di      \\result                                        \n"
        "       andi    \\result, 1                                     \n"
-#else
-       "       mfc0    \\result, $12                                   \n"
-       "       ori     $1, \\result, 0x1f                              \n"
-       "       xori    $1, 0x1f                                        \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1, $12                                         \n"
-#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
        return flags;
 }
 
+
 __asm__(
        "       .macro  arch_local_irq_restore flags                    \n"
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "mfc0   $1, $2, 1                                               \n"
-       "andi   \\flags, 0x400                                          \n"
-       "ori    $1, 0x400                                               \n"
-       "xori   $1, 0x400                                               \n"
-       "or     \\flags, $1                                             \n"
-       "mtc0   \\flags, $2, 1                                          \n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
        /*
         * Slow, but doesn't suffer from a relatively unlikely race
         * condition we're having since days 1.
         */
        "       beqz    \\flags, 1f                                     \n"
-       "        di                                                     \n"
+       "       di                                                      \n"
        "       ei                                                      \n"
        "1:                                                             \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
        /*
         * Fast, dangerous.  Life is fun, life is good.
         */
        "       mfc0    $1, $12                                         \n"
        "       ins     $1, \\flags, 0, 1                               \n"
        "       mtc0    $1, $12                                         \n"
-#else
-       "       mfc0    $1, $12                                         \n"
-       "       andi    \\flags, 1                                      \n"
-       "       ori     $1, 0x1f                                        \n"
-       "       xori    $1, 0x1f                                        \n"
-       "       or      \\flags, $1                                     \n"
-       "       mtc0    \\flags, $12                                    \n"
 #endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");
 
-
 static inline void arch_local_irq_restore(unsigned long flags)
 {
        unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of branch and call overhead on each
-        * local_irq_restore()
-        */
-       if (unlikely(!(flags & 0x0400)))
-               smtc_ipi_replay();
-#endif
-
        __asm__ __volatile__(
                "arch_local_irq_restore\t%0"
                : "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
                : "0" (flags)
                : "memory");
 }
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+       "       .macro  arch_local_irq_enable                           \n"
+       "       .set    push                                            \n"
+       "       .set    reorder                                         \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    $1, $2, 1       # SMTC - clear TCStatus.IXMT    \n"
+       "       ori     $1, 0x400                                       \n"
+       "       xori    $1, 0x400                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+       "       ei                                                      \n"
+#else
+       "       mfc0    $1,$12                                          \n"
+       "       ori     $1,0x1f                                         \n"
+       "       xori    $1,0x1e                                         \n"
+       "       mtc0    $1,$12                                          \n"
+#endif
+       "       irq_enable_hazard                                       \n"
+       "       .set    pop                                             \n"
+       "       .endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of call overhead on each local_irq_enable()
+        */
+       smtc_ipi_replay();
+#endif
+       __asm__ __volatile__(
+               "arch_local_irq_enable"
+               : /* no outputs */
+               : /* no inputs */
+               : "memory");
+}
+
+
+__asm__(
+       "       .macro  arch_local_save_flags flags                     \n"
+       "       .set    push                                            \n"
+       "       .set    reorder                                         \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    \\flags, $2, 1                                  \n"
+#else
+       "       mfc0    \\flags, $12                                    \n"
+#endif
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+       unsigned long flags;
+       asm volatile("arch_local_save_flags %0" : "=r" (flags));
+       return flags;
+}
+
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }
 
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
 
 /*
  * Do the CPU's IRQ-state tracing from assembly code.
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 8debe9e..18806a5 100644 (file)
@@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_LOAD_WATCH         25      /* If set, load watch registers */
 #define TIF_SYSCALL_TRACE      31      /* syscall trace active */
 
-#ifdef CONFIG_MIPS32_O32
-#define TIF_32BIT TIF_32BIT_REGS
-#elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT _TIF_32BIT_ADDR
-#endif /* CONFIG_MIPS32_O32 */
-
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index c4a82e8..eeddc58 100644 (file)
@@ -2,8 +2,9 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y  += csum_partial.o delay.o memcpy.o memset.o \
-          strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+lib-y  += bitops.o csum_partial.o delay.o memcpy.o memset.o \
+          mips-atomic.o strlen_user.o strncpy_user.o \
+          strnlen_user.o uncached.o
 
 obj-y                  += iomap.o
 obj-$(CONFIG_PCI)      += iomap-pci.o
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
new file mode 100644 (file)
index 0000000..239a9c9
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/irqflags.h>
+#include <linux/export.h>
+
+
+/**
+ * __mips_set_bit - Atomically set a bit in memory.  This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       *a |= mask;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_set_bit);
+
+
+/**
+ * __mips_clear_bit - Clears a bit in memory.  This is called by clear_bit() if
+ * it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       *a &= ~mask;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_clear_bit);
+
+
+/**
+ * __mips_change_bit - Toggle a bit in memory.  This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       *a ^= mask;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_change_bit);
+
+
+/**
+ * __mips_test_and_set_bit - Set a bit and return its old value.  This is
+ * called by test_and_set_bit() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit(unsigned long nr,
+                           volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+       unsigned long res;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       res = (mask & *a);
+       *a |= mask;
+       raw_local_irq_restore(flags);
+       return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit);
+
+
+/**
+ * __mips_test_and_set_bit_lock - Set a bit and return its old value.  This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit_lock(unsigned long nr,
+                                volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+       unsigned long res;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       res = (mask & *a);
+       *a |= mask;
+       raw_local_irq_restore(flags);
+       return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
+
+
+/**
+ * __mips_test_and_clear_bit - Clear a bit and return its old value.  This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+       unsigned long res;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       res = (mask & *a);
+       *a &= ~mask;
+       raw_local_irq_restore(flags);
+       return res;
+}
+EXPORT_SYMBOL(__mips_test_and_clear_bit);
+
+
+/**
+ * __mips_test_and_change_bit - Change a bit and return its old value.  This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+       unsigned long res;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       res = (mask & *a);
+       *a ^= mask;
+       raw_local_irq_restore(flags);
+       return res;
+}
+EXPORT_SYMBOL(__mips_test_and_change_bit);
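
Illustrative note (not part of the patch): callers keep using the generic <linux/bitops.h> API unchanged; only configurations without usable LL/SC sequences fall back into the out-of-line helpers above. A minimal sketch, with a made-up bitmap name:

    #include <linux/bitops.h>
    #include <linux/types.h>

    static DECLARE_BITMAP(demo_map, 64);            /* hypothetical 64-bit bitmap */

    static void demo_bit_usage(void)
    {
            set_bit(3, demo_map);                   /* non-LL/SC CPUs: __mips_set_bit() */
            if (test_and_clear_bit(3, demo_map))    /* non-LL/SC CPUs: __mips_test_and_clear_bit() */
                    change_bit(0, demo_map);        /* non-LL/SC CPUs: __mips_change_bit() */
    }
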
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644 (file)
index 0000000..e091430
--- /dev/null
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+       "       .macro  arch_local_irq_disable\n"
+       "       .set    push                                            \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    $1, $2, 1                                       \n"
+       "       ori     $1, 0x400                                       \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    $1,$12                                          \n"
+       "       ori     $1,0x1f                                         \n"
+       "       xori    $1,0x1f                                         \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1,$12                                          \n"
+#endif
+       "       irq_disable_hazard                                      \n"
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+void arch_local_irq_disable(void)
+{
+       preempt_disable();
+       __asm__ __volatile__(
+               "arch_local_irq_disable"
+               : /* no outputs */
+               : /* no inputs */
+               : "memory");
+       preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+       "       .macro  arch_local_irq_save result                      \n"
+       "       .set    push                                            \n"
+       "       .set    reorder                                         \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    \\result, $2, 1                                 \n"
+       "       ori     $1, \\result, 0x400                             \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+       "       andi    \\result, \\result, 0x400                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    \\result, $12                                   \n"
+       "       ori     $1, \\result, 0x1f                              \n"
+       "       xori    $1, 0x1f                                        \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1, $12                                         \n"
+#endif
+       "       irq_disable_hazard                                      \n"
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
+       preempt_disable();
+       asm volatile("arch_local_irq_save\t%0"
+                    : "=r" (flags)
+                    : /* no inputs */
+                    : "memory");
+       preempt_enable();
+       return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+       "       .macro  arch_local_irq_restore flags                    \n"
+       "       .set    push                                            \n"
+       "       .set    noreorder                                       \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "mfc0   $1, $2, 1                                               \n"
+       "andi   \\flags, 0x400                                          \n"
+       "ori    $1, 0x400                                               \n"
+       "xori   $1, 0x400                                               \n"
+       "or     \\flags, $1                                             \n"
+       "mtc0   \\flags, $2, 1                                          \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+       /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    $1, $12                                         \n"
+       "       andi    \\flags, 1                                      \n"
+       "       ori     $1, 0x1f                                        \n"
+       "       xori    $1, 0x1f                                        \n"
+       "       or      \\flags, $1                                     \n"
+       "       mtc0    \\flags, $12                                    \n"
+#endif
+       "       irq_disable_hazard                                      \n"
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+void arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of branch and call overhead on each
+        * local_irq_restore()
+        */
+       if (unlikely(!(flags & 0x0400)))
+               smtc_ipi_replay();
+#endif
+       preempt_disable();
+       __asm__ __volatile__(
+               "arch_local_irq_restore\t%0"
+               : "=r" (__tmp1)
+               : "0" (flags)
+               : "memory");
+       preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+void __arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
+
+       preempt_disable();
+       __asm__ __volatile__(
+               "arch_local_irq_restore\t%0"
+               : "=r" (__tmp1)
+               : "0" (flags)
+               : "memory");
+       preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
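
Illustrative note (not part of the patch): the out-of-line variants above stay hidden behind the usual irqflags API, so callers are unchanged. A minimal sketch of such a caller:

    #include <linux/irqflags.h>

    static void demo_irq_off_section(void)
    {
            unsigned long flags;

            local_irq_save(flags);    /* may resolve to the out-of-line arch_local_irq_save() */
            /* ... work that must not be interrupted on this CPU ... */
            local_irq_restore(flags);
    }
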
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 80562b8..7473217 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/platform_device.h>
+#include <asm/mips-boards/maltaint.h>
 #include <mtd/mtd-abi.h>
 
 #define SMC_PORT(base, int)                                            \
@@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = {
        SMC_PORT(0x2F8, 3),
        {
                .mapbase        = 0x1f000900,   /* The CBUS UART */
-               .irq            = MIPS_CPU_IRQ_BASE + 2,
+               .irq            = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
                .uartclk        = 3686400,      /* Twice the usual clk! */
                .iotype         = UPIO_MEM32,
                .flags          = CBUS_UART_FLAGS,
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index a10e460..58fc514 100644 (file)
@@ -24,6 +24,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
 
+       if (!static_cpu_has(X86_FEATURE_XSAVE))
+               return 0;
+
        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        return best && (best->ecx & bit(X86_FEATURE_XSAVE));
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 224a7e7..4f76417 100644 (file)
@@ -5781,6 +5781,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        int pending_vec, max_bits, idx;
        struct desc_ptr dt;
 
+       if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
+               return -EINVAL;
+
        dt.size = sregs->idt.limit;
        dt.address = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
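
Illustrative note (not part of the patch): together with the cpuid.h hunk above, this means a KVM_SET_SREGS call that sets CR4.OSXSAVE on a vCPU whose CPUID does not advertise XSAVE is now rejected with -EINVAL. A minimal userspace sketch, assuming a hypothetical vcpu_fd already obtained via KVM_CREATE_VCPU:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int try_enable_osxsave(int vcpu_fd)
    {
            struct kvm_sregs sregs;

            if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
                    return -1;
            sregs.cr4 |= 1UL << 18;         /* X86_CR4_OSXSAVE */
            /* Now fails with EINVAL if the guest CPUID does not expose XSAVE. */
            return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
    }
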
diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c
index ca4a25e..e2c17d1 100644 (file)
@@ -40,7 +40,7 @@ void u8500_clk_init(void)
                                CLK_IS_ROOT|CLK_IGNORE_UNUSED,
                                32768);
        clk_register_clkdev(clk, "clk32k", NULL);
-       clk_register_clkdev(clk, NULL, "rtc-pl031");
+       clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
 
        /* PRCMU clocks */
        fw_version = prcmu_get_fw_version();
@@ -228,10 +228,17 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", U8500_CLKRST1_BASE,
                                BIT(2), 0);
+       clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
+
        clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", U8500_CLKRST1_BASE,
                                BIT(3), 0);
+       clk_register_clkdev(clk, "apb_pclk", "msp0");
+       clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0");
+
        clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", U8500_CLKRST1_BASE,
                                BIT(4), 0);
+       clk_register_clkdev(clk, "apb_pclk", "msp1");
+       clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1");
 
        clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", U8500_CLKRST1_BASE,
                                BIT(5), 0);
@@ -239,6 +246,7 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", U8500_CLKRST1_BASE,
                                BIT(6), 0);
+       clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
 
        clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", U8500_CLKRST1_BASE,
                                BIT(7), 0);
@@ -246,6 +254,7 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", U8500_CLKRST1_BASE,
                                BIT(8), 0);
+       clk_register_clkdev(clk, "apb_pclk", "slimbus0");
 
        clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", U8500_CLKRST1_BASE,
                                BIT(9), 0);
@@ -255,11 +264,16 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", U8500_CLKRST1_BASE,
                                BIT(10), 0);
+       clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
+
        clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", U8500_CLKRST1_BASE,
                                BIT(11), 0);
+       clk_register_clkdev(clk, "apb_pclk", "msp3");
+       clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3");
 
        clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", U8500_CLKRST2_BASE,
                                BIT(0), 0);
+       clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
 
        clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", U8500_CLKRST2_BASE,
                                BIT(1), 0);
@@ -279,12 +293,13 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", U8500_CLKRST2_BASE,
                                BIT(5), 0);
+       clk_register_clkdev(clk, "apb_pclk", "msp2");
+       clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2");
 
        clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", U8500_CLKRST2_BASE,
                                BIT(6), 0);
        clk_register_clkdev(clk, "apb_pclk", "sdi1");
 
-
        clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", U8500_CLKRST2_BASE,
                                BIT(7), 0);
        clk_register_clkdev(clk, "apb_pclk", "sdi3");
@@ -316,10 +331,15 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", U8500_CLKRST3_BASE,
                                BIT(1), 0);
+       clk_register_clkdev(clk, "apb_pclk", "ssp0");
+
        clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", U8500_CLKRST3_BASE,
                                BIT(2), 0);
+       clk_register_clkdev(clk, "apb_pclk", "ssp1");
+
        clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", U8500_CLKRST3_BASE,
                                BIT(3), 0);
+       clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
 
        clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", U8500_CLKRST3_BASE,
                                BIT(4), 0);
@@ -401,10 +421,17 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
                        U8500_CLKRST1_BASE, BIT(2), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "nmk-i2c.1");
+
        clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
                        U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "msp0");
+       clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0");
+
        clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
                        U8500_CLKRST1_BASE, BIT(4), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "msp1");
+       clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1");
 
        clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
                        U8500_CLKRST1_BASE, BIT(5), CLK_SET_RATE_GATE);
@@ -412,17 +439,25 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
                        U8500_CLKRST1_BASE, BIT(6), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "nmk-i2c.2");
+
        clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
-                       U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
-       /* FIXME: Redefinition of BIT(3). */
+                       U8500_CLKRST1_BASE, BIT(8), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "slimbus0");
+
        clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
                        U8500_CLKRST1_BASE, BIT(9), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "nmk-i2c.4");
+
        clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
                        U8500_CLKRST1_BASE, BIT(10), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "msp3");
+       clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3");
 
        /* Periph2 */
        clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
                        U8500_CLKRST2_BASE, BIT(0), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "nmk-i2c.3");
 
        clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
                        U8500_CLKRST2_BASE, BIT(2), CLK_SET_RATE_GATE);
@@ -430,6 +465,8 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
                        U8500_CLKRST2_BASE, BIT(3), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "msp2");
+       clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2");
 
        clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
                        U8500_CLKRST2_BASE, BIT(4), CLK_SET_RATE_GATE);
@@ -450,10 +487,15 @@ void u8500_clk_init(void)
        /* Periph3 */
        clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
                        U8500_CLKRST3_BASE, BIT(1), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "ssp0");
+
        clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
                        U8500_CLKRST3_BASE, BIT(2), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "ssp1");
+
        clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
                        U8500_CLKRST3_BASE, BIT(3), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "nmk-i2c.0");
 
        clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
                        U8500_CLKRST3_BASE, BIT(4), CLK_SET_RATE_GATE);
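
Illustrative note (not part of the patch): the added clk_register_clkdev() entries let consumers look the clocks up by device name and connection id. A minimal consumer sketch (the device pointer is assumed; the names come from the hunks above):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /* dev is assumed to be the consumer's struct device, e.g. dev_name(dev) == "nmk-i2c.1". */
    static int demo_enable_bus_clock(struct device *dev)
    {
            struct clk *pclk = clk_get(dev, "apb_pclk");

            if (IS_ERR(pclk))
                    return PTR_ERR(pclk);
            return clk_prepare_enable(pclk);
    }
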
diff --git a/drivers/leds/ledtrig-cpu.c b/drivers/leds/ledtrig-cpu.c
index b312056..4239b39 100644 (file)
@@ -33,8 +33,6 @@
 struct led_trigger_cpu {
        char name[MAX_NAME_LEN];
        struct led_trigger *_trig;
-       struct mutex lock;
-       int lock_is_inited;
 };
 
 static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig);
@@ -50,12 +48,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt)
 {
        struct led_trigger_cpu *trig = &__get_cpu_var(cpu_trig);
 
-       /* mutex lock should be initialized before calling mutex_call() */
-       if (!trig->lock_is_inited)
-               return;
-
-       mutex_lock(&trig->lock);
-
        /* Locate the correct CPU LED */
        switch (ledevt) {
        case CPU_LED_IDLE_END:
@@ -75,8 +67,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt)
                /* Will leave the LED as it is */
                break;
        }
-
-       mutex_unlock(&trig->lock);
 }
 EXPORT_SYMBOL(ledtrig_cpu);
 
@@ -117,14 +107,9 @@ static int __init ledtrig_cpu_init(void)
        for_each_possible_cpu(cpu) {
                struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
 
-               mutex_init(&trig->lock);
-
                snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
 
-               mutex_lock(&trig->lock);
                led_trigger_register_simple(trig->name, &trig->_trig);
-               trig->lock_is_inited = 1;
-               mutex_unlock(&trig->lock);
        }
 
        register_syscore_ops(&ledtrig_cpu_syscore_ops);
@@ -142,15 +127,9 @@ static void __exit ledtrig_cpu_exit(void)
        for_each_possible_cpu(cpu) {
                struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
 
-               mutex_lock(&trig->lock);
-
                led_trigger_unregister_simple(trig->_trig);
                trig->_trig = NULL;
                memset(trig->name, 0, MAX_NAME_LEN);
-               trig->lock_is_inited = 0;
-
-               mutex_unlock(&trig->lock);
-               mutex_destroy(&trig->lock);
        }
 
        unregister_syscore_ops(&ledtrig_cpu_syscore_ops);
diff --git a/kernel/futex.c b/kernel/futex.c
index 3717e7b..20ef219 100644 (file)
@@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
                                struct futex_pi_state **ps,
                                struct task_struct *task, int set_waiters)
 {
-       int lock_taken, ret, ownerdied = 0;
+       int lock_taken, ret, force_take = 0;
        u32 uval, newval, curval, vpid = task_pid_vnr(task);
 
 retry:
@@ -755,17 +755,15 @@ retry:
        newval = curval | FUTEX_WAITERS;
 
        /*
-        * There are two cases, where a futex might have no owner (the
-        * owner TID is 0): OWNER_DIED. We take over the futex in this
-        * case. We also do an unconditional take over, when the owner
-        * of the futex died.
-        *
-        * This is safe as we are protected by the hash bucket lock !
+        * Should we force take the futex? See below.
         */
-       if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
-               /* Keep the OWNER_DIED bit */
+       if (unlikely(force_take)) {
+               /*
+                * Keep the OWNER_DIED and the WAITERS bit and set the
+                * new TID value.
+                */
                newval = (curval & ~FUTEX_TID_MASK) | vpid;
-               ownerdied = 0;
+               force_take = 0;
                lock_taken = 1;
        }
 
@@ -775,7 +773,7 @@ retry:
                goto retry;
 
        /*
-        * We took the lock due to owner died take over.
+        * We took the lock due to forced take over.
         */
        if (unlikely(lock_taken))
                return 1;
@@ -790,20 +788,25 @@ retry:
                switch (ret) {
                case -ESRCH:
                        /*
-                        * No owner found for this futex. Check if the
-                        * OWNER_DIED bit is set to figure out whether
-                        * this is a robust futex or not.
+                        * We failed to find an owner for this
+                        * futex. So we have no pi_state to block
+                        * on. This can happen in two cases:
+                        *
+                        * 1) The owner died
+                        * 2) A stale FUTEX_WAITERS bit
+                        *
+                        * Re-read the futex value.
                         */
                        if (get_futex_value_locked(&curval, uaddr))
                                return -EFAULT;
 
                        /*
-                        * We simply start over in case of a robust
-                        * futex. The code above will take the futex
-                        * and return happy.
+                        * If the owner died or we have a stale
+                        * WAITERS bit the owner TID in the user space
+                        * futex is 0.
                         */
-                       if (curval & FUTEX_OWNER_DIED) {
-                               ownerdied = 1;
+                       if (!(curval & FUTEX_TID_MASK)) {
+                               force_take = 1;
                                goto retry;
                        }
                default:
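
Illustrative note (not part of the patch): the logic above keys entirely on the user-space futex word, whose layout comes from <linux/futex.h>. A short summary of the bits being tested:

    /* Bits of the 32-bit futex word tested above (values from <linux/futex.h>). */
    #define FUTEX_WAITERS           0x80000000      /* waiters are (or may be) queued in the kernel */
    #define FUTEX_OWNER_DIED        0x40000000      /* previous owner exited without unlocking */
    #define FUTEX_TID_MASK          0x3fffffff      /* TID of the current owner; 0 means "no owner" */

    /*
     * force_take is set only after -ESRCH when (curval & FUTEX_TID_MASK) == 0,
     * i.e. the word claims there is no owner (the owner died, or a stale
     * FUTEX_WAITERS bit was left behind); the retry then installs vpid while
     * keeping the OWNER_DIED and WAITERS bits.
     */
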
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index bd2e098..cdd4860 100644 (file)
@@ -12,7 +12,7 @@ extern "C" {
 
 #include <assert.h>
 #include <stdio.h>
-#include <sys/queue.h>
+#include "list.h"
 #ifndef __cplusplus
 #include <stdbool.h>
 #endif
@@ -175,12 +175,11 @@ struct menu {
 #define MENU_ROOT              0x0002
 
 struct jump_key {
-       CIRCLEQ_ENTRY(jump_key) entries;
+       struct list_head entries;
        size_t offset;
        struct menu *target;
        int index;
 };
-CIRCLEQ_HEAD(jk_head, jump_key);
 
 #define JUMP_NB                        9
 
diff --git a/scripts/kconfig/list.h b/scripts/kconfig/list.h
new file mode 100644 (file)
index 0000000..0ae730b
--- /dev/null
@@ -0,0 +1,91 @@
+#ifndef LIST_H
+#define LIST_H
+
+/*
+ * Copied from include/linux/...
+ */
+
+#undef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr:        the pointer to the member.
+ * @type:       the type of the container struct this is embedded in.
+ * @member:     the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({                      \
+       const typeof( ((type *)0)->member ) *__mptr = (ptr);    \
+       (type *)( (char *)__mptr - offsetof(type,member) );})
+
+
+struct list_head {
+       struct list_head *next, *prev;
+};
+
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+       struct list_head name = LIST_HEAD_INIT(name)
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr:       the &struct list_head pointer.
+ * @type:      the type of the struct this is embedded in.
+ * @member:    the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+       container_of(ptr, type, member)
+
+/**
+ * list_for_each_entry -       iterate over list of given type
+ * @pos:       the type * to use as a loop cursor.
+ * @head:      the head for your list.
+ * @member:    the name of the list_struct within the struct.
+ */
+#define list_for_each_entry(pos, head, member)                         \
+       for (pos = list_entry((head)->next, typeof(*pos), member);      \
+            &pos->member != (head);    \
+            pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+       return head->next == head;
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add(struct list_head *_new,
+                             struct list_head *prev,
+                             struct list_head *next)
+{
+       next->prev = _new;
+       _new->next = next;
+       _new->prev = prev;
+       prev->next = _new;
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *_new, struct list_head *head)
+{
+       __list_add(_new, head->prev, head);
+}
+
+#endif
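
Illustrative note (not part of the patch): the later kconfig hunks use this helper exactly as a queue of jump targets. A standalone sketch (plain userspace C; struct jump_key is simplified here to just the fields used):

    #include <stdio.h>
    #include <stdlib.h>
    #include "list.h"

    struct jump_key {                       /* simplified: only the fields used below */
            struct list_head entries;
            int index;
    };

    int main(void)
    {
            LIST_HEAD(head);
            struct jump_key *jump, *pos;
            int i;

            for (i = 0; i < 3; i++) {
                    jump = calloc(1, sizeof(*jump));
                    if (!jump)
                            return 1;
                    jump->index = list_empty(&head) ? 0
                            : list_entry(head.prev, struct jump_key, entries)->index + 1;
                    list_add_tail(&jump->entries, &head);
            }

            list_for_each_entry(pos, &head, entries)
                    printf("jump_key index %d\n", pos->index);
            return 0;
    }
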
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index 1d1c085..ef1a738 100644 (file)
@@ -21,9 +21,9 @@ P(menu_get_root_menu,struct menu *,(struct menu *menu));
 P(menu_get_parent_menu,struct menu *,(struct menu *menu));
 P(menu_has_help,bool,(struct menu *menu));
 P(menu_get_help,const char *,(struct menu *menu));
-P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct jk_head
+P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct list_head
                         *head));
-P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct jk_head
+P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct list_head
                                   *head));
 P(menu_get_ext_help,void,(struct menu *menu, struct gstr *help));
 
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 48f6744..53975cf 100644 (file)
@@ -312,7 +312,7 @@ static void set_config_filename(const char *config_filename)
 
 
 struct search_data {
-       struct jk_head *head;
+       struct list_head *head;
        struct menu **targets;
        int *keys;
 };
@@ -323,7 +323,7 @@ static void update_text(char *buf, size_t start, size_t end, void *_data)
        struct jump_key *pos;
        int k = 0;
 
-       CIRCLEQ_FOREACH(pos, data->head, entries) {
+       list_for_each_entry(pos, data->head, entries) {
                if (pos->offset >= start && pos->offset < end) {
                        char header[4];
 
@@ -375,7 +375,7 @@ again:
 
        sym_arr = sym_re_search(dialog_input);
        do {
-               struct jk_head head = CIRCLEQ_HEAD_INITIALIZER(head);
+               LIST_HEAD(head);
                struct menu *targets[JUMP_NB];
                int keys[JUMP_NB + 1], i;
                struct search_data data = {
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index a3cade6..e98a05c 100644 (file)
@@ -508,7 +508,7 @@ const char *menu_get_help(struct menu *menu)
 }
 
 static void get_prompt_str(struct gstr *r, struct property *prop,
-                          struct jk_head *head)
+                          struct list_head *head)
 {
        int i, j;
        struct menu *submenu[8], *menu, *location = NULL;
@@ -544,12 +544,13 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
                } else
                        jump->target = location;
 
-               if (CIRCLEQ_EMPTY(head))
+               if (list_empty(head))
                        jump->index = 0;
                else
-                       jump->index = CIRCLEQ_LAST(head)->index + 1;
+                       jump->index = list_entry(head->prev, struct jump_key,
+                                                entries)->index + 1;
 
-               CIRCLEQ_INSERT_TAIL(head, jump, entries);
+               list_add_tail(&jump->entries, head);
        }
 
        if (i > 0) {
@@ -573,7 +574,8 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
 /*
  * head is optional and may be NULL
  */
-void get_symbol_str(struct gstr *r, struct symbol *sym, struct jk_head *head)
+void get_symbol_str(struct gstr *r, struct symbol *sym,
+                   struct list_head *head)
 {
        bool hit;
        struct property *prop;
@@ -612,7 +614,7 @@ void get_symbol_str(struct gstr *r, struct symbol *sym, struct jk_head *head)
        str_append(r, "\n\n");
 }
 
-struct gstr get_relations_str(struct symbol **sym_arr, struct jk_head *head)
+struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head)
 {
        struct symbol *sym;
        struct gstr res = str_new();
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 842c254..b08d20c 100644 (file)
@@ -164,8 +164,8 @@ static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
        struct dev_exception_item *ex, *tmp;
 
        list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
-               list_del(&ex->list);
-               kfree(ex);
+               list_del_rcu(&ex->list);
+               kfree_rcu(ex, rcu);
        }
 }
 
@@ -298,7 +298,7 @@ static int may_access(struct dev_cgroup *dev_cgroup,
        struct dev_exception_item *ex;
        bool match = false;
 
-       list_for_each_entry(ex, &dev_cgroup->exceptions, list) {
+       list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
                if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
                        continue;
                if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
@@ -352,6 +352,8 @@ static int parent_has_perm(struct dev_cgroup *childcg,
  */
 static inline int may_allow_all(struct dev_cgroup *parent)
 {
+       if (!parent)
+               return 1;
        return parent->behavior == DEVCG_DEFAULT_ALLOW;
 }
 
@@ -376,11 +378,14 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
        int count, rc;
        struct dev_exception_item ex;
        struct cgroup *p = devcgroup->css.cgroup;
-       struct dev_cgroup *parent = cgroup_to_devcgroup(p->parent);
+       struct dev_cgroup *parent = NULL;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       if (p->parent)
+               parent = cgroup_to_devcgroup(p->parent);
+
        memset(&ex, 0, sizeof(ex));
        b = buffer;
 
@@ -391,11 +396,14 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
                        if (!may_allow_all(parent))
                                return -EPERM;
                        dev_exception_clean(devcgroup);
+                       devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+                       if (!parent)
+                               break;
+
                        rc = dev_exceptions_copy(&devcgroup->exceptions,
                                                 &parent->exceptions);
                        if (rc)
                                return rc;
-                       devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
                        break;
                case DEVCG_DENY:
                        dev_exception_clean(devcgroup);
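
Illustrative note (not part of the patch): the switch to list_del_rcu()/kfree_rcu() assumes struct dev_exception_item carries a struct rcu_head and that lockless readers walk the list under RCU, as the list_for_each_entry_rcu() hunk above does. A minimal sketch of the pattern, with the non-list fields and helper names assumed:

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct dev_exception_item {             /* access-control fields elided */
            struct list_head list;
            struct rcu_head rcu;            /* required by kfree_rcu(ex, rcu) */
    };

    /* Writer side (update-side lock held): unlink now, free after a grace period. */
    static void demo_drop_exception(struct dev_exception_item *ex)
    {
            list_del_rcu(&ex->list);
            kfree_rcu(ex, rcu);
    }

    /* Reader side: walks the list without taking the writer's lock. */
    static bool demo_scan(struct list_head *exceptions)
    {
            struct dev_exception_item *ex;
            bool hit = false;

            rcu_read_lock();
            list_for_each_entry_rcu(ex, exceptions, list)
                    hit = true;             /* real code compares type/major/minor/access */
            rcu_read_unlock();
            return hit;
    }
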
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 2655ae9..ea095ab 100644 (file)
@@ -206,8 +206,10 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
        retval = pread(fd, msr, sizeof *msr, offset);
        close(fd);
 
-       if (retval != sizeof *msr)
+       if (retval != sizeof *msr) {
+               fprintf(stderr, "%s offset 0x%zx read failed\n", pathname, offset);
                return -1;
+       }
 
        return 0;
 }
@@ -1101,7 +1103,9 @@ void turbostat_loop()
 
 restart:
        retval = for_all_cpus(get_counters, EVEN_COUNTERS);
-       if (retval) {
+       if (retval < -1) {
+               exit(retval);
+       } else if (retval == -1) {
                re_initialize();
                goto restart;
        }
@@ -1114,7 +1118,9 @@ restart:
                }
                sleep(interval_sec);
                retval = for_all_cpus(get_counters, ODD_COUNTERS);
-               if (retval) {
+               if (retval < -1) {
+                       exit(retval);
+               } else if (retval == -1) {
                        re_initialize();
                        goto restart;
                }
@@ -1126,7 +1132,9 @@ restart:
                flush_stdout();
                sleep(interval_sec);
                retval = for_all_cpus(get_counters, EVEN_COUNTERS);
-               if (retval) {
+               if (retval < -1) {
+                       exit(retval);
+               } else if (retval == -1) {
                        re_initialize();
                        goto restart;
                }
@@ -1545,8 +1553,11 @@ void turbostat_init()
 int fork_it(char **argv)
 {
        pid_t child_pid;
+       int status;
 
-       for_all_cpus(get_counters, EVEN_COUNTERS);
+       status = for_all_cpus(get_counters, EVEN_COUNTERS);
+       if (status)
+               exit(status);
        /* clear affinity side-effect of get_counters() */
        sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
        gettimeofday(&tv_even, (struct timezone *)NULL);
@@ -1556,7 +1567,6 @@ int fork_it(char **argv)
                /* child */
                execvp(argv[0], argv);
        } else {
-               int status;
 
                /* parent */
                if (child_pid == -1) {
@@ -1568,7 +1578,7 @@ int fork_it(char **argv)
                signal(SIGQUIT, SIG_IGN);
                if (waitpid(child_pid, &status, 0) == -1) {
                        perror("wait");
-                       exit(1);
+                       exit(status);
                }
        }
        /*
@@ -1585,7 +1595,7 @@ int fork_it(char **argv)
 
        fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
 
-       return 0;
+       return status;
 }
 
 void cmdline(int argc, char **argv)
@@ -1594,7 +1604,7 @@ void cmdline(int argc, char **argv)
 
        progname = argv[0];
 
-       while ((opt = getopt(argc, argv, "+pPSvisc:sC:m:M:")) != -1) {
+       while ((opt = getopt(argc, argv, "+pPSvi:sc:sC:m:M:")) != -1) {
                switch (opt) {
                case 'p':
                        show_core_only++;