
powerpc/64: Implement and use soft_enabled_return API
author     Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Wed, 20 Dec 2017 03:55:46 +0000 (09:25 +0530)
committer  Michael Ellerman <mpe@ellerman.id.au>
Fri, 19 Jan 2018 11:36:59 +0000 (22:36 +1100)
Add a new wrapper function, soft_enabled_return(), to return the
paca->soft_enabled value.
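
As a rough usage sketch (not part of this patch), code that previously
peeked at local_paca->soft_enabled directly can go through the new
accessor instead. The hypothetical helper below assumes the
soft_enabled_set() and arch_local_irq_disable() helpers already present
in hw_irq.h (both visible in the hunk below); ordinary callers outside
early exception entry would normally use arch_local_irq_save() /
arch_local_irq_restore() rather than open-coding this.

  /*
   * Hypothetical illustration only: snapshot paca->soft_enabled through
   * the new accessor, soft-disable interrupts, then put the saved value
   * back.  No pending-interrupt replay is attempted here.
   */
  static void example_save_restore(void)
  {
  	unsigned long save = soft_enabled_return();	/* read paca->soft_enabled */

  	arch_local_irq_disable();			/* soft-disable interrupts */
  	/* ... work done with interrupts soft-disabled ... */
  	soft_enabled_set(save);				/* restore the saved state */
  }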

Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/kernel/time.c

diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index d046d9f..f979188 100644
@@ -49,6 +49,18 @@ extern void unknown_exception(struct pt_regs *regs);
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 
+static inline notrace unsigned long soft_enabled_return(void)
+{
+       unsigned long flags;
+
+       asm volatile(
+               "lbz %0,%1(13)"
+               : "=r" (flags)
+               : "i" (offsetof(struct paca_struct, soft_enabled)));
+
+       return flags;
+}
+
 /*
  * The "memory" clobber acts as both a compiler barrier
  * for the critical section and as a clobber because
@@ -66,14 +78,7 @@ static inline notrace void soft_enabled_set(unsigned long enable)
 
 static inline unsigned long arch_local_save_flags(void)
 {
-       unsigned long flags;
-
-       asm volatile(
-               "lbz %0,%1(13)"
-               : "=r" (flags)
-               : "i" (offsetof(struct paca_struct, soft_enabled)));
-
-       return flags;
+       return soft_enabled_return();
 }
 
 static inline void arch_local_irq_disable(void)
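
For reference (an observation, not part of the change): on 64-bit the
paca pointer lives in r13, so the single lbz in the new accessor loads
the soft_enabled byte at a compile-time-constant offset from the paca.
A C-level sketch of what it computes, assuming the usual local_paca
register variable, would be:

  /* Sketch only: what the inline asm above boils down to. */
  static inline notrace unsigned long soft_enabled_return_sketch(void)
  {
  	/* local_paca is the r13-based paca pointer on ppc64 */
  	return local_paca->soft_enabled;
  }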
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 320b845..daa6e9e 100644
@@ -244,7 +244,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
 void accumulate_stolen_time(void)
 {
        u64 sst, ust;
-       u8 save_soft_enabled = local_paca->soft_enabled;
+       unsigned long save_soft_enabled = soft_enabled_return();
        struct cpu_accounting_data *acct = &local_paca->accounting;
 
        /* We are called early in the exception entry, before