Add a new wrapper function, soft_enabled_return(), to return the
paca->soft_enabled value.
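
Callers that currently open-code the read of local_paca->soft_enabled
can use the wrapper instead, for example (illustrative sketch only,
mirroring the hunks below):

	/* Read the per-CPU soft-enable state through the new wrapper. */
	unsigned long flags = soft_enabled_return();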
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
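+/*
+ * Return the current value of paca->soft_enabled, read as a byte
+ * from the paca via r13 (the paca pointer).
+ */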
+static inline notrace unsigned long soft_enabled_return(void)
+{
+ unsigned long flags;
+
+ asm volatile(
+ "lbz %0,%1(13)"
+ : "=r" (flags)
+ : "i" (offsetof(struct paca_struct, soft_enabled)));
+
+ return flags;
+}
+
/*
* The "memory" clobber acts as both a compiler barrier
* for the critical section and as a clobber because
static inline unsigned long arch_local_save_flags(void)
{
- unsigned long flags;
-
- asm volatile(
- "lbz %0,%1(13)"
- : "=r" (flags)
- : "i" (offsetof(struct paca_struct, soft_enabled)));
-
- return flags;
+ return soft_enabled_return();
}
static inline void arch_local_irq_disable(void)
void accumulate_stolen_time(void)
{
u64 sst, ust;
- u8 save_soft_enabled = local_paca->soft_enabled;
+ unsigned long save_soft_enabled = soft_enabled_return();
struct cpu_accounting_data *acct = &local_paca->accounting;
/* We are called early in the exception entry, before