s390/mm: ensure switch_mm() is executed with interrupts disabled
author     Alexander Gordeev <agordeev@linux.ibm.com>
           Thu, 6 May 2021 18:06:00 +0000 (20:06 +0200)
committer  Vasily Gorbik <gor@linux.ibm.com>
           Mon, 7 Jun 2021 15:06:59 +0000 (17:06 +0200)

The architecture callback switch_mm() may be called with interrupts
enabled. However, our implementation of switch_mm() does not expect
that. Let's follow other architectures and make sure switch_mm() is
always executed with interrupts disabled, regardless of what happens
with the generic kernel code in the future.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
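
For context, the generic header include/linux/mmu_context.h provides
(roughly, paraphrased here as a sketch) the following fallback when an
architecture does not define its own switch_mm_irqs_off():

    /* Sketch of the generic fallback: without an arch-specific
     * switch_mm_irqs_off(), it is simply an alias for switch_mm().
     */
    #ifndef switch_mm_irqs_off
    # define switch_mm_irqs_off switch_mm
    #endif

By moving the body into switch_mm_irqs_off() and adding
"#define switch_mm_irqs_off switch_mm_irqs_off", the patch below tells
the generic code that a dedicated variant exists: callers that already
run with interrupts disabled, such as the scheduler's context-switch
path, keep using switch_mm_irqs_off() directly, while all other callers
of switch_mm() now get interrupts masked around the switch. The wrapper
uses local_irq_save()/local_irq_restore() rather than
local_irq_disable()/local_irq_enable() so it also remains correct when
called from a context that already has interrupts disabled.
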
arch/s390/include/asm/mmu_context.h

index e7cffc7..c7937f3 100644
@@ -70,8 +70,8 @@ static inline int init_new_context(struct task_struct *tsk,
        return 0;
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-                            struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+                                     struct task_struct *tsk)
 {
        int cpu = smp_processor_id();
 
@@ -85,6 +85,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        if (prev != next)
                cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                            struct task_struct *tsk)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       switch_mm_irqs_off(prev, next, tsk);
+       local_irq_restore(flags);
+}
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
 static inline void finish_arch_post_lock_switch(void)