author		Alexander Gordeev <agordeev@linux.ibm.com>	2021-05-06 20:06:00 +0200
committer	Vasily Gorbik <gor@linux.ibm.com>	2021-06-07 17:06:59 +0200
commit		bdb8c9353ead1a4176e9ba034ea7ceb210c8a59c (patch)
tree		6670cffbce3805d715ebaed93fef07cf425ca7b6
parent		27c1dac0b6d8c3b640e24a1b762c7244fbb116fd (diff)
s390/mm: ensure switch_mm() is executed with interrupts disabled
The architecture callback switch_mm() is allowed to be called with
interrupts enabled. However, the s390 implementation of switch_mm()
does not expect that. Let's follow other architectures and make
sure switch_mm() is always executed with interrupts disabled,
regardless of what happens with the generic kernel code in the
future.
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
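
For context, the generic kernel provides a fallback so that architectures
without their own switch_mm_irqs_off() still build and run. A minimal
sketch of that arrangement, as found in include/linux/mmu_context.h (the
exact wording may differ between kernel versions):

	/*
	 * Sketch of the generic fallback: if an architecture does not
	 * provide switch_mm_irqs_off(), it is simply aliased to
	 * switch_mm(). The "#define switch_mm_irqs_off switch_mm_irqs_off"
	 * added by this patch makes the s390 override visible to this
	 * check, so the scheduler picks up the irqs-off variant directly.
	 */
	#ifndef switch_mm_irqs_off
	# define switch_mm_irqs_off switch_mm
	#endif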
-rw-r--r--	arch/s390/include/asm/mmu_context.h	15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index e7cffc7b5c2f..c7937f369e62 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -70,8 +70,8 @@ static inline int init_new_context(struct task_struct *tsk,
 	return 0;
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+				      struct task_struct *tsk)
 {
 	int cpu = smp_processor_id();
 
@@ -85,6 +85,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (prev != next)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
 static inline void finish_arch_post_lock_switch(void)
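
The new wrapper uses local_irq_save()/local_irq_restore() rather than
local_irq_disable()/local_irq_enable(), so it is correct regardless of the
caller's interrupt state: callers that already run with interrupts disabled
(such as the scheduler's context-switch path) keep them disabled after the
call returns. A minimal sketch of that pattern in isolation; guarded_call()
is an illustrative name, not kernel code:

	#include <linux/irqflags.h>

	/* Hypothetical illustration of the flags-preserving pattern above. */
	static inline void guarded_call(void (*fn)(void))
	{
		unsigned long flags;

		local_irq_save(flags);		/* disable IRQs, remember prior state */
		fn();				/* runs with IRQs guaranteed off */
		local_irq_restore(flags);	/* put back whatever the caller had */
	}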