author		Andy Lutomirski <luto@kernel.org>	2021-06-22 11:52:58 -0700
committer	Andy Lutomirski <luto@kernel.org>	2022-01-09 12:23:48 -0800
commit		61c1e1d11a23618095cb553e3bdb22bf43f32ac3 (patch)
tree		39e32d5d1b536c4cb956ae0f6b00e9f7848a9172
parent		db699bc56f966b7bed4a4ae93c998538495a269e (diff)
x86/mm: Opt in to IRQs-off activate_mm() (sched/lazymm)
We gain nothing by having the core code enable IRQs right before calling
activate_mm() only for us to turn them right back off again in switch_mm().

This will save a few cycles, so execve() should be blazingly fast with this
patch applied!

Signed-off-by: Andy Lutomirski <luto@kernel.org>
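For context, a simplified sketch of the core-code pattern this opts in to
(paraphrasing the exec_mmap() logic in fs/exec.c, not the verbatim source):

	/* exec_mmap(), simplified: installing the new mm at execve() */
	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;
	tsk->mm = mm;

	/*
	 * Architectures that select ARCH_WANT_IRQS_OFF_ACTIVATE_MM keep
	 * IRQs disabled across activate_mm(); everyone else gets them
	 * re-enabled here, only for the arch code to disable them again.
	 */
	if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
		local_irq_enable();
	activate_mm(active_mm, mm);
	if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
		local_irq_enable();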
 arch/x86/Kconfig                   | 1 +
 arch/x86/include/asm/mmu_context.h | 8 ++++----
 2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5060c38bf5602f..908a596619f2a2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -119,6 +119,7 @@ config X86
 	select ARCH_WANT_LD_ORPHAN_WARN
 	select ARCH_WANTS_THP_SWAP if X86_64
 	select ARCH_HAS_PARANOID_L1D_FLUSH
+	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
 	select BUILDTIME_TABLE_SORT
 	select CLKEVT_I8253
 	select CLOCKSOURCE_VALIDATE_LAST_CYCLE
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 2ca4fc4a8a0ae7..f028f1b68bc0fe 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -132,10 +132,10 @@ extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			       struct task_struct *tsk);
 #define switch_mm_irqs_off switch_mm_irqs_off
 
-#define activate_mm(prev, next)			\
-do {						\
-	paravirt_activate_mm((prev), (next));	\
-	switch_mm((prev), (next), NULL);	\
+#define activate_mm(prev, next)			\
+do {						\
+	paravirt_activate_mm((prev), (next));	\
+	switch_mm_irqs_off((prev), (next), NULL);	\
 } while (0);
 
 #ifdef CONFIG_X86_32
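For reference, switch_mm() on x86 is just an IRQ-saving wrapper around
switch_mm_irqs_off(), so once the core code guarantees IRQs are already off
across activate_mm(), calling switch_mm_irqs_off() directly drops a redundant
save/restore of the interrupt flag. A paraphrased sketch of the wrapper
(along the lines of arch/x86/mm/tlb.c, not quoted verbatim):

	/* switch_mm() only adds IRQ save/restore around the _irqs_off variant. */
	void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		       struct task_struct *tsk)
	{
		unsigned long flags;

		local_irq_save(flags);
		switch_mm_irqs_off(prev, next, tsk);
		local_irq_restore(flags);
	}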