author     Peter Zijlstra <peterz@infradead.org>    2023-02-10 17:02:05 +0100
committer  Peter Zijlstra <peterz@infradead.org>    2023-11-29 09:21:51 +0100
commit     02ce3199c4e6ef07838ecf4639f0115ae6b0bda9 (patch)
tree       a8d0c7f58c6cbaf025b1faa0e806a80b5dca6e3e
parent     d27a79cb14db1b9adcdacfab2b8c134079fb9e2f (diff)
x86: Avoid lazy mm refcounting (x86/lazy)

XXX: arch/x86/power/cpu.c: load_mm_ldt(current->active_mm); /* This does lldt */

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-rw-r--r--  arch/Kconfig      36
-rw-r--r--  arch/x86/Kconfig   1
-rw-r--r--  kernel/fork.c      3
3 files changed, 24 insertions, 16 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index f4b210ab061291..fafa4e13234f98 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -475,6 +475,26 @@ config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
irqs disabled over activate_mm. Architectures that do IPI based TLB
shootdowns should enable this.
+# This option allows MMU_LAZY_TLB_REFCOUNT=n. It ensures no CPUs are using an
+# mm as a lazy tlb beyond its last reference count, by shooting down these
+# users before the mm is deallocated. __mmdrop() first IPIs all CPUs that may
+# be using the mm as a lazy tlb, so that they may switch themselves to using
+# init_mm for their active mm. mm_cpumask(mm) is used to determine which CPUs
+# may be using mm as a lazy tlb mm.
+#
+# To implement this, an arch *must*:
+# - At the time of the final mmdrop of the mm, ensure mm_cpumask(mm) contains
+# at least all possible CPUs in which the mm is lazy.
+# - It must meet the requirements for MMU_LAZY_TLB_REFCOUNT=n (see below).
+config MMU_LAZY_TLB_SHOOTDOWN
+ bool
+
+# This option allows MMU_LAZY_TLB_REFCOUNT=n. It ensures no CPUs are using an
+# mm as a lazy tlb beyond its last reference count by relying on
+# tlb_gather_mmu_fullmm().
+config MMU_LAZY_TLB_IMPLICIT_SHOOTDOWN
+ bool
+
# Use normal mm refcounting for MMU_LAZY_TLB kernel thread references.
# MMU_LAZY_TLB_REFCOUNT=n can improve the scalability of context switching
# to/from kernel threads when the same mm is running on a lot of CPUs (a large
@@ -491,21 +511,7 @@ config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
# converted already).
config MMU_LAZY_TLB_REFCOUNT
def_bool y
- depends on !MMU_LAZY_TLB_SHOOTDOWN
-
-# This option allows MMU_LAZY_TLB_REFCOUNT=n. It ensures no CPUs are using an
-# mm as a lazy tlb beyond its last reference count, by shooting down these
-# users before the mm is deallocated. __mmdrop() first IPIs all CPUs that may
-# be using the mm as a lazy tlb, so that they may switch themselves to using
-# init_mm for their active mm. mm_cpumask(mm) is used to determine which CPUs
-# may be using mm as a lazy tlb mm.
-#
-# To implement this, an arch *must*:
-# - At the time of the final mmdrop of the mm, ensure mm_cpumask(mm) contains
-# at least all possible CPUs in which the mm is lazy.
-# - It must meet the requirements for MMU_LAZY_TLB_REFCOUNT=n (see above).
-config MMU_LAZY_TLB_SHOOTDOWN
- bool
+ depends on !MMU_LAZY_TLB_SHOOTDOWN && !MMU_LAZY_TLB_IMPLICIT_SHOOTDOWN
config ARCH_HAVE_NMI_SAFE_CMPXCHG
bool
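
For reference, the explicit shootdown path that MMU_LAZY_TLB_SHOOTDOWN gates is the do_shoot_lazy_tlb()/cleanup_lazy_tlbs() pair touched in the kernel/fork.c hunk below. A simplified sketch of that mechanism, paraphrasing rather than quoting the upstream code:

static void do_shoot_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;

	/* Runs in IPI context on each CPU that may hold @mm lazily. */
	if (current->active_mm == mm) {
		WARN_ON_ONCE(current->mm);	/* lazy users are kernel threads */
		current->active_mm = &init_mm;
		switch_mm(mm, &init_mm, current);
	}
}

static void cleanup_lazy_tlbs(struct mm_struct *mm)
{
	/*
	 * Before the final __mmdrop(), kick every CPU in mm_cpumask(mm)
	 * over to init_mm, so none is left using @mm as its lazy mm.
	 */
	on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1);
}

The hunks below make this IPI broadcast unnecessary on x86 by declaring the shootdown implicit.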
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 76a238993181c6..afc409f2ea613a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -305,6 +305,7 @@ config X86
select FUNCTION_ALIGNMENT_4B
imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI
select HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
+ select MMU_LAZY_TLB_IMPLICIT_SHOOTDOWN
config INSTRUCTION_DECODER
def_bool y
diff --git a/kernel/fork.c b/kernel/fork.c
index 10917c3e1f0366..5830e7aa4f8d64 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -861,7 +861,8 @@ static void do_shoot_lazy_tlb(void *arg)
static void cleanup_lazy_tlbs(struct mm_struct *mm)
{
- if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) {
+ if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN) ||
+ IS_ENABLED(CONFIG_MMU_LAZY_TLB_IMPLICIT_SHOOTDOWN)) {
/*
* In this case, lazy tlb mms are refcounted and would not reach
* __mmdrop until all CPUs have switched away and mmdrop()ed.
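
The reason x86 can select MMU_LAZY_TLB_IMPLICIT_SHOOTDOWN and skip the explicit IPI broadcast is that exit_mmap() already ends with a full-mm TLB flush gathered via tlb_gather_mmu_fullmm(), and x86's flush IPI handler evicts lazy users as a side effect: a CPU holding the mm only lazily has no reason to flush a dying address space, so it switches to init_mm instead. A simplified sketch of that behavior, assuming the current arch/x86/mm/tlb.c naming (flush_tlb_func(), cpu_tlbstate_shared.is_lazy) rather than quoting the code verbatim:

static void flush_tlb_func(void *info)
{
	/*
	 * A lazy CPU does not need the dying mm's translations; rather
	 * than flush them, it leaves the mm entirely, dropping its lazy
	 * use of the mm before the final mmdrop() can run.
	 */
	if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) {
		switch_mm_irqs_off(NULL, &init_mm, NULL);
		return;
	}

	/* ... normal flush handling for CPUs actively using the mm ... */
}

Hence the fork.c change above: with the implicit variant selected, cleanup_lazy_tlbs() skips the IPI shootdown entirely, since the fullmm flush guarantees the mm has no remaining lazy users by the time __mmdrop() runs.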