diff options
author | Dave Hansen <dave.hansen@linux.intel.com> | 2023-05-10 08:47:17 -0700 |
---|---|---|
committer | Dave Hansen <dave.hansen@linux.intel.com> | 2023-05-10 08:47:17 -0700 |
commit | af8df0419877e8b61a7af0c477a59f6e350112c4 (patch) | |
tree | 915b4f6ed651d9ad99cb0fb34b973a850ecebc24 | |
parent | 803754e1b08d76ad4ff3c586b115a6c8d018099b (diff) | |
download | devel-shstk-reorder.tar.gz |
pcid global (shstk-reorder)
-rw-r--r-- | arch/x86/include/asm/cpufeatures.h | 1 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/common.c | 20 | ||||
-rw-r--r-- | arch/x86/mm/tlb.c | 25 |
3 files changed, 43 insertions, 3 deletions
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 3d98ce9f41fe30..0588865fa061e7 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -483,5 +483,6 @@ #define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */ #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ #define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */ +#define X86_BUG_INVLPG_MISS_GLOBAL X86_BUG(30) /* INVLPG may miss invalidating some global TLB entries */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index cc686e5039beb3..253084a54a682c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1563,6 +1563,26 @@ static void __init cpu_parse_early_param(void) add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); } +static const struct x86_cpu_id invlpg_miss_ids[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_L, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_N, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE_P, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE_S, X86_FEATURE_ANY }, + {} /* x86_match_cpu() requires an all-zero terminator */ +}; + +static void __init cpu_set_invlpg_bug_bit(struct cpuinfo_x86 *c) +{ + const struct x86_cpu_id *id = x86_match_cpu(invlpg_miss_ids); + + if (!id) + return; + + setup_force_cpu_bug(X86_BUG_INVLPG_MISS_GLOBAL); +} + /* * Do minimum CPU detection early.
* Fields really needed: vendor, cpuid_level, family, model, mask, @@ -1615,6 +1634,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) setup_force_cpu_cap(X86_FEATURE_ALWAYS); cpu_set_bug_bits(c); + cpu_set_invlpg_bug_bit(c); sld_setup(c); diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 92d73ccede70d6..416fbd81c0ad77 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -1044,9 +1044,19 @@ static void do_kernel_range_flush(void *info) void flush_tlb_kernel_range(unsigned long start, unsigned long end) { - /* Balance as user space task's flush, a bit conservative */ - if (end == TLB_FLUSH_ALL || - (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) { + bool do_all = false; + + /* + * Check first for the conditions that force a full + * flush instead of doing a true ranged flush. + */ + do_all |= (end == TLB_FLUSH_ALL); + /* Revert to flushing all if INVLPG is broken: */ + do_all |= boot_cpu_has_bug(X86_BUG_INVLPG_MISS_GLOBAL); + /* Balance like user space task's flush, a bit conservative: */ + do_all |= (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT; + + if (do_all) { on_each_cpu(do_flush_tlb_all, NULL, 1); } else { struct flush_tlb_info *info; @@ -1090,6 +1100,15 @@ void flush_tlb_one_kernel(unsigned long addr) count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); /* + * INVLPG may miss flushing global entries on some + * CPUs. Do a full TLB flush instead. + */ + if (boot_cpu_has_bug(X86_BUG_INVLPG_MISS_GLOBAL)) { + __flush_tlb_all(); + return; + } + + /* * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its * paravirt equivalent. Even with PCID, this is sufficient: we only * use PCID if we also use global PTEs for the kernel mapping, and |