authorAndrew Morton <akpm@linux-foundation.org>2024-04-08 13:15:59 -0700
committerAndrew Morton <akpm@linux-foundation.org>2024-04-08 13:15:59 -0700
commit286744c270714c7354686750f5706f5895c0045f (patch)
tree8b8febbe799a3b364b315ac03accb22685c2e8ca
parentc2bb0cb688e6970859ca33cccd1f845d106341cd (diff)
foo
-rw-r--r--patches/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch92
-rw-r--r--patches/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch119
-rw-r--r--patches/fix-missing-vmalloch-includes-fix-6.patch31
-rw-r--r--patches/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.patch44
-rw-r--r--patches/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.patch52
-rw-r--r--patches/khugepaged-use-a-folio-throughout-collapse_file-fix.patch27
-rw-r--r--patches/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.patch41
-rw-r--r--patches/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch85
-rw-r--r--patches/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch148
-rw-r--r--patches/mm-mmap-make-accountable_mapping-return-bool.patch38
-rw-r--r--patches/mm-mmap-make-vma_wants_writenotify-return-bool.patch72
-rw-r--r--patches/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.patch32
-rw-r--r--patches/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.patch57
-rw-r--r--patches/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.patch69
-rw-r--r--patches/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.patch133
-rw-r--r--patches/old/ocfs2-fix-races-between-hole-punching-and-aiodio.patch (renamed from patches/ocfs2-fix-races-between-hole-punching-and-aiodio.patch)0
-rw-r--r--patches/old/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.patch (renamed from patches/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.patch)0
-rw-r--r--patches/old/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.patch (renamed from patches/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.patch)0
-rw-r--r--patches/old/ocfs2-use-coarse-time-for-new-created-files.patch (renamed from patches/ocfs2-use-coarse-time-for-new-created-files.patch)0
-rw-r--r--pc/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.pc1
-rw-r--r--pc/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.pc1
-rw-r--r--pc/devel-series32
-rw-r--r--pc/fix-missing-vmalloch-includes-fix-6.pc1
-rw-r--r--pc/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.pc1
-rw-r--r--pc/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.pc1
-rw-r--r--pc/khugepaged-use-a-folio-throughout-collapse_file-fix.pc1
-rw-r--r--pc/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.pc1
-rw-r--r--pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.pc2
-rw-r--r--pc/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.pc1
-rw-r--r--pc/mm-mmap-make-accountable_mapping-return-bool.pc1
-rw-r--r--pc/mm-mmap-make-vma_wants_writenotify-return-bool.pc2
-rw-r--r--pc/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.pc1
-rw-r--r--pc/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.pc1
-rw-r--r--pc/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.pc1
-rw-r--r--pc/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.pc1
-rw-r--r--pc/ocfs2-fix-races-between-hole-punching-and-aiodio.pc1
-rw-r--r--pc/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.pc1
-rw-r--r--pc/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.pc1
-rw-r--r--pc/ocfs2-use-coarse-time-for-new-created-files.pc1
-rw-r--r--txt/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt13
-rw-r--r--txt/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt21
-rw-r--r--txt/fix-missing-vmalloch-includes-fix-6.txt13
-rw-r--r--txt/hugetlb-convert-hugetlb_fault-to-use-struct-vm_fault.txt2
-rw-r--r--txt/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.txt11
-rw-r--r--txt/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.txt11
-rw-r--r--txt/khugepaged-use-a-folio-throughout-collapse_file-fix.txt9
-rw-r--r--txt/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.txt24
-rw-r--r--txt/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.txt12
-rw-r--r--txt/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.txt2
-rw-r--r--txt/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.txt118
-rw-r--r--txt/mm-mmap-make-accountable_mapping-return-bool.txt12
-rw-r--r--txt/mm-mmap-make-vma_wants_writenotify-return-bool.txt12
-rw-r--r--txt/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.txt14
-rw-r--r--txt/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.txt31
-rw-r--r--txt/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.txt19
-rw-r--r--txt/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.txt31
-rw-r--r--txt/old/ocfs2-fix-races-between-hole-punching-and-aiodio.txt (renamed from txt/ocfs2-fix-races-between-hole-punching-and-aiodio.txt)0
-rw-r--r--txt/old/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.txt (renamed from txt/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.txt)0
-rw-r--r--txt/old/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.txt (renamed from txt/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.txt)0
-rw-r--r--txt/old/ocfs2-use-coarse-time-for-new-created-files.txt (renamed from txt/ocfs2-use-coarse-time-for-new-created-files.txt)0
-rw-r--r--txt/old/x86-mm-pat-fix-vm_pat-handling-in-cow-mappings.txt1
-rw-r--r--txt/s390-mm-accelerate-pagefault-when-badaccess.txt1
62 files changed, 1437 insertions, 13 deletions
diff --git a/patches/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch b/patches/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch
new file mode 100644
index 000000000..05948b801
--- /dev/null
+++ b/patches/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch
@@ -0,0 +1,92 @@
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+Subject: arm: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS
+Date: Sun, 7 Apr 2024 16:12:11 +0800
+
+On a bad map or bad access, directly set si_code to SEGV_MAPERR or
+SEGV_ACCERR, set fault to 0 and go to the error handling path. This lets
+us drop the arch's special vm_fault reasons.
+
+Link: https://lkml.kernel.org/r/20240407081211.2292362-3-wangkefeng.wang@huawei.com
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: Will Deacon <will@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ arch/arm/mm/fault.c | 28 +++++++++++++---------------
+ 1 file changed, 13 insertions(+), 15 deletions(-)
+
+--- a/arch/arm/mm/fault.c~arm-mm-drop-vm_fault_badmap-vm_fault_badaccess
++++ a/arch/arm/mm/fault.c
+@@ -226,9 +226,6 @@ void do_bad_area(unsigned long addr, uns
+ }
+
+ #ifdef CONFIG_MMU
+-#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000)
+-#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000)
+-
+ static inline bool is_permission_fault(unsigned int fsr)
+ {
+ int fs = fsr_fs(fsr);
+@@ -295,7 +292,8 @@ do_page_fault(unsigned long addr, unsign
+ if (!(vma->vm_flags & vm_flags)) {
+ vma_end_read(vma);
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+- fault = VM_FAULT_BADACCESS;
++ fault = 0;
++ code = SEGV_ACCERR;
+ goto bad_area;
+ }
+ fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
+@@ -321,7 +319,8 @@ lock_mmap:
+ retry:
+ vma = lock_mm_and_find_vma(mm, addr, regs);
+ if (unlikely(!vma)) {
+- fault = VM_FAULT_BADMAP;
++ fault = 0;
++ code = SEGV_MAPERR;
+ goto bad_area;
+ }
+
+@@ -329,10 +328,13 @@ retry:
+ * ok, we have a good vm_area for this memory access, check the
+ * permissions on the VMA allow for the fault which occurred.
+ */
+- if (!(vma->vm_flags & vm_flags))
+- fault = VM_FAULT_BADACCESS;
+- else
+- fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
++ if (!(vma->vm_flags & vm_flags)) {
++ fault = 0;
++ code = SEGV_ACCERR;
++ goto bad_area;
++ }
++
++ fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
+
+ /* If we need to retry but a fatal signal is pending, handle the
+ * signal first. We do not need to release the mmap_lock because
+@@ -358,10 +360,8 @@ retry:
+ mmap_read_unlock(mm);
+ done:
+
+- /*
+- * Handle the "normal" case first - VM_FAULT_MAJOR
+- */
+- if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
++ /* Handle the "normal" case first */
++ if (likely(!(fault & VM_FAULT_ERROR)))
+ return 0;
+
+ bad_area:
+@@ -395,8 +395,6 @@ bad_area:
+ * isn't in our memory map..
+ */
+ sig = SIGSEGV;
+- code = fault == VM_FAULT_BADACCESS ?
+- SEGV_ACCERR : SEGV_MAPERR;
+ }
+
+ __do_user_fault(addr, fsr, sig, code, regs);
+_
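
Both this patch and the arm64 patch below converge on the same handler
shape; condensed, it looks roughly like this (a sketch only; locking,
retry and accounting details from the real handlers are elided):

	if (unlikely(!vma)) {
		fault = 0;		/* no fake vm_fault_t bit any more */
		code = SEGV_MAPERR;	/* record the reason directly */
		goto bad_area;
	}
	if (!(vma->vm_flags & vm_flags)) {
		fault = 0;
		code = SEGV_ACCERR;
		goto bad_area;
	}
	fault = handle_mm_fault(vma, addr, flags, regs);
	...
bad_area:
	/* 'code' already holds SEGV_MAPERR/SEGV_ACCERR, so no decoding of
	   VM_FAULT_BADMAP/VM_FAULT_BADACCESS is needed here */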
diff --git a/patches/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch b/patches/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch
new file mode 100644
index 000000000..868cd6d1f
--- /dev/null
+++ b/patches/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch
@@ -0,0 +1,119 @@
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+Subject: arm64: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS
+Date: Sun, 7 Apr 2024 16:12:10 +0800
+
+Patch series "mm: remove arch's private VM_FAULT_BADMAP/BADACCESS".
+
+Directly set SEGV_MAPERR or SEGV_ACCERR for arm/arm64 to remove the last
+two arch-private vm_fault reasons.
+
+
+This patch (of 2):
+
+On a bad map or bad access, directly set si_code to SEGV_MAPERR or
+SEGV_ACCERR, set fault to 0 and go to the error handling path. This lets
+us drop the arch's special vm_fault reasons.
+
+Link: https://lkml.kernel.org/r/20240407081211.2292362-2-wangkefeng.wang@huawei.com
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: Will Deacon <will@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ arch/arm64/mm/fault.c | 41 +++++++++++++++++-----------------------
+ 1 file changed, 18 insertions(+), 23 deletions(-)
+
+--- a/arch/arm64/mm/fault.c~arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess
++++ a/arch/arm64/mm/fault.c
+@@ -500,9 +500,6 @@ static bool is_write_abort(unsigned long
+ return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
+ }
+
+-#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000)
+-#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000)
+-
+ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
+ struct pt_regs *regs)
+ {
+@@ -513,6 +510,7 @@ static int __kprobes do_page_fault(unsig
+ unsigned int mm_flags = FAULT_FLAG_DEFAULT;
+ unsigned long addr = untagged_addr(far);
+ struct vm_area_struct *vma;
++ int si_code;
+
+ if (kprobe_page_fault(regs, esr))
+ return 0;
+@@ -572,9 +570,10 @@ static int __kprobes do_page_fault(unsig
+
+ if (!(vma->vm_flags & vm_flags)) {
+ vma_end_read(vma);
+- fault = VM_FAULT_BADACCESS;
++ fault = 0;
++ si_code = SEGV_ACCERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+- goto done;
++ goto bad_area;
+ }
+ fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
+ if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+@@ -599,15 +598,18 @@ lock_mmap:
+ retry:
+ vma = lock_mm_and_find_vma(mm, addr, regs);
+ if (unlikely(!vma)) {
+- fault = VM_FAULT_BADMAP;
+- goto done;
++ fault = 0;
++ si_code = SEGV_MAPERR;
++ goto bad_area;
+ }
+
+- if (!(vma->vm_flags & vm_flags))
+- fault = VM_FAULT_BADACCESS;
+- else
+- fault = handle_mm_fault(vma, addr, mm_flags, regs);
++ if (!(vma->vm_flags & vm_flags)) {
++ fault = 0;
++ si_code = SEGV_ACCERR;
++ goto bad_area;
++ }
+
++ fault = handle_mm_fault(vma, addr, mm_flags, regs);
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+@@ -626,13 +628,11 @@ retry:
+ mmap_read_unlock(mm);
+
+ done:
+- /*
+- * Handle the "normal" (no error) case first.
+- */
+- if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
+- VM_FAULT_BADACCESS))))
++ /* Handle the "normal" (no error) case first. */
++ if (likely(!(fault & VM_FAULT_ERROR)))
+ return 0;
+
++bad_area:
+ /*
+ * If we are in kernel mode at this point, we have no context to
+ * handle this fault with.
+@@ -667,13 +667,8 @@ done:
+
+ arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
+ } else {
+- /*
+- * Something tried to access memory that isn't in our memory
+- * map.
+- */
+- arm64_force_sig_fault(SIGSEGV,
+- fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
+- far, inf->name);
++		/* Something tried to access memory that isn't in our memory map. */
++ arm64_force_sig_fault(SIGSEGV, si_code, far, inf->name);
+ }
+
+ return 0;
+_
diff --git a/patches/fix-missing-vmalloch-includes-fix-6.patch b/patches/fix-missing-vmalloch-includes-fix-6.patch
new file mode 100644
index 000000000..1ef5f5786
--- /dev/null
+++ b/patches/fix-missing-vmalloch-includes-fix-6.patch
@@ -0,0 +1,31 @@
+From: Suren Baghdasaryan <surenb@google.com>
+Subject: fixup! fix missing vmalloc.h includes
+Date: Fri, 5 Apr 2024 15:51:15 -0700
+
+fix arc build
+
+Link: https://lkml.kernel.org/r/20240405225115.431056-1-surenb@google.com
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202404050828.5pKgmCLu-lkp@intel.com/
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: kernel test robot <lkp@intel.com>
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ arch/arc/include/asm/mmu-arcv2.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arc/include/asm/mmu-arcv2.h~fix-missing-vmalloch-includes-fix-6
++++ a/arch/arc/include/asm/mmu-arcv2.h
+@@ -9,6 +9,8 @@
+ #ifndef _ASM_ARC_MMU_ARCV2_H
+ #define _ASM_ARC_MMU_ARCV2_H
+
++#include <soc/arc/aux.h>
++
+ /*
+ * TLB Management regs
+ */
+_
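
The failure mode can be illustrated in miniature. In a hypothetical
translation unit that pulls in alloc_tag.h without otherwise including
<linux/irqflags.h>, expanding a generic this_cpu op references
raw_local_irq_save()/raw_local_irq_restore() with no declaration in
scope (the names below are illustrative, not from the patch):

	#include <linux/alloc_tag.h>	/* before the fix: no irqflags.h */
	#include <linux/percpu-defs.h>

	static DEFINE_PER_CPU(unsigned long, demo_counter);

	static inline void demo_bump(void)
	{
		/* on configs using the generic fallback, this expands through
		 * this_cpu_generic_to_op() and needs raw_local_irq_save() */
		this_cpu_inc(demo_counter);
	}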
diff --git a/patches/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.patch b/patches/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.patch
new file mode 100644
index 000000000..921ef8f19
--- /dev/null
+++ b/patches/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.patch
@@ -0,0 +1,44 @@
+From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
+Subject: hugetlb: simplify hugetlb_no_page() arguments
+Date: Mon, 8 Apr 2024 10:17:54 -0700
+
+Simplify the function arguments, as suggested by Oscar and Muchun.
+
+Link: https://lkml.kernel.org/r/ZhQtN8y5zud8iI1u@fedora
+Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Suggested-by: Muchun Song <muchun.song@linux.dev>
+Suggested-by: Oscar Salvador <osalvador@suse.de>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/hugetlb.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/mm/hugetlb.c~hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix
++++ a/mm/hugetlb.c
+@@ -6185,11 +6185,11 @@ static bool hugetlb_pte_stable(struct hs
+ return same;
+ }
+
+-static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+- struct address_space *mapping,
++static vm_fault_t hugetlb_no_page(struct address_space *mapping,
+ struct vm_fault *vmf)
+ {
++ struct vm_area_struct *vma = vmf->vma;
++ struct mm_struct *mm = vma->vm_mm;
+ struct hstate *h = hstate_vma(vma);
+ vm_fault_t ret = VM_FAULT_SIGBUS;
+ int anon_rmap = 0;
+@@ -6483,7 +6483,7 @@ vm_fault_t hugetlb_fault(struct mm_struc
+ * hugetlb_no_page will drop vma lock and hugetlb fault
+ * mutex internally, which make us return immediately.
+ */
+- return hugetlb_no_page(mm, vma, mapping, &vmf);
++ return hugetlb_no_page(mapping, &vmf);
+ }
+
+ ret = 0;
+_
diff --git a/patches/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.patch b/patches/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.patch
new file mode 100644
index 000000000..3e28e5144
--- /dev/null
+++ b/patches/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.patch
@@ -0,0 +1,52 @@
+From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
+Subject: hugetlb: Simplify hugetlb_wp() arguments
+Date: Mon, 8 Apr 2024 10:21:44 -0700
+
+Simplify the function arguments, per Oscar and Muchun.
+
+Link: https://lkml.kernel.org/r/ZhQtoFNZBNwBCeXn@fedora
+Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Suggested-by: Muchun Song <muchun.song@linux.dev>
+Suggested-by: Oscar Salvador <osalvador@suse.de>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/hugetlb.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/mm/hugetlb.c~hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix
++++ a/mm/hugetlb.c
+@@ -5915,10 +5915,11 @@ static void unmap_ref_private(struct mm_
+ * cannot race with other handlers or page migration.
+ * Keep the pte_same checks anyway to make transition from the mutex easier.
+ */
+-static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
+- struct folio *pagecache_folio,
++static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
+ struct vm_fault *vmf)
+ {
++ struct vm_area_struct *vma = vmf->vma;
++ struct mm_struct *mm = vma->vm_mm;
+ const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
+ pte_t pte = huge_ptep_get(vmf->pte);
+ struct hstate *h = hstate_vma(vma);
+@@ -6364,7 +6365,7 @@ static vm_fault_t hugetlb_no_page(struct
+ hugetlb_count_add(pages_per_huge_page(h), mm);
+ if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+ /* Optimization, do the COW without a second fault */
+- ret = hugetlb_wp(mm, vma, folio, vmf);
++ ret = hugetlb_wp(folio, vmf);
+ }
+
+ spin_unlock(vmf->ptl);
+@@ -6577,7 +6578,7 @@ vm_fault_t hugetlb_fault(struct mm_struc
+
+ if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
+ if (!huge_pte_write(vmf.orig_pte)) {
+- ret = hugetlb_wp(mm, vma, pagecache_folio, &vmf);
++ ret = hugetlb_wp(pagecache_folio, &vmf);
+ goto out_put_page;
+ } else if (likely(flags & FAULT_FLAG_WRITE)) {
+ vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
+_
diff --git a/patches/khugepaged-use-a-folio-throughout-collapse_file-fix.patch b/patches/khugepaged-use-a-folio-throughout-collapse_file-fix.patch
new file mode 100644
index 000000000..b9e3f8b3b
--- /dev/null
+++ b/patches/khugepaged-use-a-folio-throughout-collapse_file-fix.patch
@@ -0,0 +1,27 @@
+From: Matthew Wilcox <willy@infradead.org>
+Subject: khugepaged-use-a-folio-throughout-collapse_file-fix
+Date: Sun, 7 Apr 2024 04:43:27 +0100
+
+The unlikely() is embedded in IS_ERR(), so there is no need to keep it here.
+
+Link: https://lkml.kernel.org/r/ZhIWX8K0E2tSyMSr@casper.infradead.org
+Signed-off-by: Matthew Wilcox <willy@infradead.org>
+Reported-by: Vishal Moola <vishal.moola@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/khugepaged.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/khugepaged.c~khugepaged-use-a-folio-throughout-collapse_file-fix
++++ a/mm/khugepaged.c
+@@ -1865,7 +1865,7 @@ static int collapse_file(struct mm_struc
+ /* drain lru cache to help isolate_lru_page() */
+ lru_add_drain();
+ folio = filemap_lock_folio(mapping, index);
+- if (unlikely(folio == NULL)) {
++ if (IS_ERR(folio)) {
+ result = SCAN_FAIL;
+ goto xa_unlocked;
+ }
+_
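
For context: filemap_lock_folio() reports a missing folio with an
ERR_PTR() rather than NULL, so IS_ERR() is the correct failure check
here, and IS_ERR() already carries the unlikely() hint internally.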
diff --git a/patches/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.patch b/patches/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.patch
new file mode 100644
index 000000000..e6734ad4f
--- /dev/null
+++ b/patches/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.patch
@@ -0,0 +1,41 @@
+From: Klara Modin <klarasmodin@gmail.com>
+Subject: mm/memprofiling: explicitly include irqflags.h in alloc_tag.h
+Date: Sun, 7 Apr 2024 15:32:52 +0200
+
+linux/alloc_tag.h uses the macro this_cpu_inc which eventually expands to:
+
+ #define this_cpu_generic_to_op(pcp, val, op) \
+ do { \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ raw_cpu_generic_to_op(pcp, val, op); \
+ raw_local_irq_restore(__flags); \
+ } while (0)
+
+The macros raw_local_irq_save and raw_local_irq_restore are defined in
+linux/irqflags.h which is not included implicitly on all configs.
+Therefore, include it explicitly.
+
+Link: https://lkml.kernel.org/r/20240407133252.173636-1-klarasmodin@gmail.com
+Fixes: ac906a377c67 ("lib: add allocation tagging support for memory allocation profiling")
+Link: https://lore.kernel.org/lkml/6b8149f3-80e6-413c-abcb-1925ecda9d8c@gmail.com/
+Signed-off-by: Klara Modin <klarasmodin@gmail.com>
+Acked-by: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/alloc_tag.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/linux/alloc_tag.h~lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3
++++ a/include/linux/alloc_tag.h
+@@ -12,6 +12,7 @@
+ #include <asm/percpu.h>
+ #include <linux/cpumask.h>
+ #include <linux/static_key.h>
++#include <linux/irqflags.h>
+
+ struct alloc_tag_counters {
+ u64 bytes;
+_
diff --git a/patches/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch b/patches/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch
new file mode 100644
index 000000000..0fd79e34c
--- /dev/null
+++ b/patches/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch
@@ -0,0 +1,85 @@
+From: Barry Song <v-songbaohua@oppo.com>
+Subject: mm: fix powerpc build issue
+Date: Sun, 7 Apr 2024 12:23:35 +1200
+
+On powerpc, PMD_ORDER is not a compile-time constant, so we should
+transition from a static percpu array to using alloc_percpu().
+
+Link: https://lkml.kernel.org/r/20240407042247.201412-1-21cnbao@gmail.com
+Signed-off-by: Barry Song <v-songbaohua@oppo.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202404061754.n8jmZ6s3-lkp@intel.com/
+Tested-by: Yujie Liu <yujie.liu@intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/huge_mm.h | 9 ++++-----
+ mm/huge_memory.c | 12 ++++++++++--
+ 2 files changed, 14 insertions(+), 7 deletions(-)
+
+--- a/include/linux/huge_mm.h~mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix
++++ a/include/linux/huge_mm.h
+@@ -271,16 +271,15 @@ enum mthp_stat_item {
+ };
+
+ struct mthp_stat {
+- unsigned long stats[PMD_ORDER + 1][__MTHP_STAT_COUNT];
++ unsigned long stats[0][__MTHP_STAT_COUNT];
+ };
+
+-DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
++extern struct mthp_stat __percpu *mthp_stats;
+
+ static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+ {
+- if (unlikely(order > PMD_ORDER))
+- return;
+- this_cpu_inc(mthp_stats.stats[order][item]);
++ if (likely(order <= PMD_ORDER))
++ raw_cpu_ptr(mthp_stats)->stats[order][item]++;
+ }
+
+ #define transparent_hugepage_use_zero_page() \
+--- a/mm/huge_memory.c~mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix
++++ a/mm/huge_memory.c
+@@ -526,7 +526,7 @@ static const struct kobj_type thpsize_kt
+ .sysfs_ops = &kobj_sysfs_ops,
+ };
+
+-DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};
++struct mthp_stat __percpu *mthp_stats;
+
+ static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
+ {
+@@ -534,7 +534,7 @@ static unsigned long sum_mthp_stat(int o
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+- struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
++ struct mthp_stat *this = per_cpu_ptr(mthp_stats, cpu);
+
+ sum += this->stats[order][item];
+ }
+@@ -636,6 +636,13 @@ static int __init hugepage_init_sysfs(st
+ goto remove_hp_group;
+ }
+
++ mthp_stats = __alloc_percpu((PMD_ORDER + 1) * sizeof(mthp_stats->stats[0]),
++ sizeof(unsigned long));
++ if (!mthp_stats) {
++ err = -ENOMEM;
++ goto remove_hp_group;
++ }
++
+ orders = THP_ORDERS_ALL_ANON;
+ order = highest_order(orders);
+ while (orders) {
+@@ -673,6 +680,7 @@ static void __init hugepage_exit_sysfs(s
+ sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
+ sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
+ kobject_put(hugepage_kobj);
++ free_percpu(mthp_stats);
+ }
+ #else
+ static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
+_
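
The zero-length first dimension is what makes the runtime sizing work:
sizeof(mthp_stats->stats[0]) is still well defined (one row of
__MTHP_STAT_COUNT counters), so the per-cpu buffer can be sized as
(PMD_ORDER + 1) rows even though PMD_ORDER is not a compile-time
constant on powerpc. A condensed sketch of the resulting layout:

	struct mthp_stat {
		/* rows, indexed by order, are sized at allocation time */
		unsigned long stats[0][__MTHP_STAT_COUNT];
	};

	/* one (PMD_ORDER + 1) x __MTHP_STAT_COUNT table per CPU */
	mthp_stats = __alloc_percpu((PMD_ORDER + 1) * sizeof(mthp_stats->stats[0]),
				    sizeof(unsigned long));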
diff --git a/patches/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch b/patches/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch
new file mode 100644
index 000000000..fc8800d22
--- /dev/null
+++ b/patches/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch
@@ -0,0 +1,148 @@
+From: Miaohe Lin <linmiaohe@huawei.com>
+Subject: mm/memory-failure: fix deadlock when hugetlb_optimize_vmemmap is enabled
+Date: Sun, 7 Apr 2024 16:54:56 +0800
+
+When I did a hard offline test with hugetlb pages, the below deadlock occurred:
+
+======================================================
+WARNING: possible circular locking dependency detected
+6.8.0-11409-gf6cef5f8c37f #1 Not tainted
+------------------------------------------------------
+bash/46904 is trying to acquire lock:
+ffffffffabe68910 (cpu_hotplug_lock){++++}-{0:0}, at: static_key_slow_dec+0x16/0x60
+
+but task is already holding lock:
+ffffffffabf92ea8 (pcp_batch_high_lock){+.+.}-{3:3}, at: zone_pcp_disable+0x16/0x40
+
+which lock already depends on the new lock.
+
+the existing dependency chain (in reverse order) is:
+
+-> #1 (pcp_batch_high_lock){+.+.}-{3:3}:
+ __mutex_lock+0x6c/0x770
+ page_alloc_cpu_online+0x3c/0x70
+ cpuhp_invoke_callback+0x397/0x5f0
+ __cpuhp_invoke_callback_range+0x71/0xe0
+ _cpu_up+0xeb/0x210
+ cpu_up+0x91/0xe0
+ cpuhp_bringup_mask+0x49/0xb0
+ bringup_nonboot_cpus+0xb7/0xe0
+ smp_init+0x25/0xa0
+ kernel_init_freeable+0x15f/0x3e0
+ kernel_init+0x15/0x1b0
+ ret_from_fork+0x2f/0x50
+ ret_from_fork_asm+0x1a/0x30
+
+-> #0 (cpu_hotplug_lock){++++}-{0:0}:
+ __lock_acquire+0x1298/0x1cd0
+ lock_acquire+0xc0/0x2b0
+ cpus_read_lock+0x2a/0xc0
+ static_key_slow_dec+0x16/0x60
+ __hugetlb_vmemmap_restore_folio+0x1b9/0x200
+ dissolve_free_huge_page+0x211/0x260
+ __page_handle_poison+0x45/0xc0
+ memory_failure+0x65e/0xc70
+ hard_offline_page_store+0x55/0xa0
+ kernfs_fop_write_iter+0x12c/0x1d0
+ vfs_write+0x387/0x550
+ ksys_write+0x64/0xe0
+ do_syscall_64+0xca/0x1e0
+ entry_SYSCALL_64_after_hwframe+0x6d/0x75
+
+other info that might help us debug this:
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(pcp_batch_high_lock);
+ lock(cpu_hotplug_lock);
+ lock(pcp_batch_high_lock);
+ rlock(cpu_hotplug_lock);
+
+ *** DEADLOCK ***
+
+5 locks held by bash/46904:
+ #0: ffff98f6c3bb23f0 (sb_writers#5){.+.+}-{0:0}, at: ksys_write+0x64/0xe0
+ #1: ffff98f6c328e488 (&of->mutex){+.+.}-{3:3}, at: kernfs_fop_write_iter+0xf8/0x1d0
+ #2: ffff98ef83b31890 (kn->active#113){.+.+}-{0:0}, at: kernfs_fop_write_iter+0x100/0x1d0
+ #3: ffffffffabf9db48 (mf_mutex){+.+.}-{3:3}, at: memory_failure+0x44/0xc70
+ #4: ffffffffabf92ea8 (pcp_batch_high_lock){+.+.}-{3:3}, at: zone_pcp_disable+0x16/0x40
+
+stack backtrace:
+CPU: 10 PID: 46904 Comm: bash Kdump: loaded Not tainted 6.8.0-11409-gf6cef5f8c37f #1
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x68/0xa0
+ check_noncircular+0x129/0x140
+ __lock_acquire+0x1298/0x1cd0
+ lock_acquire+0xc0/0x2b0
+ cpus_read_lock+0x2a/0xc0
+ static_key_slow_dec+0x16/0x60
+ __hugetlb_vmemmap_restore_folio+0x1b9/0x200
+ dissolve_free_huge_page+0x211/0x260
+ __page_handle_poison+0x45/0xc0
+ memory_failure+0x65e/0xc70
+ hard_offline_page_store+0x55/0xa0
+ kernfs_fop_write_iter+0x12c/0x1d0
+ vfs_write+0x387/0x550
+ ksys_write+0x64/0xe0
+ do_syscall_64+0xca/0x1e0
+ entry_SYSCALL_64_after_hwframe+0x6d/0x75
+RIP: 0033:0x7fc862314887
+Code: 10 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 48 89 54 24 18 48 89 74 24
+RSP: 002b:00007fff19311268 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 000000000000000c RCX: 00007fc862314887
+RDX: 000000000000000c RSI: 000056405645fe10 RDI: 0000000000000001
+RBP: 000056405645fe10 R08: 00007fc8623d1460 R09: 000000007fffffff
+R10: 0000000000000000 R11: 0000000000000246 R12: 000000000000000c
+R13: 00007fc86241b780 R14: 00007fc862417600 R15: 00007fc862416a00
+
+In short, the following scenario breaks the lock dependency chain:
+
+ memory_failure
+ __page_handle_poison
+ zone_pcp_disable -- lock(pcp_batch_high_lock)
+ dissolve_free_huge_page
+ __hugetlb_vmemmap_restore_folio
+ static_key_slow_dec
+ cpus_read_lock -- rlock(cpu_hotplug_lock)
+
+Fix this by calling drain_all_pages() instead.
+
+Link: https://lkml.kernel.org/r/20240407085456.2798193-1-linmiaohe@huawei.com
+Fixes: 510d25c92ec4a ("mm/hwpoison: disable pcp for page_handle_poison()")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/memory-failure.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/mm/memory-failure.c~mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled
++++ a/mm/memory-failure.c
+@@ -154,11 +154,17 @@ static int __page_handle_poison(struct p
+ {
+ int ret;
+
+- zone_pcp_disable(page_zone(page));
++ /*
++ * zone_pcp_disable() can't be used here. It will hold pcp_batch_high_lock and
++ * dissolve_free_huge_page() might hold cpu_hotplug_lock via static_key_slow_dec()
++	 * when hugetlb vmemmap optimization is enabled. This would break the
++	 * lock dependency chain and lead to deadlock.
++ */
+ ret = dissolve_free_huge_page(page);
+- if (!ret)
++ if (!ret) {
++ drain_all_pages(page_zone(page));
+ ret = take_page_off_buddy(page);
+- zone_pcp_enable(page_zone(page));
++ }
+
+ return ret;
+ }
+_
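
The idea of the fix, as the new comment and the call order suggest:
rather than holding pcp_batch_high_lock across dissolve_free_huge_page()
(which may take cpu_hotplug_lock underneath), let the dissolve happen
first and then drain the per-cpu lists, so the freed page is back in the
buddy allocator by the time take_page_off_buddy() looks for it.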
diff --git a/patches/mm-mmap-make-accountable_mapping-return-bool.patch b/patches/mm-mmap-make-accountable_mapping-return-bool.patch
new file mode 100644
index 000000000..1abcae439
--- /dev/null
+++ b/patches/mm-mmap-make-accountable_mapping-return-bool.patch
@@ -0,0 +1,38 @@
+From: Hao Ge <gehao@kylinos.cn>
+Subject: mm/mmap: make accountable_mapping return bool
+Date: Sun, 7 Apr 2024 14:38:43 +0800
+
+accountable_mapping() can return bool, so change it to do so.
+
+Link: https://lkml.kernel.org/r/20240407063843.804274-1-gehao@kylinos.cn
+Signed-off-by: Hao Ge <gehao@kylinos.cn>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Lorenzo Stoakes <lstoakes@gmail.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/mmap.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/mmap.c~mm-mmap-make-accountable_mapping-return-bool
++++ a/mm/mmap.c
+@@ -1549,14 +1549,14 @@ bool vma_wants_writenotify(struct vm_are
+ * We account for memory if it's a private writeable mapping,
+ * not hugepages and VM_NORESERVE wasn't set.
+ */
+-static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
++static inline bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
+ {
+ /*
+ * hugetlb has its own accounting separate from the core VM
+ * VM_HUGETLB may not be set yet so we cannot check for that flag.
+ */
+ if (file && is_file_hugepages(file))
+- return 0;
++ return false;
+
+ return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
+ }
+_
diff --git a/patches/mm-mmap-make-vma_wants_writenotify-return-bool.patch b/patches/mm-mmap-make-vma_wants_writenotify-return-bool.patch
new file mode 100644
index 000000000..4ca6e519c
--- /dev/null
+++ b/patches/mm-mmap-make-vma_wants_writenotify-return-bool.patch
@@ -0,0 +1,72 @@
+From: Hao Ge <gehao@kylinos.cn>
+Subject: mm/mmap: make vma_wants_writenotify return bool
+Date: Sun, 7 Apr 2024 14:26:53 +0800
+
+vma_wants_writenotify() should return bool, so change it to do so.
+
+Link: https://lkml.kernel.org/r/20240407062653.803142-1-gehao@kylinos.cn
+Signed-off-by: Hao Ge <gehao@kylinos.cn>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Lorenzo Stoakes <lstoakes@gmail.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/mm.h | 2 +-
+ mm/mmap.c | 12 ++++++------
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/include/linux/mm.h~mm-mmap-make-vma_wants_writenotify-return-bool
++++ a/include/linux/mm.h
+@@ -2575,7 +2575,7 @@ extern unsigned long move_page_tables(st
+ MM_CP_UFFD_WP_RESOLVE)
+
+ bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
+-int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
++bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
+ static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
+ {
+ /*
+--- a/mm/mmap.c~mm-mmap-make-vma_wants_writenotify-return-bool
++++ a/mm/mmap.c
+@@ -1514,32 +1514,32 @@ bool vma_needs_dirty_tracking(struct vm_
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+-int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
++bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
+ {
+ /* If it was private or non-writable, the write bit is already clear */
+ if (!vma_is_shared_writable(vma))
+- return 0;
++ return false;
+
+ /* The backer wishes to know when pages are first written to? */
+ if (vm_ops_needs_writenotify(vma->vm_ops))
+- return 1;
++ return true;
+
+ /* The open routine did something to the protections that pgprot_modify
+ * won't preserve? */
+ if (pgprot_val(vm_page_prot) !=
+ pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
+- return 0;
++ return false;
+
+ /*
+ * Do we need to track softdirty? hugetlb does not support softdirty
+ * tracking yet.
+ */
+ if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
+- return 1;
++ return true;
+
+ /* Do we need write faults for uffd-wp tracking? */
+ if (userfaultfd_wp(vma))
+- return 1;
++ return true;
+
+ /* Can the mapping track the dirty pages? */
+ return vma_fs_can_writeback(vma);
+_
diff --git a/patches/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.patch b/patches/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.patch
new file mode 100644
index 000000000..8abe05bb0
--- /dev/null
+++ b/patches/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.patch
@@ -0,0 +1,32 @@
+From: Suren Baghdasaryan <surenb@google.com>
+Subject: fixup! increase PERCPU_MODULE_RESERVE to accommodate allocation tags
+Date: Sat, 6 Apr 2024 14:40:43 -0700
+
+The previous increase of the per-cpu reserved area for modules was not
+enough to accommodate all allocation tags in certain use cases. Increase
+it further to fix the issue.
+
+Link: https://lkml.kernel.org/r/20240406214044.1114406-1-surenb@google.com
+Fixes: a11cb5c8e248 ("mm: percpu: increase PERCPU_MODULE_RESERVE to accommodate allocation tags")
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Reported-by: Klara Modin <klarasmodin@gmail.com>
+Tested-by: Klara Modin <klarasmodin@gmail.com>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/percpu.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/percpu.h~mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix
++++ a/include/linux/percpu.h
+@@ -15,7 +15,7 @@
+ /* enough to cover all DEFINE_PER_CPUs in modules */
+ #ifdef CONFIG_MODULES
+ #ifdef CONFIG_MEM_ALLOC_PROFILING
+-#define PERCPU_MODULE_RESERVE (8 << 12)
++#define PERCPU_MODULE_RESERVE (8 << 13)
+ #else
+ #define PERCPU_MODULE_RESERVE (8 << 10)
+ #endif
+_
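
For scale: 8 << 12 is 32 KiB and 8 << 13 is 64 KiB of per-cpu reserve,
so this doubles the CONFIG_MEM_ALLOC_PROFILING reserve; the
non-profiling reserve remains 8 << 10 = 8 KiB.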
diff --git a/patches/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.patch b/patches/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.patch
new file mode 100644
index 000000000..9ff03d743
--- /dev/null
+++ b/patches/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.patch
@@ -0,0 +1,57 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: mm/userfaultfd: Allow hugetlb change protection upon poison entry
+Date: Fri, 5 Apr 2024 19:19:20 -0400
+
+After UFFDIO_POISON, there can be two kinds of hugetlb pte markers,
+either the POISON one or the UFFD_WP one.
+
+Allow change protection to run on a poisoned marker just like in the
+!hugetlb case, ignoring the marker irrespective of the permission.
+
+Here the two bits are mutually exclusive. For example, when installing a
+poisoned entry it must not already be UFFD_WP (we check pte_none() before
+such an install). Conversely, if UFFD_WP is set there must be no POISON
+bit. This makes sense because UFFD_WP reflects permission, and
+permissions do not apply if the pte is poisoned and destined to sigbus.
+
+So here we simply check the uffd_wp bit first, and do nothing otherwise.
+
+Attach the Fixes tag to the UFFDIO_POISON work; before that it should not
+be possible to have a poison entry for hugetlb (e.g., hugetlb doesn't do
+swap, so there is no chance of swapin errors).
+
+Link: https://lkml.kernel.org/r/20240405231920.1772199-1-peterx@redhat.com
+Link: https://lore.kernel.org/r/000000000000920d5e0615602dd1@google.com
+Reported-by: syzbot+b07c8ac8eee3d4d8440f@syzkaller.appspotmail.com
+Fixes: fc71884a5f59 ("mm: userfaultfd: add new UFFDIO_POISON ioctl")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org> [6.6+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/hugetlb.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/mm/hugetlb.c~mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry
++++ a/mm/hugetlb.c
+@@ -7044,9 +7044,13 @@ long hugetlb_change_protection(struct vm
+ if (!pte_same(pte, newpte))
+ set_huge_pte_at(mm, address, ptep, newpte, psize);
+ } else if (unlikely(is_pte_marker(pte))) {
+- /* No other markers apply for now. */
+- WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
+- if (uffd_wp_resolve)
++ /*
++ * Do nothing on a poison marker; page is
++		 * corrupted, permissions do not apply. Here
++		 * pte_marker_uffd_wp()==true implies !poison
++		 * because they're mutually exclusive.
++ */
++ if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
+ /* Safe to modify directly (non-present->none). */
+ huge_pte_clear(mm, address, ptep, psize);
+ } else if (!huge_pte_none(pte)) {
+_
diff --git a/patches/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.patch b/patches/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.patch
new file mode 100644
index 000000000..f9d540389
--- /dev/null
+++ b/patches/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.patch
@@ -0,0 +1,69 @@
+From: Huang Ying <ying.huang@intel.com>
+Subject: mm,swap: add document about RCU read lock and swapoff interaction
+Date: Sun, 7 Apr 2024 14:54:50 +0800
+
+While reviewing a patch to fix the race condition between
+free_swap_and_cache() and swapoff() [1], it was found that the
+documentation about how to prevent racing with swapoff isn't clear
+enough; in particular, that the RCU read lock can prevent swapoff from
+freeing data structures. So, add that documentation as comments.
+
+[1] https://lore.kernel.org/linux-mm/c8fe62d0-78b8-527a-5bef-ee663ccdc37a@huawei.com/
+
+Link: https://lkml.kernel.org/r/20240407065450.498821-1-ying.huang@intel.com
+Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
+Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/swapfile.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/mm/swapfile.c~mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction
++++ a/mm/swapfile.c
+@@ -1238,16 +1238,15 @@ static unsigned char __swap_entry_free_l
+
+ /*
+ * When we get a swap entry, if there aren't some other ways to
+- * prevent swapoff, such as the folio in swap cache is locked, page
+- * table lock is held, etc., the swap entry may become invalid because
+- * of swapoff. Then, we need to enclose all swap related functions
+- * with get_swap_device() and put_swap_device(), unless the swap
+- * functions call get/put_swap_device() by themselves.
++ * prevent swapoff, such as the folio in swap cache is locked, RCU
++ * reader side is locked, etc., the swap entry may become invalid
++ * because of swapoff. Then, we need to enclose all swap related
++ * functions with get_swap_device() and put_swap_device(), unless the
++ * swap functions call get/put_swap_device() by themselves.
+ *
+- * Note that when only holding the PTL, swapoff might succeed immediately
+- * after freeing a swap entry. Therefore, immediately after
+- * __swap_entry_free(), the swap info might become stale and should not
+- * be touched without a prior get_swap_device().
++ * RCU reader side lock (including any spinlock) is sufficient to
++ * prevent swapoff, because synchronize_rcu() is called in swapoff()
++ * before freeing data structures.
+ *
+ * Check whether swap entry is valid in the swap device. If so,
+ * return pointer to swap_info_struct, and keep the swap entry valid
+@@ -2531,10 +2530,11 @@ SYSCALL_DEFINE1(swapoff, const char __us
+
+ /*
+ * Wait for swap operations protected by get/put_swap_device()
+- * to complete.
+- *
+- * We need synchronize_rcu() here to protect the accessing to
+- * the swap cache data structure.
++ * to complete. Because of synchronize_rcu() here, all swap
++ * operations protected by RCU reader side lock (including any
++	 * spinlock) will be waited for as well. This makes it easy to
++ * prevent folio_test_swapcache() and the following swap cache
++ * operations from racing with swapoff.
+ */
+ percpu_ref_kill(&p->users);
+ synchronize_rcu();
+_
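
The convention the new comment documents looks like this from the caller
side (a minimal hypothetical sketch; real callers add their own error
handling):

	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return;		/* stale entry, e.g. raced with swapoff */
	/* ... access swap metadata for 'entry'; swapoff is held off ... */
	put_swap_device(si);

Per the updated comment, holding an RCU read-side lock (or any spinlock)
across the access is an equivalent guard, because swapoff() calls
synchronize_rcu() before freeing its data structures.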
diff --git a/patches/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.patch b/patches/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.patch
new file mode 100644
index 000000000..0993847cf
--- /dev/null
+++ b/patches/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.patch
@@ -0,0 +1,133 @@
+From: Oscar Salvador <osalvador@suse.de>
+Subject: mm,swapops: update check in is_pfn_swap_entry for hwpoison entries
+Date: Sun, 7 Apr 2024 15:05:37 +0200
+
+Tony reported that machine check recovery was broken in v6.9-rc1, as he
+was hitting a VM_BUG_ON when injecting uncorrectable memory errors into
+DRAM.
+
+After some more digging and debugging on his side, he realized that this
+went back to v6.1, with the introduction of 'commit 0d206b5d2e0d
+("mm/swap: add swp_offset_pfn() to fetch PFN from swap entry")'. That
+commit, among other things, introduced swp_offset_pfn(), replacing
+hwpoison_entry_to_pfn() in its favour.
+
+The patch also introduced a VM_BUG_ON() check for is_pfn_swap_entry(), but
+is_pfn_swap_entry() never got updated to cover hwpoison entries, which
+means that we would hit the VM_BUG_ON whenever we called
+swp_offset_pfn() for such entries in environments with CONFIG_DEBUG_VM
+set. Fix this by updating the check to cover hwpoison entries as well,
+and update the comment while we are at it.
+
+Link: https://lkml.kernel.org/r/20240407130537.16977-1-osalvador@suse.de
+Fixes: 0d206b5d2e0d ("mm/swap: add swp_offset_pfn() to fetch PFN from swap entry")
+Signed-off-by: Oscar Salvador <osalvador@suse.de>
+Reported-by: Tony Luck <tony.luck@intel.com>
+Closes: https://lore.kernel.org/all/Zg8kLSl2yAlA3o5D@agluck-desk3/
+Tested-by: Tony Luck <tony.luck@intel.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Acked-by: Miaohe Lin <linmiaohe@huawei.com>
+Cc: <stable@vger.kernel.org> [6.1.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/swapops.h | 65 +++++++++++++++++++-------------------
+ 1 file changed, 33 insertions(+), 32 deletions(-)
+
+--- a/include/linux/swapops.h~mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries
++++ a/include/linux/swapops.h
+@@ -390,6 +390,35 @@ static inline bool is_migration_entry_di
+ }
+ #endif /* CONFIG_MIGRATION */
+
++#ifdef CONFIG_MEMORY_FAILURE
++
++/*
++ * Support for hardware poisoned pages
++ */
++static inline swp_entry_t make_hwpoison_entry(struct page *page)
++{
++ BUG_ON(!PageLocked(page));
++ return swp_entry(SWP_HWPOISON, page_to_pfn(page));
++}
++
++static inline int is_hwpoison_entry(swp_entry_t entry)
++{
++ return swp_type(entry) == SWP_HWPOISON;
++}
++
++#else
++
++static inline swp_entry_t make_hwpoison_entry(struct page *page)
++{
++ return swp_entry(0, 0);
++}
++
++static inline int is_hwpoison_entry(swp_entry_t swp)
++{
++ return 0;
++}
++#endif
++
+ typedef unsigned long pte_marker;
+
+ #define PTE_MARKER_UFFD_WP BIT(0)
+@@ -483,8 +512,9 @@ static inline struct folio *pfn_swap_ent
+
+ /*
+ * A pfn swap entry is a special type of swap entry that always has a pfn stored
+- * in the swap offset. They are used to represent unaddressable device memory
+- * and to restrict access to a page undergoing migration.
++ * in the swap offset. They are used to represent unaddressable device
++ * memory, to restrict access to a page undergoing migration, or to
++ * represent a pfn which has been hwpoisoned and unmapped.
+ */
+ static inline bool is_pfn_swap_entry(swp_entry_t entry)
+ {
+@@ -492,7 +522,7 @@ static inline bool is_pfn_swap_entry(swp
+ BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
+
+ return is_migration_entry(entry) || is_device_private_entry(entry) ||
+- is_device_exclusive_entry(entry);
++ is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
+ }
+
+ struct page_vma_mapped_walk;
+@@ -561,35 +591,6 @@ static inline int is_pmd_migration_entry
+ }
+ #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+-#ifdef CONFIG_MEMORY_FAILURE
+-
+-/*
+- * Support for hardware poisoned pages
+- */
+-static inline swp_entry_t make_hwpoison_entry(struct page *page)
+-{
+- BUG_ON(!PageLocked(page));
+- return swp_entry(SWP_HWPOISON, page_to_pfn(page));
+-}
+-
+-static inline int is_hwpoison_entry(swp_entry_t entry)
+-{
+- return swp_type(entry) == SWP_HWPOISON;
+-}
+-
+-#else
+-
+-static inline swp_entry_t make_hwpoison_entry(struct page *page)
+-{
+- return swp_entry(0, 0);
+-}
+-
+-static inline int is_hwpoison_entry(swp_entry_t swp)
+-{
+- return 0;
+-}
+-#endif
+-
+ static inline int non_swap_entry(swp_entry_t entry)
+ {
+ return swp_type(entry) >= MAX_SWAPFILES;
+_
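
Note that the CONFIG_MEMORY_FAILURE block is moved wholesale above
is_pfn_swap_entry() rather than modified: the updated check now calls
is_hwpoison_entry(), and a static inline must be defined before its use
in the header. The helpers themselves are unchanged.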
diff --git a/patches/ocfs2-fix-races-between-hole-punching-and-aiodio.patch b/patches/old/ocfs2-fix-races-between-hole-punching-and-aiodio.patch
index c7915750b..c7915750b 100644
--- a/patches/ocfs2-fix-races-between-hole-punching-and-aiodio.patch
+++ b/patches/old/ocfs2-fix-races-between-hole-punching-and-aiodio.patch
diff --git a/patches/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.patch b/patches/old/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.patch
index 789ed7d10..789ed7d10 100644
--- a/patches/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.patch
+++ b/patches/old/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.patch
diff --git a/patches/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.patch b/patches/old/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.patch
index 97448a78a..97448a78a 100644
--- a/patches/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.patch
+++ b/patches/old/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.patch
diff --git a/patches/ocfs2-use-coarse-time-for-new-created-files.patch b/patches/old/ocfs2-use-coarse-time-for-new-created-files.patch
index 96b6dd7e7..96b6dd7e7 100644
--- a/patches/ocfs2-use-coarse-time-for-new-created-files.patch
+++ b/patches/old/ocfs2-use-coarse-time-for-new-created-files.patch
diff --git a/pc/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.pc b/pc/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.pc
new file mode 100644
index 000000000..af2fa0892
--- /dev/null
+++ b/pc/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.pc
@@ -0,0 +1 @@
+arch/arm/mm/fault.c
diff --git a/pc/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.pc b/pc/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.pc
new file mode 100644
index 000000000..a7784d3ec
--- /dev/null
+++ b/pc/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.pc
@@ -0,0 +1 @@
+arch/arm64/mm/fault.c
diff --git a/pc/devel-series b/pc/devel-series
index 64f59cbd6..7332029b8 100644
--- a/pc/devel-series
+++ b/pc/devel-series
@@ -80,6 +80,13 @@ mmpage_owner-fix-refcount-imbalance.patch
mmpage_owner-fix-accounting-of-pages-when-migrating.patch
mmpage_owner-fix-printing-of-stack-records.patch
#
+mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.patch
+#
+#mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch: acks?
+mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.patch
+#
+mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.patch
+#
### hfe
#
#ENDBRANCH mm-hotfixes-unstable
@@ -142,6 +149,7 @@ fix-missing-vmalloch-includes-fix-2.patch
fix-missing-vmalloch-includes-fix-3.patch
fix-missing-vmalloch-includes-fix-4.patch
fix-missing-vmalloch-includes-fix-5.patch
+fix-missing-vmalloch-includes-fix-6.patch
#asm-generic-ioh-kill-vmalloch-dependency.patch: https://lkml.kernel.org/r/202403290536.7f9zGl5Q-lkp@intel.com https://lkml.kernel.org/r/202404031246.aq5Yr5KO-lkp@intel.com https://lkml.kernel.org/r/202404050934.bdQwGSAA-lkp@intel.com https://lkml.kernel.org/r/202404050828.5pKgmCLu-lkp@intel.com
asm-generic-ioh-kill-vmalloch-dependency.patch
mm-slub-mark-slab_free_freelist_hook-__always_inline.patch
@@ -158,9 +166,11 @@ lib-prevent-module-unloading-if-memory-is-not-freed.patch
lib-add-allocation-tagging-support-for-memory-allocation-profiling.patch
lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix.patch
lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-2.patch
+lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.patch
lib-introduce-support-for-page-allocation-tagging.patch
lib-introduce-early-boot-parameter-to-avoid-page_ext-memory-overhead.patch
mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags.patch
+mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.patch
change-alloc_pages-name-in-dma_map_ops-to-avoid-name-conflicts.patch
mm-enable-page-allocation-tagging.patch
mm-enable-page-allocation-tagging-fix.patch
@@ -429,8 +439,8 @@ khugepaged-convert-alloc_charge_hpage-to-alloc_charge_folio.patch
khugepaged-remove-hpage-from-collapse_huge_page.patch
khugepaged-pass-a-folio-to-__collapse_huge_page_copy.patch
khugepaged-remove-hpage-from-collapse_file.patch
-#khugepaged-use-a-folio-throughout-collapse_file.patch: https://lkml.kernel.org/r/ZhBrHNET9X5RiBuF@fedora
khugepaged-use-a-folio-throughout-collapse_file.patch
+khugepaged-use-a-folio-throughout-collapse_file-fix.patch
khugepaged-use-a-folio-throughout-hpage_collapse_scan_file.patch
#
proc-convert-clear_refs_pte_range-to-use-a-folio.patch
@@ -449,11 +459,13 @@ mm-swap-remove-cluster_flag_huge-from-swap_cluster_info-flags.patch
#mm-swap-free_swap_and_cache_nr-as-batched-free_swap_and_cache.patch: https://lkml.kernel.org/r/051052af-3b56-4290-98d3-fd5a1eb11ce1@redhat.com https://lkml.kernel.org/r/7d5b2f03-dc36-477d-8d5c-4eb8d45db398@redhat.com
mm-swap-free_swap_and_cache_nr-as-batched-free_swap_and_cache.patch
mm-swap-simplify-struct-percpu_cluster.patch
+#mm-swap-allow-storage-of-all-mthp-orders.patch: TBU
mm-swap-allow-storage-of-all-mthp-orders.patch
mm-vmscan-avoid-split-during-shrink_folio_list.patch
mm-madvise-avoid-split-during-madv_pageout-and-madv_cold.patch
mm-madvise-avoid-split-during-madv_pageout-and-madv_cold-fix.patch
#
+#arm64-mm-cleanup-__do_page_fault.patch: https://lkml.kernel.org/r/20240407171902.5958-A-hca@linux.ibm.com
arm64-mm-cleanup-__do_page_fault.patch
arm64-mm-accelerate-pagefault-when-vm_fault_badaccess.patch
arm-mm-accelerate-pagefault-when-vm_fault_badaccess.patch
@@ -462,6 +474,9 @@ riscv-mm-accelerate-pagefault-when-badaccess.patch
s390-mm-accelerate-pagefault-when-badaccess.patch
x86-mm-accelerate-pagefault-when-badaccess.patch
#
+arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch
+arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch
+#
mm-remove-struct-page-from-get_shadow_from_swap_cache.patch
#
hugetlb-convert-alloc_buddy_hugetlb_folio-to-use-a-folio.patch
@@ -475,9 +490,10 @@ mm-use-gup-fast-instead-fast-gup-in-remaining-comments.patch
mm-ksm-remove-redundant-code-in-ksm_fork.patch
#
hugetlb-convert-hugetlb_fault-to-use-struct-vm_fault.patch
-#hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault.patch: https://lkml.kernel.org/r/Zg6iG8cxpopXuFCo@localhost.localdomain
hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault.patch
+hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.patch
hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault.patch
+hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.patch
#
selftests-break-the-dependency-upon-local-header-files.patch
selftests-mm-fix-additional-build-errors-for-selftests.patch
@@ -497,12 +513,19 @@ mm-set-pageblock_order-to-hpage_pmd_order-in-case-with-config_hugetlb_page-but-t
#
#mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch+1: docs? acks? TBU?
mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch
+mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch
mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.patch
#
memory-tier-dax-kmem-introduce-an-abstract-layer-for-finding-allocating-and-putting-memory-types.patch
#memory-tier-create-cpuless-memory-tiers-after-obtaining-hmat-info.patch: https://lkml.kernel.org/r/20240405150244.00004b49@Huawei.com
memory-tier-create-cpuless-memory-tiers-after-obtaining-hmat-info.patch
#
+mm-mmap-make-vma_wants_writenotify-return-bool.patch
+#
+mm-mmap-make-accountable_mapping-return-bool.patch
+#
+mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.patch
+#
#
#
#
@@ -613,11 +636,6 @@ kgdb-add-has_ioport-dependency.patch
devres-switch-to-use-dev_err_probe-for-unification.patch
devres-dont-use-proxy-headers.patch
#
-#ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.patch+N: Fixes? stable?
-ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.patch
-ocfs2-fix-races-between-hole-punching-and-aiodio.patch
-ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.patch
-ocfs2-use-coarse-time-for-new-created-files.patch
#
vmcore-replace-strncpy-with-strscpy_pad.patch
#
diff --git a/pc/fix-missing-vmalloch-includes-fix-6.pc b/pc/fix-missing-vmalloch-includes-fix-6.pc
new file mode 100644
index 000000000..49a018be1
--- /dev/null
+++ b/pc/fix-missing-vmalloch-includes-fix-6.pc
@@ -0,0 +1 @@
+arch/arc/include/asm/mmu-arcv2.h
diff --git a/pc/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.pc b/pc/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.pc
new file mode 100644
index 000000000..6dc98425d
--- /dev/null
+++ b/pc/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.pc
@@ -0,0 +1 @@
+mm/hugetlb.c
diff --git a/pc/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.pc b/pc/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.pc
new file mode 100644
index 000000000..6dc98425d
--- /dev/null
+++ b/pc/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.pc
@@ -0,0 +1 @@
+mm/hugetlb.c
diff --git a/pc/khugepaged-use-a-folio-throughout-collapse_file-fix.pc b/pc/khugepaged-use-a-folio-throughout-collapse_file-fix.pc
new file mode 100644
index 000000000..e676704dd
--- /dev/null
+++ b/pc/khugepaged-use-a-folio-throughout-collapse_file-fix.pc
@@ -0,0 +1 @@
+mm/khugepaged.c
diff --git a/pc/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.pc b/pc/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.pc
new file mode 100644
index 000000000..b6ea657ed
--- /dev/null
+++ b/pc/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.pc
@@ -0,0 +1 @@
+include/linux/alloc_tag.h
diff --git a/pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.pc b/pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.pc
new file mode 100644
index 000000000..751f878cc
--- /dev/null
+++ b/pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.pc
@@ -0,0 +1,2 @@
+include/linux/huge_mm.h
+mm/huge_memory.c
diff --git a/pc/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.pc b/pc/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.pc
new file mode 100644
index 000000000..709648673
--- /dev/null
+++ b/pc/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.pc
@@ -0,0 +1 @@
+mm/memory-failure.c
diff --git a/pc/mm-mmap-make-accountable_mapping-return-bool.pc b/pc/mm-mmap-make-accountable_mapping-return-bool.pc
new file mode 100644
index 000000000..c1fb0e081
--- /dev/null
+++ b/pc/mm-mmap-make-accountable_mapping-return-bool.pc
@@ -0,0 +1 @@
+mm/mmap.c
diff --git a/pc/mm-mmap-make-vma_wants_writenotify-return-bool.pc b/pc/mm-mmap-make-vma_wants_writenotify-return-bool.pc
new file mode 100644
index 000000000..629cc8d21
--- /dev/null
+++ b/pc/mm-mmap-make-vma_wants_writenotify-return-bool.pc
@@ -0,0 +1,2 @@
+include/linux/mm.h
+mm/mmap.c
diff --git a/pc/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.pc b/pc/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.pc
new file mode 100644
index 000000000..e97b92935
--- /dev/null
+++ b/pc/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.pc
@@ -0,0 +1 @@
+include/linux/percpu.h
diff --git a/pc/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.pc b/pc/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.pc
new file mode 100644
index 000000000..6dc98425d
--- /dev/null
+++ b/pc/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.pc
@@ -0,0 +1 @@
+mm/hugetlb.c
diff --git a/pc/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.pc b/pc/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.pc
new file mode 100644
index 000000000..b6b7df785
--- /dev/null
+++ b/pc/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.pc
@@ -0,0 +1 @@
+mm/swapfile.c
diff --git a/pc/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.pc b/pc/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.pc
new file mode 100644
index 000000000..7f662690b
--- /dev/null
+++ b/pc/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.pc
@@ -0,0 +1 @@
+include/linux/swapops.h
diff --git a/pc/ocfs2-fix-races-between-hole-punching-and-aiodio.pc b/pc/ocfs2-fix-races-between-hole-punching-and-aiodio.pc
deleted file mode 100644
index 5c1813767..000000000
--- a/pc/ocfs2-fix-races-between-hole-punching-and-aiodio.pc
+++ /dev/null
@@ -1 +0,0 @@
-fs/ocfs2/file.c
diff --git a/pc/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.pc b/pc/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.pc
deleted file mode 100644
index 1d26b1907..000000000
--- a/pc/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.pc
+++ /dev/null
@@ -1 +0,0 @@
-fs/ocfs2/aops.c
diff --git a/pc/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.pc b/pc/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.pc
deleted file mode 100644
index 4d916780b..000000000
--- a/pc/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.pc
+++ /dev/null
@@ -1 +0,0 @@
-fs/ocfs2/namei.c
diff --git a/pc/ocfs2-use-coarse-time-for-new-created-files.pc b/pc/ocfs2-use-coarse-time-for-new-created-files.pc
deleted file mode 100644
index 4d916780b..000000000
--- a/pc/ocfs2-use-coarse-time-for-new-created-files.pc
+++ /dev/null
@@ -1 +0,0 @@
-fs/ocfs2/namei.c
diff --git a/txt/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt b/txt/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt
new file mode 100644
index 000000000..9767e155b
--- /dev/null
+++ b/txt/arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt
@@ -0,0 +1,13 @@
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+Subject: arm: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS
+Date: Sun, 7 Apr 2024 16:12:11 +0800
+
+On a bad map or access, directly set si_code to SEGV_MAPERR or
+SEGV_ACCERR, set fault to 0 and go to the error handling; this lets us
+drop the arch's private vm_fault reasons.
+
+Link: https://lkml.kernel.org/r/20240407081211.2292362-3-wangkefeng.wang@huawei.com
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: Will Deacon <will@kernel.org>
diff --git a/txt/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt b/txt/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt
new file mode 100644
index 000000000..578205409
--- /dev/null
+++ b/txt/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt
@@ -0,0 +1,21 @@
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+Subject: arm64: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS
+Date: Sun, 7 Apr 2024 16:12:10 +0800
+
+Patch series "mm: remove arch's private VM_FAULT_BADMAP/BADACCESS".
+
+Directly set SEGV_MAPRR or SEGV_ACCERR for arm/arm64 to remove the last
+two arch's private vm_fault reasons.
+
+
+This patch (of 2):
+
+On a bad map or access, directly set si_code to SEGV_MAPERR or
+SEGV_ACCERR, set fault to 0 and go to the error handling; this lets us
+drop the arch's private vm_fault reasons.
+
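+A minimal sketch of the resulting fault-path shape (illustrative; the
+exact code and labels in the patch may differ):
+
+	if (!(vma->vm_flags & vm_flags)) {
+		/* no arch-private VM_FAULT_BADACCESS needed */
+		fault = 0;
+		si_code = SEGV_ACCERR;
+		goto bad_area;
+	}
+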
+Link: https://lkml.kernel.org/r/20240407081211.2292362-2-wangkefeng.wang@huawei.com
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: Will Deacon <will@kernel.org>
diff --git a/txt/fix-missing-vmalloch-includes-fix-6.txt b/txt/fix-missing-vmalloch-includes-fix-6.txt
new file mode 100644
index 000000000..d0340109c
--- /dev/null
+++ b/txt/fix-missing-vmalloch-includes-fix-6.txt
@@ -0,0 +1,13 @@
+From: Suren Baghdasaryan <surenb@google.com>
+Subject: fixup! fix missing vmalloc.h includes
+Date: Fri, 5 Apr 2024 15:51:15 -0700
+
+fix arc build
+
+Link: https://lkml.kernel.org/r/20240405225115.431056-1-surenb@google.com
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202404050828.5pKgmCLu-lkp@intel.com/
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: kernel test robot <lkp@intel.com>
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
diff --git a/txt/hugetlb-convert-hugetlb_fault-to-use-struct-vm_fault.txt b/txt/hugetlb-convert-hugetlb_fault-to-use-struct-vm_fault.txt
index aa808cda0..85b7892cb 100644
--- a/txt/hugetlb-convert-hugetlb_fault-to-use-struct-vm_fault.txt
+++ b/txt/hugetlb-convert-hugetlb_fault-to-use-struct-vm_fault.txt
@@ -20,5 +20,5 @@ Link: https://lkml.kernel.org/r/20240401202651.31440-1-vishal.moola@gmail.com
Link: https://lkml.kernel.org/r/20240401202651.31440-2-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Reviewed-by: Muchun Song <muchun.song@linux.dev>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
-Cc: Muchun Song <muchun.song@linux.dev>
diff --git a/txt/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.txt b/txt/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.txt
new file mode 100644
index 000000000..f4bfa0a78
--- /dev/null
+++ b/txt/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault-fix.txt
@@ -0,0 +1,11 @@
+From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
+Subject: hugetlb: simplify hugetlb_no_page() arguments
+Date: Mon, 8 Apr 2024 10:17:54 -0700
+
+Simplify the function arguments, as suggested by Oscar and Muchun.
+
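+A rough sketch of the resulting shape (hypothetical; the real signature
+and locals may differ):
+
+	static vm_fault_t hugetlb_no_page(struct address_space *mapping,
+					  struct vm_fault *vmf)
+	{
+		/* derive what used to be separate arguments from vmf */
+		struct vm_area_struct *vma = vmf->vma;
+		struct hstate *h = hstate_vma(vma);
+		...
+	}
+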
+Link: https://lkml.kernel.org/r/ZhQtN8y5zud8iI1u@fedora
+Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Suggested-by: Muchun Song <muchun.song@linux.dev>
+Suggested-by: Oscar Salvador <osalvador@suse.de>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
diff --git a/txt/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.txt b/txt/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.txt
new file mode 100644
index 000000000..9a69d5672
--- /dev/null
+++ b/txt/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault-fix.txt
@@ -0,0 +1,11 @@
+From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
+Subject: hugetlb: Simplify hugetlb_wp() arguments
+Date: Mon, 8 Apr 2024 10:21:44 -0700
+
+Simplify the function arguments, per Oscar and Muchun.
+
+Link: https://lkml.kernel.org/r/ZhQtoFNZBNwBCeXn@fedora
+Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Suggested-by: Muchun Song <muchun.song@linux.dev>
+Suggested-by: Oscar Salvador <osalvador@suse.de>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
diff --git a/txt/khugepaged-use-a-folio-throughout-collapse_file-fix.txt b/txt/khugepaged-use-a-folio-throughout-collapse_file-fix.txt
new file mode 100644
index 000000000..028c27500
--- /dev/null
+++ b/txt/khugepaged-use-a-folio-throughout-collapse_file-fix.txt
@@ -0,0 +1,9 @@
+From: Matthew Wilcox <willy@infradead.org>
+Subject: khugepaged-use-a-folio-throughout-collapse_file-fix
+Date: Sun, 7 Apr 2024 04:43:27 +0100
+
+The unlikely() is already embedded in IS_ERR(), so there is no need to
+keep it here.
+
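+For reference, the branch hint already lives inside the helper
+(include/linux/err.h):
+
+	#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
+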
+Link: https://lkml.kernel.org/r/ZhIWX8K0E2tSyMSr@casper.infradead.org
+Signed-off-by: Matthew Wilcox <willy@infradead.org>
+Reported-by: Vishal Moola <vishal.moola@gmail.com>
diff --git a/txt/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.txt b/txt/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.txt
new file mode 100644
index 000000000..080d4e964
--- /dev/null
+++ b/txt/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.txt
@@ -0,0 +1,24 @@
+From: Klara Modin <klarasmodin@gmail.com>
+Subject: mm/memprofiling: explicitly include irqflags.h in alloc_tag.h
+Date: Sun, 7 Apr 2024 15:32:52 +0200
+
+linux/alloc_tag.h uses the macro this_cpu_inc which eventually expands to:
+
+ #define this_cpu_generic_to_op(pcp, val, op) \
+ do { \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ raw_cpu_generic_to_op(pcp, val, op); \
+ raw_local_irq_restore(__flags); \
+ } while (0)
+
+The macros raw_local_irq_save and raw_local_irq_restore are defined in
+linux/irqflags.h which is not included implicitly on all configs.
+Therefore, include it explicitly.
+
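+The fix itself is a one-line include near the top of
+include/linux/alloc_tag.h (placement illustrative):
+
+	#include <linux/irqflags.h>	/* raw_local_irq_save/restore */
+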
+Link: https://lkml.kernel.org/r/20240407133252.173636-1-klarasmodin@gmail.com
+Fixes: ac906a377c67 ("lib: add allocation tagging support for memory allocation profiling")
+Link: https://lore.kernel.org/lkml/6b8149f3-80e6-413c-abcb-1925ecda9d8c@gmail.com/
+Signed-off-by: Klara Modin <klarasmodin@gmail.com>
+Acked-by: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
diff --git a/txt/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.txt b/txt/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.txt
new file mode 100644
index 000000000..fe1b0f84b
--- /dev/null
+++ b/txt/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.txt
@@ -0,0 +1,12 @@
+From: Barry Song <v-songbaohua@oppo.com>
+Subject: mm: fix powerpc build issue
+Date: Sun, 7 Apr 2024 12:23:35 +1200
+
+On powerpc, PMD_ORDER is not a compile-time constant, so it cannot size
+a static percpu array; transition to alloc_percpu() instead.
+
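+A hypothetical sketch of the direction (names and sizing are
+illustrative, not lifted from the patch):
+
+	/* PMD_ORDER cannot size a static percpu array on powerpc,
+	 * so allocate the per-order counters at runtime instead. */
+	unsigned long __percpu *mthp_stats;
+
+	mthp_stats = __alloc_percpu(sizeof(unsigned long) *
+				    (PMD_ORDER + 1) * NR_MTHP_STAT /* hypothetical */,
+				    sizeof(unsigned long));
+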
+Link: https://lkml.kernel.org/r/20240407042247.201412-1-21cnbao@gmail.com
+Signed-off-by: Barry Song <v-songbaohua@oppo.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202404061754.n8jmZ6s3-lkp@intel.com/
+Tested-by: Yujie Liu <yujie.liu@intel.com>
diff --git a/txt/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.txt b/txt/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.txt
index b9623e0ba..2c0c361a0 100644
--- a/txt/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.txt
+++ b/txt/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.txt
@@ -26,7 +26,7 @@ Link: https://lkml.kernel.org/r/20240404162515.527802-2-fvdl@google.com
Fixes: cf11e85fc08c ("mm: hugetlb: optionally allocate gigantic hugepages using cma")
Signed-off-by: Frank van der Linden <fvdl@google.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
-Cc: David Hildenbrand <david@redhat.com>
+Acked-by: David Hildenbrand <david@redhat.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
diff --git a/txt/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.txt b/txt/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.txt
new file mode 100644
index 000000000..ea6f16696
--- /dev/null
+++ b/txt/mm-memory-failure-fix-deadlock-when-hugetlb_optimize_vmemmap-is-enabled.txt
@@ -0,0 +1,118 @@
+From: Miaohe Lin <linmiaohe@huawei.com>
+Subject: mm/memory-failure: fix deadlock when hugetlb_optimize_vmemmap is enabled
+Date: Sun, 7 Apr 2024 16:54:56 +0800
+
+When I did hard offline test with hugetlb pages, below deadlock occurs:
+
+======================================================
+WARNING: possible circular locking dependency detected
+6.8.0-11409-gf6cef5f8c37f #1 Not tainted
+------------------------------------------------------
+bash/46904 is trying to acquire lock:
+ffffffffabe68910 (cpu_hotplug_lock){++++}-{0:0}, at: static_key_slow_dec+0x16/0x60
+
+but task is already holding lock:
+ffffffffabf92ea8 (pcp_batch_high_lock){+.+.}-{3:3}, at: zone_pcp_disable+0x16/0x40
+
+which lock already depends on the new lock.
+
+the existing dependency chain (in reverse order) is:
+
+-> #1 (pcp_batch_high_lock){+.+.}-{3:3}:
+ __mutex_lock+0x6c/0x770
+ page_alloc_cpu_online+0x3c/0x70
+ cpuhp_invoke_callback+0x397/0x5f0
+ __cpuhp_invoke_callback_range+0x71/0xe0
+ _cpu_up+0xeb/0x210
+ cpu_up+0x91/0xe0
+ cpuhp_bringup_mask+0x49/0xb0
+ bringup_nonboot_cpus+0xb7/0xe0
+ smp_init+0x25/0xa0
+ kernel_init_freeable+0x15f/0x3e0
+ kernel_init+0x15/0x1b0
+ ret_from_fork+0x2f/0x50
+ ret_from_fork_asm+0x1a/0x30
+
+-> #0 (cpu_hotplug_lock){++++}-{0:0}:
+ __lock_acquire+0x1298/0x1cd0
+ lock_acquire+0xc0/0x2b0
+ cpus_read_lock+0x2a/0xc0
+ static_key_slow_dec+0x16/0x60
+ __hugetlb_vmemmap_restore_folio+0x1b9/0x200
+ dissolve_free_huge_page+0x211/0x260
+ __page_handle_poison+0x45/0xc0
+ memory_failure+0x65e/0xc70
+ hard_offline_page_store+0x55/0xa0
+ kernfs_fop_write_iter+0x12c/0x1d0
+ vfs_write+0x387/0x550
+ ksys_write+0x64/0xe0
+ do_syscall_64+0xca/0x1e0
+ entry_SYSCALL_64_after_hwframe+0x6d/0x75
+
+other info that might help us debug this:
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(pcp_batch_high_lock);
+ lock(cpu_hotplug_lock);
+ lock(pcp_batch_high_lock);
+ rlock(cpu_hotplug_lock);
+
+ *** DEADLOCK ***
+
+5 locks held by bash/46904:
+ #0: ffff98f6c3bb23f0 (sb_writers#5){.+.+}-{0:0}, at: ksys_write+0x64/0xe0
+ #1: ffff98f6c328e488 (&of->mutex){+.+.}-{3:3}, at: kernfs_fop_write_iter+0xf8/0x1d0
+ #2: ffff98ef83b31890 (kn->active#113){.+.+}-{0:0}, at: kernfs_fop_write_iter+0x100/0x1d0
+ #3: ffffffffabf9db48 (mf_mutex){+.+.}-{3:3}, at: memory_failure+0x44/0xc70
+ #4: ffffffffabf92ea8 (pcp_batch_high_lock){+.+.}-{3:3}, at: zone_pcp_disable+0x16/0x40
+
+stack backtrace:
+CPU: 10 PID: 46904 Comm: bash Kdump: loaded Not tainted 6.8.0-11409-gf6cef5f8c37f #1
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x68/0xa0
+ check_noncircular+0x129/0x140
+ __lock_acquire+0x1298/0x1cd0
+ lock_acquire+0xc0/0x2b0
+ cpus_read_lock+0x2a/0xc0
+ static_key_slow_dec+0x16/0x60
+ __hugetlb_vmemmap_restore_folio+0x1b9/0x200
+ dissolve_free_huge_page+0x211/0x260
+ __page_handle_poison+0x45/0xc0
+ memory_failure+0x65e/0xc70
+ hard_offline_page_store+0x55/0xa0
+ kernfs_fop_write_iter+0x12c/0x1d0
+ vfs_write+0x387/0x550
+ ksys_write+0x64/0xe0
+ do_syscall_64+0xca/0x1e0
+ entry_SYSCALL_64_after_hwframe+0x6d/0x75
+RIP: 0033:0x7fc862314887
+Code: 10 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 48 89 54 24 18 48 89 74 24
+RSP: 002b:00007fff19311268 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 000000000000000c RCX: 00007fc862314887
+RDX: 000000000000000c RSI: 000056405645fe10 RDI: 0000000000000001
+RBP: 000056405645fe10 R08: 00007fc8623d1460 R09: 000000007fffffff
+R10: 0000000000000000 R11: 0000000000000246 R12: 000000000000000c
+R13: 00007fc86241b780 R14: 00007fc862417600 R15: 00007fc862416a00
+
+In short, the scenario below breaks the lock dependency chain:
+
+ memory_failure
+ __page_handle_poison
+ zone_pcp_disable -- lock(pcp_batch_high_lock)
+ dissolve_free_huge_page
+ __hugetlb_vmemmap_restore_folio
+ static_key_slow_dec
+ cpus_read_lock -- rlock(cpu_hotplug_lock)
+
+Fix this by calling drain_all_pages() instead.
+
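+A condensed, hypothetical sketch of the new __page_handle_poison() flow:
+
+	/* Avoid zone_pcp_disable(): it takes pcp_batch_high_lock, and
+	 * dissolving the huge page may then take cpus_read_lock() via
+	 * the vmemmap-optimization static key. Draining the pcplists
+	 * is sufficient here. */
+	ret = dissolve_free_huge_page(page);
+	if (!ret) {
+		drain_all_pages(page_zone(page));
+		ret = take_page_off_buddy(page);
+	}
+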
+Link: https://lkml.kernel.org/r/20240407085456.2798193-1-linmiaohe@huawei.com
+Fixes: 510d25c92ec4a ("mm/hwpoison: disable pcp for page_handle_poison()")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: <stable@vger.kernel.org>
diff --git a/txt/mm-mmap-make-accountable_mapping-return-bool.txt b/txt/mm-mmap-make-accountable_mapping-return-bool.txt
new file mode 100644
index 000000000..c2987bb66
--- /dev/null
+++ b/txt/mm-mmap-make-accountable_mapping-return-bool.txt
@@ -0,0 +1,12 @@
+From: Hao Ge <gehao@kylinos.cn>
+Subject: mm/mmap: make accountable_mapping return bool
+Date: Sun, 7 Apr 2024 14:38:43 +0800
+
+accountable_mapping() already computes a boolean result, so change its
+return type to bool.
+
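+A sketch of the change (body abbreviated):
+
+	static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
+	{
+		...
+		/* already evaluates to true/false */
+		return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) ==
+			VM_WRITE;
+	}
+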
+Link: https://lkml.kernel.org/r/20240407063843.804274-1-gehao@kylinos.cn
+Signed-off-by: Hao Ge <gehao@kylinos.cn>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Lorenzo Stoakes <lstoakes@gmail.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
diff --git a/txt/mm-mmap-make-vma_wants_writenotify-return-bool.txt b/txt/mm-mmap-make-vma_wants_writenotify-return-bool.txt
new file mode 100644
index 000000000..6f5c3d199
--- /dev/null
+++ b/txt/mm-mmap-make-vma_wants_writenotify-return-bool.txt
@@ -0,0 +1,12 @@
+From: Hao Ge <gehao@kylinos.cn>
+Subject: mm/mmap: make vma_wants_writenotify return bool
+Date: Sun, 7 Apr 2024 14:26:53 +0800
+
+vma_wants_writenotify() already computes a boolean result, so change its
+return type to bool.
+
+Link: https://lkml.kernel.org/r/20240407062653.803142-1-gehao@kylinos.cn
+Signed-off-by: Hao Ge <gehao@kylinos.cn>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Lorenzo Stoakes <lstoakes@gmail.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
diff --git a/txt/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.txt b/txt/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.txt
new file mode 100644
index 000000000..998481b09
--- /dev/null
+++ b/txt/mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags-fix.txt
@@ -0,0 +1,14 @@
+From: Suren Baghdasaryan <surenb@google.com>
+Subject: fixup! increase PERCPU_MODULE_RESERVE to accommodate allocation tags
+Date: Sat, 6 Apr 2024 14:40:43 -0700
+
+The previous increase of the per-cpu reserved area for modules was not
+enough to accommodate all allocation tags in certain use cases. Increase
+it further to fix the issue.
+
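+The change itself is a constant bump in include/linux/percpu.h; the
+value below is illustrative, not necessarily the one the patch picks:
+
+	#define PERCPU_MODULE_RESERVE	(16 << 10)
+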
+Link: https://lkml.kernel.org/r/20240406214044.1114406-1-surenb@google.com
+Fixes: a11cb5c8e248 ("mm: percpu: increase PERCPU_MODULE_RESERVE to accommodate allocation tags")
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Reported-by: Klara Modin <klarasmodin@gmail.com>
+Tested-by: Klara Modin <klarasmodin@gmail.com>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
diff --git a/txt/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.txt b/txt/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.txt
new file mode 100644
index 000000000..3c9c70090
--- /dev/null
+++ b/txt/mm-userfaultfd-allow-hugetlb-change-protection-upon-poison-entry.txt
@@ -0,0 +1,31 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: mm/userfaultfd: Allow hugetlb change protection upon poison entry
+Date: Fri, 5 Apr 2024 19:19:20 -0400
+
+After UFFDIO_POISON, there can be two kinds of hugetlb pte markers, either
+the POISON one or UFFD_WP one.
+
+Allow change protection to run on a poisoned marker just like the
+!hugetlb cases, ignoring the marker irrespective of the permission.
+
+Here the two bits are mutually exclusive. For example, when installing a
+poisoned entry it must not already be UFFD_WP (pte_none() is checked
+before such an install), and if UFFD_WP is set there must be no POISON
+bit set. This makes sense because UFFD_WP is a bit reflecting
+permission, and permissions do not apply if the pte is poisoned and
+destined to SIGBUS.
+
+So simply check whether the uffd_wp bit is set first, and do nothing
+otherwise.
+
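+A minimal sketch of the check in the hugetlb change-protection path
+(simplified; surrounding code elided):
+
+	if (is_pte_marker_entry(entry)) {
+		/* A POISON marker is destined to SIGBUS; permissions
+		 * only matter for the UFFD_WP marker. */
+		if (!pte_marker_entry_uffd_wp(entry))
+			continue;
+		...
+	}
+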
+Attach the Fixes tag to the UFFDIO_POISON work, as before that it should
+not have been possible to have a poison entry for hugetlb (e.g., hugetlb
+doesn't do swap, so there is no chance of swapin errors).
+
+Link: https://lkml.kernel.org/r/20240405231920.1772199-1-peterx@redhat.com
+Link: https://lore.kernel.org/r/000000000000920d5e0615602dd1@google.com
+Reported-by: syzbot+b07c8ac8eee3d4d8440f@syzkaller.appspotmail.com
+Fixes: fc71884a5f59 ("mm: userfaultfd: add new UFFDIO_POISON ioctl")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org> [6.6+]
diff --git a/txt/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.txt b/txt/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.txt
new file mode 100644
index 000000000..8cb9cce6f
--- /dev/null
+++ b/txt/mmswap-add-document-about-rcu-read-lock-and-swapoff-interaction.txt
@@ -0,0 +1,19 @@
+From: Huang Ying <ying.huang@intel.com>
+Subject: mm,swap: add document about RCU read lock and swapoff interaction
+Date: Sun, 7 Apr 2024 14:54:50 +0800
+
+While reviewing a patch to fix the race condition between
+free_swap_and_cache() and swapoff() [1], it was found that the
+documentation about how to prevent racing with swapoff isn't clear
+enough, especially that the RCU read lock can prevent swapoff from
+freeing data structures. So, add the documentation as comments.
+
+[1] https://lore.kernel.org/linux-mm/c8fe62d0-78b8-527a-5bef-ee663ccdc37a@huawei.com/
+
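+A sketch of the pattern the new comments describe (illustrative only):
+
+	rcu_read_lock();
+	/* swapoff() cannot free the swap_info_struct and related data
+	 * structures until existing RCU readers are done. */
+	si = swp_swap_info(entry);
+	...
+	rcu_read_unlock();
+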
+Link: https://lkml.kernel.org/r/20240407065450.498821-1-ying.huang@intel.com
+Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
+Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Minchan Kim <minchan@kernel.org>
diff --git a/txt/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.txt b/txt/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.txt
new file mode 100644
index 000000000..9b407ad3b
--- /dev/null
+++ b/txt/mmswapops-update-check-in-is_pfn_swap_entry-for-hwpoison-entries.txt
@@ -0,0 +1,31 @@
+From: Oscar Salvador <osalvador@suse.de>
+Subject: mm,swapops: update check in is_pfn_swap_entry for hwpoison entries
+Date: Sun, 7 Apr 2024 15:05:37 +0200
+
+Tony reported that machine check recovery was broken in v6.9-rc1, as
+he was hitting a VM_BUG_ON when injecting uncorrectable memory errors to
+DRAM.
+
+After some more digging and debugging on his side, he realized that this
+went back to v6.1, with the introduction of 'commit 0d206b5d2e0d
+("mm/swap: add swp_offset_pfn() to fetch PFN from swap entry")'. That
+commit, among other things, introduced swp_offset_pfn(), replacing
+hwpoison_entry_to_pfn() in its favour.
+
+The patch also introduced a VM_BUG_ON() check for is_pfn_swap_entry(), but
+is_pfn_swap_entry() never got updated to cover hwpoison entries, which
+means that we would hit the VM_BUG_ON whenever we would call
+swp_offset_pfn() for such entries on environments with CONFIG_DEBUG_VM
+set. Fix this by updating the check to cover hwpoison entries as well,
+and update the comment while we are at it.
+
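+A sketch of the updated check (the exact form in the patch may differ):
+
+	static inline bool is_pfn_swap_entry(swp_entry_t entry)
+	{
+		return is_migration_entry(entry) ||
+		       is_device_private_entry(entry) ||
+		       is_device_exclusive_entry(entry) ||
+		       is_hwpoison_entry(entry);
+	}
+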
+Link: https://lkml.kernel.org/r/20240407130537.16977-1-osalvador@suse.de
+Fixes: 0d206b5d2e0d ("mm/swap: add swp_offset_pfn() to fetch PFN from swap entry")
+Signed-off-by: Oscar Salvador <osalvador@suse.de>
+Reported-by: Tony Luck <tony.luck@intel.com>
+Closes: https://lore.kernel.org/all/Zg8kLSl2yAlA3o5D@agluck-desk3/
+Tested-by: Tony Luck <tony.luck@intel.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Acked-by: Miaohe Lin <linmiaohe@huawei.com>
+Cc: <stable@vger.kernel.org> [6.1.x]
diff --git a/txt/ocfs2-fix-races-between-hole-punching-and-aiodio.txt b/txt/old/ocfs2-fix-races-between-hole-punching-and-aiodio.txt
index 05fca49da..05fca49da 100644
--- a/txt/ocfs2-fix-races-between-hole-punching-and-aiodio.txt
+++ b/txt/old/ocfs2-fix-races-between-hole-punching-and-aiodio.txt
diff --git a/txt/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.txt b/txt/old/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.txt
index 0ccdc35a8..0ccdc35a8 100644
--- a/txt/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.txt
+++ b/txt/old/ocfs2-return-real-error-code-in-ocfs2_dio_wr_get_block.txt
diff --git a/txt/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.txt b/txt/old/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.txt
index 56163dac4..56163dac4 100644
--- a/txt/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.txt
+++ b/txt/old/ocfs2-update-inode-fsync-transaction-id-in-ocfs2_unlink-and-ocfs2_link.txt
diff --git a/txt/ocfs2-use-coarse-time-for-new-created-files.txt b/txt/old/ocfs2-use-coarse-time-for-new-created-files.txt
index e065d8dae..e065d8dae 100644
--- a/txt/ocfs2-use-coarse-time-for-new-created-files.txt
+++ b/txt/old/ocfs2-use-coarse-time-for-new-created-files.txt
diff --git a/txt/old/x86-mm-pat-fix-vm_pat-handling-in-cow-mappings.txt b/txt/old/x86-mm-pat-fix-vm_pat-handling-in-cow-mappings.txt
index 73816f472..698a4af31 100644
--- a/txt/old/x86-mm-pat-fix-vm_pat-handling-in-cow-mappings.txt
+++ b/txt/old/x86-mm-pat-fix-vm_pat-handling-in-cow-mappings.txt
@@ -111,6 +111,7 @@ Closes: https://lkml.kernel.org/r/20240227122814.3781907-1-mawupeng1@huawei.com
Fixes: b1a86e15dc03 ("x86, pat: remove the dependency on 'vm_pgoff' in track/untrack pfn vma routines")
Fixes: 5899329b1910 ("x86: PAT: implement track/untrack of pfnmap regions for x86 - v3")
Acked-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Wupeng Ma <mawupeng1@huawei.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
diff --git a/txt/s390-mm-accelerate-pagefault-when-badaccess.txt b/txt/s390-mm-accelerate-pagefault-when-badaccess.txt
index 23915a29c..bdac1b7f9 100644
--- a/txt/s390-mm-accelerate-pagefault-when-badaccess.txt
+++ b/txt/s390-mm-accelerate-pagefault-when-badaccess.txt
@@ -9,6 +9,7 @@ event with VMA_LOCK_SUCCESS.
Link: https://lkml.kernel.org/r/20240403083805.1818160-7-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>