summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndrew Morton <akpm@linux-foundation.org>2024-04-17 14:42:58 -0700
committerAndrew Morton <akpm@linux-foundation.org>2024-04-17 14:42:58 -0700
commit174cc02429a7dac220de9b7c6cba1c78be9db39c (patch)
tree3ed41e2a597c2afdac8f6997d7e49ed028aab018
parent11a743235ddef7807167259e5fedc4871aa5e9b6 (diff)
download25-new-174cc02429a7dac220de9b7c6cba1c78be9db39c.tar.gz
foo
-rw-r--r--patches/cpumask-delete-unused-reset_cpu_possible_mask.patch29
-rw-r--r--patches/doc-split-bufferrst-out-of-api-summaryrst-fix.patch35
-rw-r--r--patches/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.patch53
-rw-r--r--patches/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.patch52
-rw-r--r--patches/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.patch32
-rw-r--r--patches/lib-introduce-early-boot-parameter-to-avoid-page_ext-memory-overhead.patch2
-rw-r--r--patches/mm-always-initialise-folio-_deferred_list-fix.patch32
-rw-r--r--patches/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.patch32
-rw-r--r--patches/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.patch47
-rw-r--r--patches/mm-page_table_check-support-userfault-wr-protect-entries.patch65
-rw-r--r--patches/mm-zswap-optimize-zswap-pool-size-tracking.patch32
-rw-r--r--patches/mm-zswap-remove-nr_zswap_stored-atomic.patch18
-rw-r--r--patches/null-pointer-dereference-while-shrinking-zswap.patch57
-rw-r--r--patches/old/mm-page_table_check-support-userfault-wr-protect-entries.patch155
-rw-r--r--patches/s390-netiucv-remove-function-pointer-cast.patch63
-rw-r--r--patches/s390-smsgiucv_app-remove-function-pointer-cast.patch55
-rw-r--r--patches/s390-vmlogrdr-remove-function-pointer-cast.patch72
-rw-r--r--patches/selftest-mm-mseal-read-only-elf-memory-segment-fix.patch524
-rw-r--r--patches/selftests-harness-remove-use-of-line_max-fix-fix-fix.patch59
-rw-r--r--pc/cpumask-delete-unused-reset_cpu_possible_mask.pc1
-rw-r--r--pc/devel-series26
-rw-r--r--pc/doc-split-bufferrst-out-of-api-summaryrst-fix.pc1
-rw-r--r--pc/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.pc1
-rw-r--r--pc/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.pc1
-rw-r--r--pc/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.pc1
-rw-r--r--pc/mm-always-initialise-folio-_deferred_list-fix.pc1
-rw-r--r--pc/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.pc1
-rw-r--r--pc/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.pc1
-rw-r--r--pc/mm-page_table_check-support-userfault-wr-protect-entries.pc1
-rw-r--r--pc/null-pointer-dereference-while-shrinking-zswap.pc1
-rw-r--r--pc/s390-netiucv-remove-function-pointer-cast.pc1
-rw-r--r--pc/s390-smsgiucv_app-remove-function-pointer-cast.pc1
-rw-r--r--pc/s390-vmlogrdr-remove-function-pointer-cast.pc1
-rw-r--r--pc/selftest-mm-mseal-read-only-elf-memory-segment-fix.pc2
-rw-r--r--pc/selftests-harness-remove-use-of-line_max-fix-fix-fix.pc1
-rw-r--r--txt/buffer-add-kernel-doc-for-bforget-and-__bforget.txt1
-rw-r--r--txt/buffer-add-kernel-doc-for-block_dirty_folio.txt1
-rw-r--r--txt/buffer-add-kernel-doc-for-brelse-and-__brelse.txt1
-rw-r--r--txt/buffer-add-kernel-doc-for-try_to_free_buffers.txt1
-rw-r--r--txt/buffer-fix-__bread-and-__bread_gfp-kernel-doc.txt1
-rw-r--r--txt/cpumask-delete-unused-reset_cpu_possible_mask.txt8
-rw-r--r--txt/doc-split-bufferrst-out-of-api-summaryrst-fix.txt12
-rw-r--r--txt/doc-split-bufferrst-out-of-api-summaryrst.txt1
-rw-r--r--txt/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.txt27
-rw-r--r--txt/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.txt16
-rw-r--r--txt/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.txt14
-rw-r--r--txt/mm-always-initialise-folio-_deferred_list-fix.txt15
-rw-r--r--txt/mm-always-initialise-folio-_deferred_list.txt1
-rw-r--r--txt/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.txt14
-rw-r--r--txt/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.txt24
-rw-r--r--txt/mm-page_table_check-support-userfault-wr-protect-entries.txt6
-rw-r--r--txt/null-pointer-dereference-while-shrinking-zswap.txt16
-rw-r--r--txt/old/mm-page_table_check-support-userfault-wr-protect-entries.txt54
-rw-r--r--txt/s390-netiucv-remove-function-pointer-cast.txt26
-rw-r--r--txt/s390-smsgiucv_app-remove-function-pointer-cast.txt25
-rw-r--r--txt/s390-vmlogrdr-remove-function-pointer-cast.txt36
-rw-r--r--txt/selftest-mm-mseal-read-only-elf-memory-segment-fix.txt11
-rw-r--r--txt/selftests-harness-remove-use-of-line_max-fix-fix-fix.txt42
-rw-r--r--txt/selftests-harness-remove-use-of-line_max-fix-fix.txt14
-rw-r--r--txt/selftests-harness-remove-use-of-line_max-fix.txt13
-rw-r--r--txt/selftests-harness-remove-use-of-line_max.txt1
61 files changed, 1785 insertions, 52 deletions
diff --git a/patches/cpumask-delete-unused-reset_cpu_possible_mask.patch b/patches/cpumask-delete-unused-reset_cpu_possible_mask.patch
new file mode 100644
index 000000000..009466a3d
--- /dev/null
+++ b/patches/cpumask-delete-unused-reset_cpu_possible_mask.patch
@@ -0,0 +1,29 @@
+From: Alexey Dobriyan <adobriyan@gmail.com>
+Subject: cpumask: delete unused reset_cpu_possible_mask()
+Date: Wed, 17 Apr 2024 23:11:23 +0300
+
+Link: https://lkml.kernel.org/r/20240417201123.2961-1-adobriyan@gmail.com
+Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
+Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Cc: Yury Norov <yury.norov@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/cpumask.h | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/include/linux/cpumask.h~cpumask-delete-unused-reset_cpu_possible_mask
++++ a/include/linux/cpumask.h
+@@ -1017,11 +1017,6 @@ void init_cpu_present(const struct cpuma
+ void init_cpu_possible(const struct cpumask *src);
+ void init_cpu_online(const struct cpumask *src);
+
+-static inline void reset_cpu_possible_mask(void)
+-{
+- bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
+-}
+-
+ static inline void
+ set_cpu_possible(unsigned int cpu, bool possible)
+ {
+_
diff --git a/patches/doc-split-bufferrst-out-of-api-summaryrst-fix.patch b/patches/doc-split-bufferrst-out-of-api-summaryrst-fix.patch
new file mode 100644
index 000000000..fb2a25723
--- /dev/null
+++ b/patches/doc-split-bufferrst-out-of-api-summaryrst-fix.patch
@@ -0,0 +1,35 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: doc-split-bufferrst-out-of-api-summaryrst-fix
+Date: Wed, 17 Apr 2024 02:57:46 +0100
+
+fix kerneldoc warning
+
+Documentation/filesystems/index.rst:50: WARNING: toctree contains reference to nonexisting document 'filesystems/buffer'
+
+Link: https://lkml.kernel.org/r/20240417015933.453505-1-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reported-by: Randy Dunlap <rdunlap@infradead.org>
+Tested-by: Randy Dunlap <rdunlap@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ Documentation/filesystems/buffer.rst | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- /dev/null
++++ a/Documentation/filesystems/buffer.rst
+@@ -0,0 +1,13 @@
++Buffer Heads
++============
++
++Linux uses buffer heads to maintain state about individual filesystem blocks.
++Buffer heads are deprecated and new filesystems should use iomap instead.
++
++Functions
++---------
++
++.. kernel-doc:: include/linux/buffer_head.h
++.. kernel-doc:: fs/buffer.c
++ :export:
++
+_
diff --git a/patches/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.patch b/patches/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.patch
new file mode 100644
index 000000000..9e3b4fb64
--- /dev/null
+++ b/patches/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.patch
@@ -0,0 +1,53 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: fs/proc/task_mmu: convert pagemap_hugetlb_range() to work on folios
+Date: Wed, 17 Apr 2024 11:23:12 +0200
+
+Patch series "fs/proc/task_mmu: convert hugetlb functions to work on folis".
+
+Let's convert two more functions, getting rid of two more page_mapcount()
+calls.
+
+
+This patch (of 2):
+
+Let's get rid of another page_mapcount() check and simply use
+folio_likely_mapped_shared(), which is precise for hugetlb folios.
+
+While at it, also check for PMD table sharing, like we do in
+smaps_hugetlb_range().
+
+No functional change intended, except that we would now detect hugetlb
+folios shared via PMD table sharing correctly.
+
+Link: https://lkml.kernel.org/r/20240417092313.753919-1-david@redhat.com
+Link: https://lkml.kernel.org/r/20240417092313.753919-2-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Cc: Muchun Song <muchun.song@linux.dev>
+
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ fs/proc/task_mmu.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/proc/task_mmu.c~fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios
++++ a/fs/proc/task_mmu.c
+@@ -1578,12 +1578,13 @@ static int pagemap_hugetlb_range(pte_t *
+
+ pte = huge_ptep_get(ptep);
+ if (pte_present(pte)) {
+- struct page *page = pte_page(pte);
++ struct folio *folio = page_folio(pte_page(pte));
+
+- if (!PageAnon(page))
++ if (!folio_test_anon(folio))
+ flags |= PM_FILE;
+
+- if (page_mapcount(page) == 1)
++ if (!folio_likely_mapped_shared(folio) &&
++ !hugetlb_pmd_shared(ptep))
+ flags |= PM_MMAP_EXCLUSIVE;
+
+ if (huge_pte_uffd_wp(pte))
+_
diff --git a/patches/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.patch b/patches/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.patch
new file mode 100644
index 000000000..fd190303f
--- /dev/null
+++ b/patches/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.patch
@@ -0,0 +1,52 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: fs/proc/task_mmu: convert smaps_hugetlb_range() to work on folios
+Date: Wed, 17 Apr 2024 11:23:13 +0200
+
+Let's get rid of another page_mapcount() check and simply use
+folio_likely_mapped_shared(), which is precise for hugetlb folios.
+
+While at it, use huge_ptep_get() + pte_page() instead of ptep_get() +
+vm_normal_page(), just like we do in pagemap_hugetlb_range().
+
+No functional change intended.
+
+Link: https://lkml.kernel.org/r/20240417092313.753919-3-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Cc: Muchun Song <muchun.song@linux.dev>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ fs/proc/task_mmu.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/fs/proc/task_mmu.c~fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios
++++ a/fs/proc/task_mmu.c
+@@ -730,19 +730,20 @@ static int smaps_hugetlb_range(pte_t *pt
+ {
+ struct mem_size_stats *mss = walk->private;
+ struct vm_area_struct *vma = walk->vma;
+- struct page *page = NULL;
+- pte_t ptent = ptep_get(pte);
++ pte_t ptent = huge_ptep_get(pte);
++ struct folio *folio = NULL;
+
+ if (pte_present(ptent)) {
+- page = vm_normal_page(vma, addr, ptent);
++ folio = page_folio(pte_page(ptent));
+ } else if (is_swap_pte(ptent)) {
+ swp_entry_t swpent = pte_to_swp_entry(ptent);
+
+ if (is_pfn_swap_entry(swpent))
+- page = pfn_swap_entry_to_page(swpent);
++ folio = pfn_swap_entry_folio(swpent);
+ }
+- if (page) {
+- if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
++ if (folio) {
++ if (folio_likely_mapped_shared(folio) ||
++ hugetlb_pmd_shared(pte))
+ mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
+ else
+ mss->private_hugetlb += huge_page_size(hstate_vma(vma));
+_
diff --git a/patches/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.patch b/patches/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.patch
new file mode 100644
index 000000000..003288819
--- /dev/null
+++ b/patches/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.patch
@@ -0,0 +1,32 @@
+From: Suren Baghdasaryan <surenb@google.com>
+Subject: lib: fix alloc_tag_init() to prevent passing NULL to PTR_ERR()
+Date: Tue, 16 Apr 2024 17:33:49 -0700
+
+codetag_register_type() never returns NULL, yet IS_ERR_OR_NULL() is used
+to check its return value. This leads to a warning about possibility of
+passing NULL to PTR_ERR(). Fix that by using IS_ERR() to exclude NULL.
+
+Link: https://lkml.kernel.org/r/20240417003349.2520094-1-surenb@google.com
+Fixes: 6e8a230a6b1a ("lib: add allocation tagging support for memory allocation profiling")
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lore.kernel.org/r/202404051340.7Wo7oiJ5-lkp@intel.com/
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ lib/alloc_tag.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/alloc_tag.c~lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4
++++ a/lib/alloc_tag.c
+@@ -141,7 +141,7 @@ static int __init alloc_tag_init(void)
+ };
+
+ alloc_tag_cttype = codetag_register_type(&desc);
+- if (IS_ERR_OR_NULL(alloc_tag_cttype))
++ if (IS_ERR(alloc_tag_cttype))
+ return PTR_ERR(alloc_tag_cttype);
+
+ register_sysctl_init("vm", memory_allocation_profiling_sysctls);
+_
diff --git a/patches/lib-introduce-early-boot-parameter-to-avoid-page_ext-memory-overhead.patch b/patches/lib-introduce-early-boot-parameter-to-avoid-page_ext-memory-overhead.patch
index 7e4bcfff5..99f6e0eea 100644
--- a/patches/lib-introduce-early-boot-parameter-to-avoid-page_ext-memory-overhead.patch
+++ b/patches/lib-introduce-early-boot-parameter-to-avoid-page_ext-memory-overhead.patch
@@ -102,7 +102,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static __init void init_page_alloc_tagging(void)
@@ -161,6 +198,8 @@ static int __init alloc_tag_init(void)
- if (IS_ERR_OR_NULL(alloc_tag_cttype))
+ if (IS_ERR(alloc_tag_cttype))
return PTR_ERR(alloc_tag_cttype);
+ if (!mem_profiling_support)
diff --git a/patches/mm-always-initialise-folio-_deferred_list-fix.patch b/patches/mm-always-initialise-folio-_deferred_list-fix.patch
new file mode 100644
index 000000000..13500e357
--- /dev/null
+++ b/patches/mm-always-initialise-folio-_deferred_list-fix.patch
@@ -0,0 +1,32 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: fixup! mm: always initialise folio->_deferred_list
+Date: Wed, 17 Apr 2024 17:18:34 -0400
+
+Current mm-unstable will hit this when running test_hugetlb_memcg. This
+fixes the crash for me.
+
+Link: https://lkml.kernel.org/r/20240417211836.2742593-2-peterx@redhat.com
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/memcontrol.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/memcontrol.c~mm-always-initialise-folio-_deferred_list-fix
++++ a/mm/memcontrol.c
+@@ -7401,6 +7401,7 @@ static void uncharge_folio(struct folio
+
+ VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+ VM_BUG_ON_FOLIO(folio_order(folio) > 1 &&
++ !folio_test_hugetlb(folio) &&
+ !list_empty(&folio->_deferred_list), folio);
+
+ /*
+_
diff --git a/patches/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.patch b/patches/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.patch
new file mode 100644
index 000000000..ff1384e08
--- /dev/null
+++ b/patches/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.patch
@@ -0,0 +1,32 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: mm/hugetlb: assert hugetlb_lock in __hugetlb_cgroup_commit_charge
+Date: Wed, 17 Apr 2024 17:18:36 -0400
+
+This is similar to __hugetlb_cgroup_uncharge_folio() where it relies on
+holding hugetlb_lock. Add the similar assertion like the other one, since
+it looks like such things may help some day.
+
+Link: https://lkml.kernel.org/r/20240417211836.2742593-4-peterx@redhat.com
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Mina Almasry <almasrymina@google.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/hugetlb_cgroup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/hugetlb_cgroup.c~mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge
++++ a/mm/hugetlb_cgroup.c
+@@ -308,7 +308,7 @@ static void __hugetlb_cgroup_commit_char
+ {
+ if (hugetlb_cgroup_disabled() || !h_cg)
+ return;
+-
++ lockdep_assert_held(&hugetlb_lock);
+ __set_hugetlb_cgroup(folio, h_cg, rsvd);
+ if (!rsvd) {
+ unsigned long usage =
+_
diff --git a/patches/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.patch b/patches/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.patch
new file mode 100644
index 000000000..774a705a1
--- /dev/null
+++ b/patches/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.patch
@@ -0,0 +1,47 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: mm/hugetlb: fix missing hugetlb_lock for resv uncharge
+Date: Wed, 17 Apr 2024 17:18:35 -0400
+
+There is a recent report on UFFDIO_COPY over hugetlb:
+
+https://lore.kernel.org/all/000000000000ee06de0616177560@google.com/
+
+350: lockdep_assert_held(&hugetlb_lock);
+
+Should be an issue in hugetlb but triggered in an userfault context, where
+it goes into the unlikely path where two threads modifying the resv map
+together. Mike has a fix in that path for resv uncharge but it looks like
+the locking criteria was overlooked: hugetlb_cgroup_uncharge_folio_rsvd()
+will update the cgroup pointer, so it requires to be called with the lock
+held.
+
+Link: https://lkml.kernel.org/r/20240417211836.2742593-3-peterx@redhat.com
+Fixes: 79aa925bf239 ("hugetlb_cgroup: fix reservation accounting")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Reported-by: syzbot+4b8077a5fccc61c385a1@syzkaller.appspotmail.com
+Cc: Mina Almasry <almasrymina@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/hugetlb.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/mm/hugetlb.c~mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge
++++ a/mm/hugetlb.c
+@@ -3268,9 +3268,12 @@ struct folio *alloc_hugetlb_folio(struct
+
+ rsv_adjust = hugepage_subpool_put_pages(spool, 1);
+ hugetlb_acct_memory(h, -rsv_adjust);
+- if (deferred_reserve)
++ if (deferred_reserve) {
++ spin_lock_irq(&hugetlb_lock);
+ hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+ pages_per_huge_page(h), folio);
++ spin_unlock_irq(&hugetlb_lock);
++ }
+ }
+
+ if (!memcg_charge_ret)
+_
diff --git a/patches/mm-page_table_check-support-userfault-wr-protect-entries.patch b/patches/mm-page_table_check-support-userfault-wr-protect-entries.patch
index 8b9ab4750..167892194 100644
--- a/patches/mm-page_table_check-support-userfault-wr-protect-entries.patch
+++ b/patches/mm-page_table_check-support-userfault-wr-protect-entries.patch
@@ -1,9 +1,9 @@
From: Peter Xu <peterx@redhat.com>
Subject: mm/page_table_check: support userfault wr-protect entries
-Date: Mon, 15 Apr 2024 16:52:59 -0400
+Date: Wed, 17 Apr 2024 17:25:49 -0400
Allow page_table_check hooks to check over userfaultfd wr-protect criteria
-upon pgtable updates. The rule is no co-existence allowed for any
+upon pgtable updates. The rule is no co-existance allowed for any
writable flag against userfault wr-protect flag.
This should be better than c2da319c2e, where we used to only sanitize such
@@ -46,7 +46,7 @@ better now.
[1] https://lore.kernel.org/all/000000000000dce0530615c89210@google.com/
-Link: https://lkml.kernel.org/r/20240415205259.2535077-1-peterx@redhat.com
+Link: https://lkml.kernel.org/r/20240417212549.2766883-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
@@ -55,9 +55,10 @@ Cc: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
- arch/x86/include/asm/pgtable.h | 18 -----------------
- mm/page_table_check.c | 32 ++++++++++++++++++++++++++++++-
- 2 files changed, 32 insertions(+), 18 deletions(-)
+ Documentation/mm/page_table_check.rst | 9 ++++++-
+ arch/x86/include/asm/pgtable.h | 18 --------------
+ mm/page_table_check.c | 30 ++++++++++++++++++++++++
+ 3 files changed, 39 insertions(+), 18 deletions(-)
--- a/arch/x86/include/asm/pgtable.h~mm-page_table_check-support-userfault-wr-protect-entries
+++ a/arch/x86/include/asm/pgtable.h
@@ -86,6 +87,31 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
static inline pte_t pte_mkuffd_wp(pte_t pte)
+--- a/Documentation/mm/page_table_check.rst~mm-page_table_check-support-userfault-wr-protect-entries
++++ a/Documentation/mm/page_table_check.rst
+@@ -14,7 +14,7 @@ Page table check performs extra verifica
+ accessible from the userspace by getting their page table entries (PTEs PMDs
+ etc.) added into the table.
+
+-In case of detected corruption, the kernel is crashed. There is a small
++In case of most detected corruption, the kernel is crashed. There is a small
+ performance and memory overhead associated with the page table check. Therefore,
+ it is disabled by default, but can be optionally enabled on systems where the
+ extra hardening outweighs the performance costs. Also, because page table check
+@@ -22,6 +22,13 @@ is synchronous, it can help with debuggi
+ by crashing kernel at the time wrong mapping occurs instead of later which is
+ often the case with memory corruptions bugs.
+
++It can also be used to do page table entry checks over various flags, dump
++warnings when illegal combinations of entry flags are detected. Currently,
++userfaultfd is the only user of such to sanity check wr-protect bit against
++any writable flags. Illegal flag combinations will not directly cause data
++corruption in this case immediately, but that will cause read-only data to
++be writable, leading to corrupt when the page content is later modified.
++
+ Double mapping detection logic
+ ==============================
+
--- a/mm/page_table_check.c~mm-page_table_check-support-userfault-wr-protect-entries
+++ a/mm/page_table_check.c
@@ -7,6 +7,8 @@
@@ -97,20 +123,19 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#undef pr_fmt
#define pr_fmt(fmt) "page_table_check: " fmt
-@@ -182,6 +184,23 @@ void __page_table_check_pud_clear(struct
+@@ -182,6 +184,22 @@ void __page_table_check_pud_clear(struct
}
EXPORT_SYMBOL(__page_table_check_pud_clear);
+/* Whether the swap entry cached writable information */
+static inline bool swap_cached_writable(swp_entry_t entry)
+{
-+ unsigned type = swp_type(entry);
-+
-+ return type == SWP_DEVICE_EXCLUSIVE_WRITE ||
-+ type == SWP_MIGRATION_WRITE;
++ return is_writable_device_exclusive_entry(entry) ||
++ is_writable_device_private_entry(entry) ||
++ is_writable_migration_entry(entry);
+}
+
-+static inline void __page_table_check_pte(pte_t pte)
++static inline void page_table_check_pte_flags(pte_t pte)
+{
+ if (pte_present(pte) && pte_uffd_wp(pte))
+ WARN_ON_ONCE(pte_write(pte));
@@ -121,21 +146,20 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
unsigned int nr)
{
-@@ -190,18 +209,29 @@ void __page_table_check_ptes_set(struct
+@@ -190,6 +208,8 @@ void __page_table_check_ptes_set(struct
if (&init_mm == mm)
return;
-- for (i = 0; i < nr; i++)
-+ for (i = 0; i < nr; i++) {
-+ __page_table_check_pte(pte);
++ page_table_check_pte_flags(pte);
++
+ for (i = 0; i < nr; i++)
__page_table_check_pte_clear(mm, ptep_get(ptep + i));
-+ }
if (pte_user_accessible_page(pte))
- page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
+@@ -197,11 +217,21 @@ void __page_table_check_ptes_set(struct
}
EXPORT_SYMBOL(__page_table_check_ptes_set);
-+static inline void __page_table_check_pmd(pmd_t pmd)
++static inline void page_table_check_pmd_flags(pmd_t pmd)
+{
+ if (pmd_present(pmd) && pmd_uffd_wp(pmd))
+ WARN_ON_ONCE(pmd_write(pmd));
@@ -148,7 +172,8 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (&init_mm == mm)
return;
-+ __page_table_check_pmd(pmd);
++ page_table_check_pmd_flags(pmd);
++
__page_table_check_pmd_clear(mm, *pmdp);
if (pmd_user_accessible_page(pmd)) {
page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
diff --git a/patches/mm-zswap-optimize-zswap-pool-size-tracking.patch b/patches/mm-zswap-optimize-zswap-pool-size-tracking.patch
index 46e754c12..f2860c73e 100644
--- a/patches/mm-zswap-optimize-zswap-pool-size-tracking.patch
+++ b/patches/mm-zswap-optimize-zswap-pool-size-tracking.patch
@@ -191,16 +191,16 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
/*
-@@ -1337,7 +1322,7 @@ static unsigned long zswap_shrinker_coun
- nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
- #else
- /* use pool stats instead of memcg stats */
-- nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
-+ nr_backing = zswap_total_pages();
- nr_stored = atomic_read(&zswap_nr_stored);
- #endif
+@@ -1344,7 +1329,7 @@ static unsigned long zswap_shrinker_coun
+ nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
+ nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
+ } else {
+- nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
++ nr_backing = zswap_total_pages();
+ nr_stored = atomic_read(&zswap_nr_stored);
+ }
-@@ -1405,6 +1390,10 @@ static void shrink_worker(struct work_st
+@@ -1412,6 +1397,10 @@ static void shrink_worker(struct work_st
{
struct mem_cgroup *memcg;
int ret, failures = 0;
@@ -211,7 +211,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* global reclaim will select cgroup in a round-robin fashion. */
do {
-@@ -1452,10 +1441,9 @@ static void shrink_worker(struct work_st
+@@ -1459,10 +1448,9 @@ static void shrink_worker(struct work_st
break;
if (ret && ++failures == MAX_RECLAIM_RETRIES)
break;
@@ -223,7 +223,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
-@@ -1496,6 +1484,7 @@ bool zswap_store(struct folio *folio)
+@@ -1503,6 +1491,7 @@ bool zswap_store(struct folio *folio)
struct zswap_entry *entry, *dupentry;
struct obj_cgroup *objcg = NULL;
struct mem_cgroup *memcg = NULL;
@@ -231,7 +231,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
VM_WARN_ON_ONCE(!folio_test_locked(folio));
VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
-@@ -1507,6 +1496,7 @@ bool zswap_store(struct folio *folio)
+@@ -1514,6 +1503,7 @@ bool zswap_store(struct folio *folio)
if (!zswap_enabled)
goto check_old;
@@ -239,7 +239,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
objcg = get_obj_cgroup_from_folio(folio);
if (objcg && !obj_cgroup_may_zswap(objcg)) {
memcg = get_mem_cgroup_from_objcg(objcg);
-@@ -1517,15 +1507,18 @@ bool zswap_store(struct folio *folio)
+@@ -1524,15 +1514,18 @@ bool zswap_store(struct folio *folio)
mem_cgroup_put(memcg);
}
@@ -261,7 +261,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
goto shrink;
else
zswap_pool_reached_full = false;
-@@ -1601,7 +1594,6 @@ insert_entry:
+@@ -1608,7 +1601,6 @@ insert_entry:
/* update stats */
atomic_inc(&zswap_stored_pages);
@@ -269,7 +269,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
count_vm_event(ZSWPOUT);
return true;
-@@ -1745,6 +1737,13 @@ void zswap_swapoff(int type)
+@@ -1752,6 +1744,13 @@ void zswap_swapoff(int type)
static struct dentry *zswap_debugfs_root;
@@ -283,7 +283,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static int zswap_debugfs_init(void)
{
if (!debugfs_initialized())
-@@ -1766,8 +1765,8 @@ static int zswap_debugfs_init(void)
+@@ -1773,8 +1772,8 @@ static int zswap_debugfs_init(void)
zswap_debugfs_root, &zswap_reject_compress_poor);
debugfs_create_u64("written_back_pages", 0444,
zswap_debugfs_root, &zswap_written_back_pages);
diff --git a/patches/mm-zswap-remove-nr_zswap_stored-atomic.patch b/patches/mm-zswap-remove-nr_zswap_stored-atomic.patch
index 34949a085..f609e6e30 100644
--- a/patches/mm-zswap-remove-nr_zswap_stored-atomic.patch
+++ b/patches/mm-zswap-remove-nr_zswap_stored-atomic.patch
@@ -58,16 +58,16 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
zswap_pool_put(entry->pool);
}
if (entry->objcg) {
-@@ -1318,7 +1315,7 @@ static unsigned long zswap_shrinker_coun
- #else
- /* use pool stats instead of memcg stats */
- nr_backing = zswap_total_pages();
-- nr_stored = atomic_read(&zswap_nr_stored);
-+ nr_stored = atomic_read(&zswap_stored_pages);
- #endif
+@@ -1325,7 +1322,7 @@ static unsigned long zswap_shrinker_coun
+ nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
+ } else {
+ nr_backing = zswap_total_pages();
+- nr_stored = atomic_read(&zswap_nr_stored);
++ nr_stored = atomic_read(&zswap_stored_pages);
+ }
if (!nr_stored)
-@@ -1338,6 +1335,11 @@ static unsigned long zswap_shrinker_coun
+@@ -1345,6 +1342,11 @@ static unsigned long zswap_shrinker_coun
* This ensures that the better zswap compresses memory, the fewer
* pages we will evict to swap (as it will otherwise incur IO for
* relatively small memory saving).
@@ -79,7 +79,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
*/
return mult_frac(nr_freeable, nr_backing, nr_stored);
}
-@@ -1583,7 +1585,6 @@ insert_entry:
+@@ -1590,7 +1592,6 @@ insert_entry:
if (entry->length) {
INIT_LIST_HEAD(&entry->lru);
zswap_lru_add(&zswap_list_lru, entry);
diff --git a/patches/null-pointer-dereference-while-shrinking-zswap.patch b/patches/null-pointer-dereference-while-shrinking-zswap.patch
new file mode 100644
index 000000000..a94d69056
--- /dev/null
+++ b/patches/null-pointer-dereference-while-shrinking-zswap.patch
@@ -0,0 +1,57 @@
+From: Johannes Weiner <hannes@cmpxchg.org>
+Subject: Re: Null pointer dereference while shrinking zswap
+Date: Wed, 17 Apr 2024 10:33:24 -0400
+
+temp version of this fix, for testing
+
+Link: https://lkml.kernel.org/r/20240417143324.GA1055428@cmpxchg.org
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Tested-by: Richard W.M. Jones <rjones@redhat.com>
+Tested-By: Christian Heusel <christian@heusel.eu>
+Cc: Chengming Zhou <chengming.zhou@linux.dev>
+Cc: Dan Streetman <ddstreet@ieee.org>
+Cc: Nhat Pham <nphamcs@gmail.com>
+Cc: Seth Jennings <sjenning@redhat.com>
+Cc: Vitaly Wool <vitaly.wool@konsulko.com>
+Cc: Yosry Ahmed <yosryahmed@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/zswap.c | 25 ++++++++++++++++---------
+ 1 file changed, 16 insertions(+), 9 deletions(-)
+
+--- a/mm/zswap.c~null-pointer-dereference-while-shrinking-zswap
++++ a/mm/zswap.c
+@@ -1331,15 +1331,22 @@ static unsigned long zswap_shrinker_coun
+ if (!gfp_has_io_fs(sc->gfp_mask))
+ return 0;
+
+-#ifdef CONFIG_MEMCG_KMEM
+- mem_cgroup_flush_stats(memcg);
+- nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
+- nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
+-#else
+- /* use pool stats instead of memcg stats */
+- nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
+- nr_stored = atomic_read(&zswap_nr_stored);
+-#endif
++ /*
++ * For memcg, use the cgroup-wide ZSWAP stats since we don't
++ * have them per-node and thus per-lruvec. Careful if memcg is
++ * runtime-disabled: we can get sc->memcg == NULL, which is ok
++ * for the lruvec, but not for memcg_page_state().
++ *
++ * Without memcg, use the zswap pool-wide metrics.
++ */
++ if (!mem_cgroup_disabled()) {
++ mem_cgroup_flush_stats(memcg);
++ nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
++ nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
++ } else {
++ nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
++ nr_stored = atomic_read(&zswap_nr_stored);
++ }
+
+ if (!nr_stored)
+ return 0;
+_
diff --git a/patches/old/mm-page_table_check-support-userfault-wr-protect-entries.patch b/patches/old/mm-page_table_check-support-userfault-wr-protect-entries.patch
new file mode 100644
index 000000000..8b9ab4750
--- /dev/null
+++ b/patches/old/mm-page_table_check-support-userfault-wr-protect-entries.patch
@@ -0,0 +1,155 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: mm/page_table_check: support userfault wr-protect entries
+Date: Mon, 15 Apr 2024 16:52:59 -0400
+
+Allow page_table_check hooks to check over userfaultfd wr-protect criteria
+upon pgtable updates. The rule is no co-existence allowed for any
+writable flag against userfault wr-protect flag.
+
+This should be better than c2da319c2e, where we used to only sanitize such
+issues during a pgtable walk, but when hitting such an issue we don't have a
+good chance of knowing where that writable bit came from [1], so that
+even the pgtable walk exposes a kernel bug (which is still helpful on
+triaging) but not easy to track and debug.
+
+Now we switch to track the source. It's much easier too with the recent
+introduction of page table check.
+
+There are some limitations with using the page table check here for
+userfaultfd wr-protect purpose:
+
+ - It is only enabled with explicit enablement of page table check configs
+ and/or boot parameters, but should be good enough to track at least
+ syzbot issues, as syzbot should enable PAGE_TABLE_CHECK[_ENFORCED] for
+ x86 [1]. We used to have DEBUG_VM but it's now off for most distros,
+ while distros also normally do not enable PAGE_TABLE_CHECK[_ENFORCED], which
+ is similar.
+
+ - It conditionally works with the ptep_modify_prot API. It will be
+ bypassed when e.g. XEN PV is enabled, however still work for most of the
+ rest scenarios, which should be the common cases so should be good
+ enough.
+
+ - Hugetlb check is a bit hairy, as the page table check cannot identify
+ hugetlb pte or normal pte via trapping at set_pte_at(), because of the
+ current design where hugetlb maps every layers to pte_t... For example,
+ the default set_huge_pte_at() can invoke set_pte_at() directly and lose
+ the hugetlb context, treating it the same as a normal pte_t. So far it's
+ fine because we have huge_pte_uffd_wp() always equal to pte_uffd_wp() as
+ long as supported (x86 only). It'll be a bigger problem when we'll
+ define _PAGE_UFFD_WP differently at various pgtable levels, because then
+ one huge_pte_uffd_wp() per-arch will stop making sense first.. as of now
+ we can leave this for later too.
+
+This patch also removes commit c2da319c2e altogether, as we have something
+better now.
+
+[1] https://lore.kernel.org/all/000000000000dce0530615c89210@google.com/
+
+Link: https://lkml.kernel.org/r/20240415205259.2535077-1-peterx@redhat.com
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Nadav Amit <nadav.amit@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ arch/x86/include/asm/pgtable.h | 18 -----------------
+ mm/page_table_check.c | 32 ++++++++++++++++++++++++++++++-
+ 2 files changed, 32 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable.h~mm-page_table_check-support-userfault-wr-protect-entries
++++ a/arch/x86/include/asm/pgtable.h
+@@ -388,23 +388,7 @@ static inline pte_t pte_wrprotect(pte_t
+ #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+ static inline int pte_uffd_wp(pte_t pte)
+ {
+- bool wp = pte_flags(pte) & _PAGE_UFFD_WP;
+-
+-#ifdef CONFIG_DEBUG_VM
+- /*
+- * Having write bit for wr-protect-marked present ptes is fatal,
+- * because it means the uffd-wp bit will be ignored and write will
+- * just go through.
+- *
+- * Use any chance of pgtable walking to verify this (e.g., when
+- * page swapped out or being migrated for all purposes). It means
+- * something is already wrong. Tell the admin even before the
+- * process crashes. We also nail it with wrong pgtable setup.
+- */
+- WARN_ON_ONCE(wp && pte_write(pte));
+-#endif
+-
+- return wp;
++ return pte_flags(pte) & _PAGE_UFFD_WP;
+ }
+
+ static inline pte_t pte_mkuffd_wp(pte_t pte)
+--- a/mm/page_table_check.c~mm-page_table_check-support-userfault-wr-protect-entries
++++ a/mm/page_table_check.c
+@@ -7,6 +7,8 @@
+ #include <linux/kstrtox.h>
+ #include <linux/mm.h>
+ #include <linux/page_table_check.h>
++#include <linux/swap.h>
++#include <linux/swapops.h>
+
+ #undef pr_fmt
+ #define pr_fmt(fmt) "page_table_check: " fmt
+@@ -182,6 +184,23 @@ void __page_table_check_pud_clear(struct
+ }
+ EXPORT_SYMBOL(__page_table_check_pud_clear);
+
++/* Whether the swap entry cached writable information */
++static inline bool swap_cached_writable(swp_entry_t entry)
++{
++ unsigned type = swp_type(entry);
++
++ return type == SWP_DEVICE_EXCLUSIVE_WRITE ||
++ type == SWP_MIGRATION_WRITE;
++}
++
++static inline void __page_table_check_pte(pte_t pte)
++{
++ if (pte_present(pte) && pte_uffd_wp(pte))
++ WARN_ON_ONCE(pte_write(pte));
++ else if (is_swap_pte(pte) && pte_swp_uffd_wp(pte))
++ WARN_ON_ONCE(swap_cached_writable(pte_to_swp_entry(pte)));
++}
++
+ void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
+ unsigned int nr)
+ {
+@@ -190,18 +209,29 @@ void __page_table_check_ptes_set(struct
+ if (&init_mm == mm)
+ return;
+
+- for (i = 0; i < nr; i++)
++ for (i = 0; i < nr; i++) {
++ __page_table_check_pte(pte);
+ __page_table_check_pte_clear(mm, ptep_get(ptep + i));
++ }
+ if (pte_user_accessible_page(pte))
+ page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
+ }
+ EXPORT_SYMBOL(__page_table_check_ptes_set);
+
++static inline void __page_table_check_pmd(pmd_t pmd)
++{
++ if (pmd_present(pmd) && pmd_uffd_wp(pmd))
++ WARN_ON_ONCE(pmd_write(pmd));
++ else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd))
++ WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
++}
++
+ void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
+ {
+ if (&init_mm == mm)
+ return;
+
++ __page_table_check_pmd(pmd);
+ __page_table_check_pmd_clear(mm, *pmdp);
+ if (pmd_user_accessible_page(pmd)) {
+ page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
+_
diff --git a/patches/s390-netiucv-remove-function-pointer-cast.patch b/patches/s390-netiucv-remove-function-pointer-cast.patch
new file mode 100644
index 000000000..c8e08b365
--- /dev/null
+++ b/patches/s390-netiucv-remove-function-pointer-cast.patch
@@ -0,0 +1,63 @@
+From: Nathan Chancellor <nathan@kernel.org>
+Subject: s390/netiucv: remove function pointer cast
+Date: Wed, 17 Apr 2024 11:24:37 -0700
+
+Clang warns (or errors with CONFIG_WERROR) after enabling
+-Wcast-function-type-strict by default:
+
+ drivers/s390/net/netiucv.c:1716:18: error: cast from 'void (*)(const void *)' to 'void (*)(struct device *)' converts to incompatible function type [-Werror,-Wcast-function-type-strict]
+ 1716 | dev->release = (void (*)(struct device *))kfree;
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 1 error generated.
+
+Add a standalone function to fix the warning properly, which addresses
+the root of the warning that these casts are not safe for kCFI. The
+comment is not really relevant after this change, so remove it.
+
+Link: https://lkml.kernel.org/r/20240417-s390-drivers-fix-cast-function-type-v1-3-fd048c9903b0@kernel.org
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Cc: Alexander Gordeev <agordeev@linux.ibm.com>
+Cc: Alexandra Winter <wintera@linux.ibm.com>
+Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Sven Schnelle <svens@linux.ibm.com>
+Cc: Thorsten Winkler <twinkler@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ drivers/s390/net/netiucv.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/drivers/s390/net/netiucv.c~s390-netiucv-remove-function-pointer-cast
++++ a/drivers/s390/net/netiucv.c
+@@ -1693,6 +1693,11 @@ static const struct attribute_group *net
+ NULL,
+ };
+
++static void netiucv_free_dev(struct device *dev)
++{
++ kfree(dev);
++}
++
+ static int netiucv_register_device(struct net_device *ndev)
+ {
+ struct netiucv_priv *priv = netdev_priv(ndev);
+@@ -1706,14 +1711,7 @@ static int netiucv_register_device(struc
+ dev->bus = &iucv_bus;
+ dev->parent = iucv_root;
+ dev->groups = netiucv_attr_groups;
+- /*
+- * The release function could be called after the
+- * module has been unloaded. It's _only_ task is to
+- * free the struct. Therefore, we specify kfree()
+- * directly here. (Probably a little bit obfuscating
+- * but legitime ...).
+- */
+- dev->release = (void (*)(struct device *))kfree;
++ dev->release = netiucv_free_dev;
+ dev->driver = &netiucv_driver;
+ } else
+ return -ENOMEM;
+_
diff --git a/patches/s390-smsgiucv_app-remove-function-pointer-cast.patch b/patches/s390-smsgiucv_app-remove-function-pointer-cast.patch
new file mode 100644
index 000000000..c72798b61
--- /dev/null
+++ b/patches/s390-smsgiucv_app-remove-function-pointer-cast.patch
@@ -0,0 +1,55 @@
+From: Nathan Chancellor <nathan@kernel.org>
+Subject: s390/smsgiucv_app: remove function pointer cast
+Date: Wed, 17 Apr 2024 11:24:36 -0700
+
+Clang warns (or errors with CONFIG_WERROR) after enabling
+-Wcast-function-type-strict by default:
+
+ drivers/s390/net/smsgiucv_app.c:176:26: error: cast from 'void (*)(const void *)' to 'void (*)(struct device *)' converts to incompatible function type [-Werror,-Wcast-function-type-strict]
+ 176 | smsg_app_dev->release = (void (*)(struct device *)) kfree;
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 1 error generated.
+
+Add a standalone function to fix the warning properly, which addresses
+the root of the warning that these casts are not safe for kCFI.
+
+Link: https://lkml.kernel.org/r/20240417-s390-drivers-fix-cast-function-type-v1-2-fd048c9903b0@kernel.org
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Cc: Alexander Gordeev <agordeev@linux.ibm.com>
+Cc: Alexandra Winter <wintera@linux.ibm.com>
+Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Sven Schnelle <svens@linux.ibm.com>
+Cc: Thorsten Winkler <twinkler@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ drivers/s390/net/smsgiucv_app.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/s390/net/smsgiucv_app.c~s390-smsgiucv_app-remove-function-pointer-cast
++++ a/drivers/s390/net/smsgiucv_app.c
+@@ -64,6 +64,11 @@ static void smsg_app_event_free(struct s
+ kfree(ev);
+ }
+
++static void smsg_app_free_dev(struct device *dev)
++{
++ kfree(dev);
++}
++
+ static struct smsg_app_event *smsg_app_event_alloc(const char *from,
+ const char *msg)
+ {
+@@ -173,7 +178,7 @@ static int __init smsgiucv_app_init(void
+ }
+ smsg_app_dev->bus = &iucv_bus;
+ smsg_app_dev->parent = iucv_root;
+- smsg_app_dev->release = (void (*)(struct device *)) kfree;
++ smsg_app_dev->release = smsg_app_free_dev;
+ smsg_app_dev->driver = smsgiucv_drv;
+ rc = device_register(smsg_app_dev);
+ if (rc) {
+_
diff --git a/patches/s390-vmlogrdr-remove-function-pointer-cast.patch b/patches/s390-vmlogrdr-remove-function-pointer-cast.patch
new file mode 100644
index 000000000..9b90a7d39
--- /dev/null
+++ b/patches/s390-vmlogrdr-remove-function-pointer-cast.patch
@@ -0,0 +1,72 @@
+From: Nathan Chancellor <nathan@kernel.org>
+Subject: s390/vmlogrdr: remove function pointer cast
+Date: Wed, 17 Apr 2024 11:24:35 -0700
+
+Patch series "drivers/s390: Fix instances of -Wcast-function-type-strict".
+
+This series resolves the instances of -Wcast-function-type-strict that
+show up in my s390 builds on -next, which has this warning enabled by
+default.
+
+
+This patch (of 3):
+
+Clang warns (or errors with CONFIG_WERROR) after enabling
+-Wcast-function-type-strict by default:
+
+ drivers/s390/char/vmlogrdr.c:746:18: error: cast from 'void (*)(const void *)' to 'void (*)(struct device *)' converts to incompatible function type [-Werror,-Wcast-function-type-strict]
+ 746 | dev->release = (void (*)(struct device *))kfree;
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 1 error generated.
+
+Add a standalone function to fix the warning properly, which addresses
+the root of the warning that these casts are not safe for kCFI. The
+comment is not really relevant after this change, so remove it.
+
+Link: https://lkml.kernel.org/r/20240417-s390-drivers-fix-cast-function-type-v1-0-fd048c9903b0@kernel.org
+Link: https://lkml.kernel.org/r/20240417-s390-drivers-fix-cast-function-type-v1-1-fd048c9903b0@kernel.org
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Cc: Alexander Gordeev <agordeev@linux.ibm.com>
+Cc: Alexandra Winter <wintera@linux.ibm.com>
+Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Sven Schnelle <svens@linux.ibm.com>
+Cc: Thorsten Winkler <twinkler@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ drivers/s390/char/vmlogrdr.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/drivers/s390/char/vmlogrdr.c~s390-vmlogrdr-remove-function-pointer-cast
++++ a/drivers/s390/char/vmlogrdr.c
+@@ -722,6 +722,10 @@ static void vmlogrdr_unregister_driver(v
+ iucv_unregister(&vmlogrdr_iucv_handler, 1);
+ }
+
++static void vmlogrdr_free_dev(struct device *dev)
++{
++ kfree(dev);
++}
+
+ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
+ {
+@@ -736,14 +740,7 @@ static int vmlogrdr_register_device(stru
+ dev->driver = &vmlogrdr_driver;
+ dev->groups = vmlogrdr_attr_groups;
+ dev_set_drvdata(dev, priv);
+- /*
+- * The release function could be called after the
+- * module has been unloaded. It's _only_ task is to
+- * free the struct. Therefore, we specify kfree()
+- * directly here. (Probably a little bit obfuscating
+- * but legitime ...).
+- */
+- dev->release = (void (*)(struct device *))kfree;
++ dev->release = vmlogrdr_free_dev;
+ } else
+ return -ENOMEM;
+ ret = device_register(dev);
+_
diff --git a/patches/selftest-mm-mseal-read-only-elf-memory-segment-fix.patch b/patches/selftest-mm-mseal-read-only-elf-memory-segment-fix.patch
new file mode 100644
index 000000000..9e68f99fa
--- /dev/null
+++ b/patches/selftest-mm-mseal-read-only-elf-memory-segment-fix.patch
@@ -0,0 +1,524 @@
+From: Jeff Xu <jeffxu@chromium.org>
+Subject: selftest mm/mseal: style change
+Date: Tue, 16 Apr 2024 22:09:44 +0000
+
+remove "assert" from testcase.
+remove "return 0"
+
+Link: https://lkml.kernel.org/r/20240416220944.2481203-2-jeffxu@chromium.org
+Signed-off-by: Jeff Xu <jeffxu@chromium.org>
+Suggested-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Reviewed-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ tools/testing/selftests/mm/mseal_test.c | 124 ++++++++++++++++------
+ tools/testing/selftests/mm/seal_elf.c | 3
+ 2 files changed, 91 insertions(+), 36 deletions(-)
+
+--- a/tools/testing/selftests/mm/mseal_test.c~selftest-mm-mseal-read-only-elf-memory-segment-fix
++++ a/tools/testing/selftests/mm/mseal_test.c
+@@ -12,9 +12,7 @@
+ #include <errno.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+-#include <assert.h>
+ #include <fcntl.h>
+-#include <assert.h>
+ #include <sys/ioctl.h>
+ #include <sys/vfs.h>
+ #include <sys/stat.h>
+@@ -189,7 +187,6 @@ static void __write_pkey_reg(u64 pkey_re
+
+ asm volatile(".byte 0x0f,0x01,0xef\n\t"
+ : : "a" (eax), "c" (ecx), "d" (edx));
+- assert(pkey_reg == __read_pkey_reg());
+ #endif
+ }
+
+@@ -214,7 +211,6 @@ static void set_pkey(int pkey, unsigned
+ unsigned long mask = (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE);
+ u64 new_pkey_reg;
+
+- assert(!(pkey_value & ~mask));
+ new_pkey_reg = set_pkey_bits(__read_pkey_reg(), pkey, pkey_value);
+ __write_pkey_reg(new_pkey_reg);
+ }
+@@ -224,7 +220,6 @@ static void setup_single_address(int siz
+ void *ptr;
+
+ ptr = sys_mmap(NULL, size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+- assert(ptr != (void *)-1);
+ *ptrOut = ptr;
+ }
+
+@@ -234,24 +229,21 @@ static void setup_single_address_rw(int
+ unsigned long mapflags = MAP_ANONYMOUS | MAP_PRIVATE;
+
+ ptr = sys_mmap(NULL, size, PROT_READ | PROT_WRITE, mapflags, -1, 0);
+- assert(ptr != (void *)-1);
+ *ptrOut = ptr;
+ }
+
+-static void clean_single_address(void *ptr, int size)
++static int clean_single_address(void *ptr, int size)
+ {
+ int ret;
+-
+ ret = munmap(ptr, size);
+- assert(!ret);
++ return ret;
+ }
+
+-static void seal_single_address(void *ptr, int size)
++static int seal_single_address(void *ptr, int size)
+ {
+ int ret;
+-
+ ret = sys_mseal(ptr, size);
+- assert(!ret);
++ return ret;
+ }
+
+ bool seal_support(void)
+@@ -290,6 +282,7 @@ static void test_seal_addseal(void)
+ unsigned long size = 4 * page_size;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ ret = sys_mseal(ptr, size);
+ FAIL_TEST_IF_FALSE(!ret);
+@@ -305,6 +298,7 @@ static void test_seal_unmapped_start(voi
+ unsigned long size = 4 * page_size;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* munmap 2 pages from ptr. */
+ ret = sys_munmap(ptr, 2 * page_size);
+@@ -332,6 +326,7 @@ static void test_seal_unmapped_middle(vo
+ unsigned long size = 4 * page_size;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* munmap 2 pages from ptr + page. */
+ ret = sys_munmap(ptr + page_size, 2 * page_size);
+@@ -363,6 +358,7 @@ static void test_seal_unmapped_end(void)
+ unsigned long size = 4 * page_size;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* unmap last 2 pages. */
+ ret = sys_munmap(ptr + 2 * page_size, 2 * page_size);
+@@ -391,6 +387,7 @@ static void test_seal_multiple_vmas(void
+ unsigned long size = 4 * page_size;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* use mprotect to split the vma into 3. */
+ ret = sys_mprotect(ptr + page_size, 2 * page_size,
+@@ -421,6 +418,7 @@ static void test_seal_split_start(void)
+ unsigned long size = 4 * page_size;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* use mprotect to split at middle */
+ ret = sys_mprotect(ptr, 2 * page_size, PROT_READ | PROT_WRITE);
+@@ -445,6 +443,7 @@ static void test_seal_split_end(void)
+ unsigned long size = 4 * page_size;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* use mprotect to split at middle */
+ ret = sys_mprotect(ptr, 2 * page_size, PROT_READ | PROT_WRITE);
+@@ -469,7 +468,9 @@ static void test_seal_invalid_input(void
+ int ret;
+
+ setup_single_address(8 * page_size, &ptr);
+- clean_single_address(ptr + 4 * page_size, 4 * page_size);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
++ ret = clean_single_address(ptr + 4 * page_size, 4 * page_size);
++ FAIL_TEST_IF_FALSE(!ret);
+
+ /* invalid flag */
+ ret = syscall(__NR_mseal, ptr, size, 0x20);
+@@ -502,6 +503,7 @@ static void test_seal_zero_length(void)
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ ret = sys_mprotect(ptr, 0, PROT_READ | PROT_WRITE);
+ FAIL_TEST_IF_FALSE(!ret);
+@@ -551,6 +553,7 @@ static void test_seal_twice(void)
+ unsigned long size = 4 * page_size;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ ret = sys_mseal(ptr, size);
+ FAIL_TEST_IF_FALSE(!ret);
+@@ -570,9 +573,12 @@ static void test_seal_mprotect(bool seal
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+- if (seal)
+- seal_single_address(ptr, size);
++ if (seal) {
++ ret = seal_single_address(ptr, size);
++ FAIL_TEST_IF_FALSE(!ret);
++ }
+
+ ret = sys_mprotect(ptr, size, PROT_READ | PROT_WRITE);
+ if (seal)
+@@ -591,9 +597,12 @@ static void test_seal_start_mprotect(boo
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+- if (seal)
+- seal_single_address(ptr, page_size);
++ if (seal) {
++ ret = seal_single_address(ptr, page_size);
++ FAIL_TEST_IF_FALSE(!ret);
++ }
+
+ /* the first page is sealed. */
+ ret = sys_mprotect(ptr, page_size, PROT_READ | PROT_WRITE);
+@@ -618,9 +627,12 @@ static void test_seal_end_mprotect(bool
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+- if (seal)
+- seal_single_address(ptr + page_size, 3 * page_size);
++ if (seal) {
++ ret = seal_single_address(ptr + page_size, 3 * page_size);
++ FAIL_TEST_IF_FALSE(!ret);
++ }
+
+ /* first page is not sealed */
+ ret = sys_mprotect(ptr, page_size, PROT_READ | PROT_WRITE);
+@@ -645,9 +657,12 @@ static void test_seal_mprotect_unalign_l
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+- if (seal)
+- seal_single_address(ptr, page_size * 2 - 1);
++ if (seal) {
++ ret = seal_single_address(ptr, page_size * 2 - 1);
++ FAIL_TEST_IF_FALSE(!ret);
++ }
+
+ /* 2 pages are sealed. */
+ ret = sys_mprotect(ptr, page_size * 2, PROT_READ | PROT_WRITE);
+@@ -671,8 +686,11 @@ static void test_seal_mprotect_unalign_l
+ int ret;
+
+ setup_single_address(size, &ptr);
+- if (seal)
+- seal_single_address(ptr, page_size * 2 + 1);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
++ if (seal) {
++ ret = seal_single_address(ptr, page_size * 2 + 1);
++ FAIL_TEST_IF_FALSE(!ret);
++ }
+
+ /* 3 pages are sealed. */
+ ret = sys_mprotect(ptr, page_size * 3, PROT_READ | PROT_WRITE);
+@@ -696,13 +714,16 @@ static void test_seal_mprotect_two_vma(b
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* use mprotect to split */
+ ret = sys_mprotect(ptr, page_size * 2, PROT_READ | PROT_WRITE);
+ FAIL_TEST_IF_FALSE(!ret);
+
+- if (seal)
+- seal_single_address(ptr, page_size * 4);
++ if (seal) {
++ ret = seal_single_address(ptr, page_size * 4);
++ FAIL_TEST_IF_FALSE(!ret);
++ }
+
+ ret = sys_mprotect(ptr, page_size * 2, PROT_READ | PROT_WRITE);
+ if (seal)
+@@ -728,14 +749,17 @@ static void test_seal_mprotect_two_vma_w
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* use mprotect to split as two vma. */
+ ret = sys_mprotect(ptr, page_size * 2, PROT_READ | PROT_WRITE);
+ FAIL_TEST_IF_FALSE(!ret);
+
+ /* mseal can apply across 2 vma, also split them. */
+- if (seal)
+- seal_single_address(ptr + page_size, page_size * 2);
++ if (seal) {
++ ret = seal_single_address(ptr + page_size, page_size * 2);
++ FAIL_TEST_IF_FALSE(!ret);
++ }
+
+ /* the first page is not sealed. */
+ ret = sys_mprotect(ptr, page_size, PROT_READ | PROT_WRITE);
+@@ -772,10 +796,13 @@ static void test_seal_mprotect_partial_m
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* seal one page. */
+- if (seal)
+- seal_single_address(ptr, page_size);
++ if (seal) {
++ ret = seal_single_address(ptr, page_size);
++ FAIL_TEST_IF_FALSE(!ret);
++ }
+
+ /* mprotect first 2 page will fail, since the first page are sealed. */
+ ret = sys_mprotect(ptr, 2 * page_size, PROT_READ | PROT_WRITE);
+@@ -795,6 +822,7 @@ static void test_seal_mprotect_two_vma_w
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* use mprotect to split. */
+ ret = sys_mprotect(ptr, page_size, PROT_READ | PROT_WRITE);
+@@ -837,6 +865,7 @@ static void test_seal_mprotect_split(boo
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* use mprotect to split. */
+ ret = sys_mprotect(ptr, page_size, PROT_READ | PROT_WRITE);
+@@ -873,6 +902,7 @@ static void test_seal_mprotect_merge(boo
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* use mprotect to split one page. */
+ ret = sys_mprotect(ptr, page_size, PROT_READ | PROT_WRITE);
+@@ -906,6 +936,7 @@ static void test_seal_munmap(bool seal)
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(ptr, size);
+@@ -936,6 +967,7 @@ static void test_seal_munmap_two_vma(boo
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* use mprotect to split */
+ ret = sys_mprotect(ptr, page_size * 2, PROT_READ | PROT_WRITE);
+@@ -976,6 +1008,7 @@ static void test_seal_munmap_vma_with_ga
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ ret = sys_munmap(ptr + page_size, page_size * 2);
+ FAIL_TEST_IF_FALSE(!ret);
+@@ -1007,6 +1040,7 @@ static void test_munmap_start_freed(bool
+ int prot;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* unmap the first page. */
+ ret = sys_munmap(ptr, page_size);
+@@ -1045,6 +1079,8 @@ static void test_munmap_end_freed(bool s
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
++
+ /* unmap last page. */
+ ret = sys_munmap(ptr + page_size * 3, page_size);
+ FAIL_TEST_IF_FALSE(!ret);
+@@ -1074,6 +1110,8 @@ static void test_munmap_middle_freed(boo
+ int prot;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
++
+ /* unmap 2 pages in the middle. */
+ ret = sys_munmap(ptr + page_size, page_size * 2);
+ FAIL_TEST_IF_FALSE(!ret);
+@@ -1116,6 +1154,7 @@ static void test_seal_mremap_shrink(bool
+ void *ret2;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(ptr, size);
+@@ -1144,6 +1183,7 @@ static void test_seal_mremap_expand(bool
+ void *ret2;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+ /* ummap last 2 pages. */
+ ret = sys_munmap(ptr + 2 * page_size, 2 * page_size);
+ FAIL_TEST_IF_FALSE(!ret);
+@@ -1175,8 +1215,11 @@ static void test_seal_mremap_move(bool s
+ void *ret2;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+ setup_single_address(size, &newPtr);
+- clean_single_address(newPtr, size);
++ FAIL_TEST_IF_FALSE(newPtr != (void *)-1);
++ ret = clean_single_address(newPtr, size);
++ FAIL_TEST_IF_FALSE(!ret);
+
+ if (seal) {
+ ret = sys_mseal(ptr, size);
+@@ -1205,6 +1248,7 @@ static void test_seal_mmap_overwrite_pro
+ void *ret2;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(ptr, size);
+@@ -1232,6 +1276,7 @@ static void test_seal_mmap_expand(bool s
+ void *ret2;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+ /* ummap last 4 pages. */
+ ret = sys_munmap(ptr + 8 * page_size, 4 * page_size);
+ FAIL_TEST_IF_FALSE(!ret);
+@@ -1262,6 +1307,7 @@ static void test_seal_mmap_shrink(bool s
+ void *ret2;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(ptr, size);
+@@ -1290,7 +1336,9 @@ static void test_seal_mremap_shrink_fixe
+ void *ret2;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+ setup_single_address(size, &newAddr);
++ FAIL_TEST_IF_FALSE(newAddr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(ptr, size);
+@@ -1319,7 +1367,9 @@ static void test_seal_mremap_expand_fixe
+ void *ret2;
+
+ setup_single_address(page_size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+ setup_single_address(size, &newAddr);
++ FAIL_TEST_IF_FALSE(newAddr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(newAddr, size);
+@@ -1348,7 +1398,9 @@ static void test_seal_mremap_move_fixed(
+ void *ret2;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+ setup_single_address(size, &newAddr);
++ FAIL_TEST_IF_FALSE(newAddr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(newAddr, size);
+@@ -1375,6 +1427,7 @@ static void test_seal_mremap_move_fixed_
+ void *ret2;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(ptr, size);
+@@ -1406,6 +1459,7 @@ static void test_seal_mremap_move_dontun
+ void *ret2;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(ptr, size);
+@@ -1434,6 +1488,7 @@ static void test_seal_mremap_move_dontun
+ void *ret2;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(ptr, size);
+@@ -1469,6 +1524,7 @@ static void test_seal_merge_and_split(vo
+
+ /* (24 RO) */
+ setup_single_address(24 * page_size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ /* use mprotect(NONE) to set out boundary */
+ /* (1 NONE) (22 RO) (1 NONE) */
+@@ -1700,9 +1756,12 @@ static void test_seal_discard_ro_anon(bo
+ int ret;
+
+ setup_single_address(size, &ptr);
++ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+- if (seal)
+- seal_single_address(ptr, size);
++ if (seal) {
++ ret = seal_single_address(ptr, size);
++ FAIL_TEST_IF_FALSE(!ret);
++ }
+
+ ret = sys_madvise(ptr, size, MADV_DONTNEED);
+ if (seal)
+@@ -1832,5 +1891,4 @@ int main(int argc, char **argv)
+ test_seal_discard_ro_anon_on_pkey(true);
+
+ ksft_finished();
+- return 0;
+ }
+--- a/tools/testing/selftests/mm/seal_elf.c~selftest-mm-mseal-read-only-elf-memory-segment-fix
++++ a/tools/testing/selftests/mm/seal_elf.c
+@@ -12,9 +12,7 @@
+ #include <errno.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+-#include <assert.h>
+ #include <fcntl.h>
+-#include <assert.h>
+ #include <sys/ioctl.h>
+ #include <sys/vfs.h>
+ #include <sys/stat.h>
+@@ -179,5 +177,4 @@ int main(int argc, char **argv)
+ test_seal_elf();
+
+ ksft_finished();
+- return 0;
+ }
+_
diff --git a/patches/selftests-harness-remove-use-of-line_max-fix-fix-fix.patch b/patches/selftests-harness-remove-use-of-line_max-fix-fix-fix.patch
new file mode 100644
index 000000000..fd1210798
--- /dev/null
+++ b/patches/selftests-harness-remove-use-of-line_max-fix-fix-fix.patch
@@ -0,0 +1,59 @@
+From: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Subject: selftests: mm: fix undeclared function error
+Date: Wed, 17 Apr 2024 12:55:30 +0500
+
+Fix the error reported by clang:
+
+In file included from mdwe_test.c:17:
+./../kselftest_harness.h:1169:2: error: call to undeclared function
+'asprintf'; ISO C99 and later do not support implicit function
+declarations [-Wimplicit-function-declaration]
+ 1169 | asprintf(&test_name, "%s%s%s.%s", f->name,
+ | ^
+1 warning generated.
+
+The gcc reports it as warning:
+
+In file included from mdwe_test.c:17:
+../kselftest_harness.h: In function `__run_test':
+../kselftest_harness.h:1169:9: warning: implicit declaration of function
+`asprintf'; did you mean `vsprintf'? [-Wimplicit-function-declaration]
+ 1169 | asprintf(&test_name, "%s%s%s.%s", f->name,
+ | ^~~~~~~~
+ | vsprintf
+
+Fix this by setting _GNU_SOURCE macro needed to get exposure to the
+asprintf().
+
+Link: https://lkml.kernel.org/r/20240417075530.3807625-1-usama.anjum@collabora.com
+Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Bill Wendling <morbo@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Edward Liaw <edliaw@google.com>
+Cc: Justin Stitt <justinstitt@google.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: "Mike Rapoport (IBM)" <rppt@kernel.org>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Will Drewry <wad@chromium.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ tools/testing/selftests/mm/mdwe_test.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/testing/selftests/mm/mdwe_test.c~selftests-harness-remove-use-of-line_max-fix-fix-fix
++++ a/tools/testing/selftests/mm/mdwe_test.c
+@@ -7,6 +7,7 @@
+ #include <linux/mman.h>
+ #include <linux/prctl.h>
+
++#define _GNU_SOURCE
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <sys/auxv.h>
+_
diff --git a/pc/cpumask-delete-unused-reset_cpu_possible_mask.pc b/pc/cpumask-delete-unused-reset_cpu_possible_mask.pc
new file mode 100644
index 000000000..820986888
--- /dev/null
+++ b/pc/cpumask-delete-unused-reset_cpu_possible_mask.pc
@@ -0,0 +1 @@
+include/linux/cpumask.h
diff --git a/pc/devel-series b/pc/devel-series
index 79b54871f..2d92c6eea 100644
--- a/pc/devel-series
+++ b/pc/devel-series
@@ -98,15 +98,21 @@ mm-hotfixes-stable.patch
###
##nilfs2-fix-oob-in-nilfs_set_de_type.patch
###
-##bootconfig-use-memblock_free_late-to-free-xbc-memory-to-buddy.patch
+#
+bootconfig-use-memblock_free_late-to-free-xbc-memory-to-buddy.patch
bootconfig-use-memblock_free_late-to-free-xbc-memory-to-buddy-fix.patch
#
selftests-harness-remove-use-of-line_max.patch
selftests-harness-remove-use-of-line_max-fix.patch
selftests-harness-remove-use-of-line_max-fix-fix.patch
+selftests-harness-remove-use-of-line_max-fix-fix-fix.patch
#
selftests-mm-fix-unused-and-uninitialized-variable-warning.patch
#
+null-pointer-dereference-while-shrinking-zswap.patch
+#
+mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.patch
+#
### hfe
#
#ENDBRANCH mm-hotfixes-unstable
@@ -185,6 +191,7 @@ lib-add-allocation-tagging-support-for-memory-allocation-profiling.patch
lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix.patch
lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-2.patch
lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-3.patch
+lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.patch
lib-introduce-support-for-page-allocation-tagging.patch
lib-introduce-early-boot-parameter-to-avoid-page_ext-memory-overhead.patch
mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags.patch
@@ -226,6 +233,7 @@ memprofiling-documentation.patch
mm-change-inlined-allocation-helpers-to-account-at-the-call-site.patch
#
mm-always-initialise-folio-_deferred_list.patch
+mm-always-initialise-folio-_deferred_list-fix.patch
mm-create-folio_flag_false-and-folio_type_ops-macros.patch
mm-remove-folio_prep_large_rmappable.patch
mm-support-page_mapcount-on-page_has_type-pages.patch
@@ -603,6 +611,7 @@ mseal-add-mseal-syscall.patch
selftest-mm-mseal-memory-sealing.patch
mseal-add-documentation.patch
selftest-mm-mseal-read-only-elf-memory-segment.patch
+selftest-mm-mseal-read-only-elf-memory-segment-fix.patch
#
userfaultfd-remove-write_once-when-setting-folio-index-during-uffdio_move.patch
#
@@ -624,12 +633,16 @@ buffer-fix-__bread-and-__bread_gfp-kernel-doc.patch
buffer-add-kernel-doc-for-brelse-and-__brelse.patch
buffer-add-kernel-doc-for-bforget-and-__bforget.patch
buffer-improve-bdev_getblk-documentation.patch
-#doc-split-bufferrst-out-of-api-summaryrst.patch: https://lkml.kernel.org/r/5b1938bc-e675-4f1c-810b-dd91f6915f1d@infradead.org
doc-split-bufferrst-out-of-api-summaryrst.patch
+doc-split-bufferrst-out-of-api-summaryrst-fix.patch
#
mm-sparse-guard-the-size-of-mem_section-is-power-of-2.patch
#
-#mm-page_table_check-support-userfault-wr-protect-entries.patch: https://lkml.kernel.org/r/CA+CK2bCSs8om+7tO_Sq2fAUD+gzD_4unUXMtO9oRUB+=4biv-Q@mail.gmail.com
+fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.patch
+fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.patch
+#
+mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.patch
+#
mm-page_table_check-support-userfault-wr-protect-entries.patch
#
#
@@ -650,6 +663,7 @@ mm-page_table_check-support-userfault-wr-protect-entries.patch
#
#
#
+#
#filemap-replace-pte_offset_map-with-pte_offset_map_nolock.patch: move to hotfixes?
filemap-replace-pte_offset_map-with-pte_offset_map_nolock.patch
#
@@ -764,6 +778,10 @@ nilfs2-add-kernel-doc-comments-to-nilfs_remove_all_gcinodes.patch
#
loongarch-tlb-fix-error-parameter-ptep-set-but-not-used-due-to-__tlb_remove_tlb_entry.patch
#
+s390-vmlogrdr-remove-function-pointer-cast.patch
+s390-smsgiucv_app-remove-function-pointer-cast.patch
+s390-netiucv-remove-function-pointer-cast.patch
+#
#kbuild-turn-on-wextra-by-default.patch+N: late merge
kbuild-turn-on-wextra-by-default.patch
kbuild-remove-redundant-extra-warning-flags.patch
@@ -778,4 +796,6 @@ mux-remove-usage-of-the-deprecated-ida_simple_xx-api.patch
#
selftests-exec-make-binaries-position-independent.patch
#
+cpumask-delete-unused-reset_cpu_possible_mask.patch
+#
#ENDBRANCH mm-nonmm-unstable
diff --git a/pc/doc-split-bufferrst-out-of-api-summaryrst-fix.pc b/pc/doc-split-bufferrst-out-of-api-summaryrst-fix.pc
new file mode 100644
index 000000000..b3d01a4ea
--- /dev/null
+++ b/pc/doc-split-bufferrst-out-of-api-summaryrst-fix.pc
@@ -0,0 +1 @@
+Documentation/filesystems/buffer.rst
diff --git a/pc/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.pc b/pc/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.pc
new file mode 100644
index 000000000..d60452b01
--- /dev/null
+++ b/pc/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.pc
@@ -0,0 +1 @@
+fs/proc/task_mmu.c
diff --git a/pc/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.pc b/pc/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.pc
new file mode 100644
index 000000000..d60452b01
--- /dev/null
+++ b/pc/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.pc
@@ -0,0 +1 @@
+fs/proc/task_mmu.c
diff --git a/pc/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.pc b/pc/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.pc
new file mode 100644
index 000000000..4aea30396
--- /dev/null
+++ b/pc/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.pc
@@ -0,0 +1 @@
+lib/alloc_tag.c
diff --git a/pc/mm-always-initialise-folio-_deferred_list-fix.pc b/pc/mm-always-initialise-folio-_deferred_list-fix.pc
new file mode 100644
index 000000000..ba4010b8e
--- /dev/null
+++ b/pc/mm-always-initialise-folio-_deferred_list-fix.pc
@@ -0,0 +1 @@
+mm/memcontrol.c
diff --git a/pc/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.pc b/pc/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.pc
new file mode 100644
index 000000000..84957965a
--- /dev/null
+++ b/pc/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.pc
@@ -0,0 +1 @@
+mm/hugetlb_cgroup.c
diff --git a/pc/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.pc b/pc/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.pc
new file mode 100644
index 000000000..6dc98425d
--- /dev/null
+++ b/pc/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.pc
@@ -0,0 +1 @@
+mm/hugetlb.c
diff --git a/pc/mm-page_table_check-support-userfault-wr-protect-entries.pc b/pc/mm-page_table_check-support-userfault-wr-protect-entries.pc
index 5f28139d0..fe8823d08 100644
--- a/pc/mm-page_table_check-support-userfault-wr-protect-entries.pc
+++ b/pc/mm-page_table_check-support-userfault-wr-protect-entries.pc
@@ -1,2 +1,3 @@
arch/x86/include/asm/pgtable.h
+Documentation/mm/page_table_check.rst
mm/page_table_check.c
diff --git a/pc/null-pointer-dereference-while-shrinking-zswap.pc b/pc/null-pointer-dereference-while-shrinking-zswap.pc
new file mode 100644
index 000000000..7f1f05d5c
--- /dev/null
+++ b/pc/null-pointer-dereference-while-shrinking-zswap.pc
@@ -0,0 +1 @@
+mm/zswap.c
diff --git a/pc/s390-netiucv-remove-function-pointer-cast.pc b/pc/s390-netiucv-remove-function-pointer-cast.pc
new file mode 100644
index 000000000..d8230ab93
--- /dev/null
+++ b/pc/s390-netiucv-remove-function-pointer-cast.pc
@@ -0,0 +1 @@
+drivers/s390/net/netiucv.c
diff --git a/pc/s390-smsgiucv_app-remove-function-pointer-cast.pc b/pc/s390-smsgiucv_app-remove-function-pointer-cast.pc
new file mode 100644
index 000000000..c31021910
--- /dev/null
+++ b/pc/s390-smsgiucv_app-remove-function-pointer-cast.pc
@@ -0,0 +1 @@
+drivers/s390/net/smsgiucv_app.c
diff --git a/pc/s390-vmlogrdr-remove-function-pointer-cast.pc b/pc/s390-vmlogrdr-remove-function-pointer-cast.pc
new file mode 100644
index 000000000..876169ab6
--- /dev/null
+++ b/pc/s390-vmlogrdr-remove-function-pointer-cast.pc
@@ -0,0 +1 @@
+drivers/s390/char/vmlogrdr.c
diff --git a/pc/selftest-mm-mseal-read-only-elf-memory-segment-fix.pc b/pc/selftest-mm-mseal-read-only-elf-memory-segment-fix.pc
new file mode 100644
index 000000000..44ce41054
--- /dev/null
+++ b/pc/selftest-mm-mseal-read-only-elf-memory-segment-fix.pc
@@ -0,0 +1,2 @@
+tools/testing/selftests/mm/mseal_test.c
+tools/testing/selftests/mm/seal_elf.c
diff --git a/pc/selftests-harness-remove-use-of-line_max-fix-fix-fix.pc b/pc/selftests-harness-remove-use-of-line_max-fix-fix-fix.pc
new file mode 100644
index 000000000..4ccc3f9da
--- /dev/null
+++ b/pc/selftests-harness-remove-use-of-line_max-fix-fix-fix.pc
@@ -0,0 +1 @@
+tools/testing/selftests/mm/mdwe_test.c
diff --git a/txt/buffer-add-kernel-doc-for-bforget-and-__bforget.txt b/txt/buffer-add-kernel-doc-for-bforget-and-__bforget.txt
index 1fe20c458..b2da039fc 100644
--- a/txt/buffer-add-kernel-doc-for-bforget-and-__bforget.txt
+++ b/txt/buffer-add-kernel-doc-for-bforget-and-__bforget.txt
@@ -6,4 +6,5 @@ Distinguish these functions from brelse() and __brelse().
Link: https://lkml.kernel.org/r/20240416031754.4076917-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Tested-by: Randy Dunlap <rdunlap@infradead.org>
Cc: Pankaj Raghav <p.raghav@samsung.com>
diff --git a/txt/buffer-add-kernel-doc-for-block_dirty_folio.txt b/txt/buffer-add-kernel-doc-for-block_dirty_folio.txt
index 605d33fd8..8a0cc7064 100644
--- a/txt/buffer-add-kernel-doc-for-block_dirty_folio.txt
+++ b/txt/buffer-add-kernel-doc-for-block_dirty_folio.txt
@@ -8,3 +8,4 @@ Replace 'page' with 'folio' and make a few other minor updates.
Link: https://lkml.kernel.org/r/20240416031754.4076917-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Pankaj Raghav <p.raghav@samsung.com>
+Tested-by: Randy Dunlap <rdunlap@infradead.org>
diff --git a/txt/buffer-add-kernel-doc-for-brelse-and-__brelse.txt b/txt/buffer-add-kernel-doc-for-brelse-and-__brelse.txt
index 874edf9d9..70653693d 100644
--- a/txt/buffer-add-kernel-doc-for-brelse-and-__brelse.txt
+++ b/txt/buffer-add-kernel-doc-for-brelse-and-__brelse.txt
@@ -7,4 +7,5 @@ and update it from talking about pages to folios.
Link: https://lkml.kernel.org/r/20240416031754.4076917-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Tested-by: Randy Dunlap <rdunlap@infradead.org>
Cc: Pankaj Raghav <p.raghav@samsung.com>
diff --git a/txt/buffer-add-kernel-doc-for-try_to_free_buffers.txt b/txt/buffer-add-kernel-doc-for-try_to_free_buffers.txt
index 15ad08925..215ae2d2c 100644
--- a/txt/buffer-add-kernel-doc-for-try_to_free_buffers.txt
+++ b/txt/buffer-add-kernel-doc-for-try_to_free_buffers.txt
@@ -10,3 +10,4 @@ less about how it does it.
Link: https://lkml.kernel.org/r/20240416031754.4076917-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Pankaj Raghav <p.raghav@samsung.com>
+Tested-by: Randy Dunlap <rdunlap@infradead.org>
diff --git a/txt/buffer-fix-__bread-and-__bread_gfp-kernel-doc.txt b/txt/buffer-fix-__bread-and-__bread_gfp-kernel-doc.txt
index 6de1c8737..ad1164b95 100644
--- a/txt/buffer-fix-__bread-and-__bread_gfp-kernel-doc.txt
+++ b/txt/buffer-fix-__bread-and-__bread_gfp-kernel-doc.txt
@@ -13,3 +13,4 @@ Link: https://lkml.kernel.org/r/20240416031754.4076917-5-willy@infradead.org
Co-developed-by: Pankaj Raghav <p.raghav@samsung.com>
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Tested-by: Randy Dunlap <rdunlap@infradead.org>
diff --git a/txt/cpumask-delete-unused-reset_cpu_possible_mask.txt b/txt/cpumask-delete-unused-reset_cpu_possible_mask.txt
new file mode 100644
index 000000000..b766d670e
--- /dev/null
+++ b/txt/cpumask-delete-unused-reset_cpu_possible_mask.txt
@@ -0,0 +1,8 @@
+From: Alexey Dobriyan <adobriyan@gmail.com>
+Subject: cpumask: delete unused reset_cpu_possible_mask()
+Date: Wed, 17 Apr 2024 23:11:23 +0300
+
+Link: https://lkml.kernel.org/r/20240417201123.2961-1-adobriyan@gmail.com
+Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
+Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Cc: Yury Norov <yury.norov@gmail.com>
diff --git a/txt/doc-split-bufferrst-out-of-api-summaryrst-fix.txt b/txt/doc-split-bufferrst-out-of-api-summaryrst-fix.txt
new file mode 100644
index 000000000..0c5f2ad9d
--- /dev/null
+++ b/txt/doc-split-bufferrst-out-of-api-summaryrst-fix.txt
@@ -0,0 +1,12 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: doc-split-bufferrst-out-of-api-summaryrst-fix
+Date: Wed, 17 Apr 2024 02:57:46 +0100
+
+fix kerneldoc warning
+
+Documentation/filesystems/index.rst:50: WARNING: toctree contains reference to nonexisting document 'filesystems/buffer'
+
+Link: https://lkml.kernel.org/r/20240417015933.453505-1-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reported-by: Randy Dunlap <rdunlap@infradead.org>
+Tested-by: Randy Dunlap <rdunlap@infradead.org>
diff --git a/txt/doc-split-bufferrst-out-of-api-summaryrst.txt b/txt/doc-split-bufferrst-out-of-api-summaryrst.txt
index 470af6301..e74923a19 100644
--- a/txt/doc-split-bufferrst-out-of-api-summaryrst.txt
+++ b/txt/doc-split-bufferrst-out-of-api-summaryrst.txt
@@ -10,4 +10,5 @@ enthusiasm for documenting it is limited.
Link: https://lkml.kernel.org/r/20240416031754.4076917-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Tested-by: Randy Dunlap <rdunlap@infradead.org>
Cc: Pankaj Raghav <p.raghav@samsung.com>
diff --git a/txt/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.txt b/txt/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.txt
new file mode 100644
index 000000000..7fdb6e7ad
--- /dev/null
+++ b/txt/fs-proc-task_mmu-convert-pagemap_hugetlb_range-to-work-on-folios.txt
@@ -0,0 +1,27 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: fs/proc/task_mmu: convert pagemap_hugetlb_range() to work on folios
+Date: Wed, 17 Apr 2024 11:23:12 +0200
+
+Patch series "fs/proc/task_mmu: convert hugetlb functions to work on folios".
+
+Let's convert two more functions, getting rid of two more page_mapcount()
+calls.
+
+
+This patch (of 2):
+
+Let's get rid of another page_mapcount() check and simply use
+folio_likely_mapped_shared(), which is precise for hugetlb folios.
+
+While at it, also check for PMD table sharing, like we do in
+smaps_hugetlb_range().
+
+No functional change intended, except that we would now detect hugetlb
+folios shared via PMD table sharing correctly.
+
+Link: https://lkml.kernel.org/r/20240417092313.753919-1-david@redhat.com
+Link: https://lkml.kernel.org/r/20240417092313.753919-2-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Cc: Muchun Song <muchun.song@linux.dev>
+
diff --git a/txt/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.txt b/txt/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.txt
new file mode 100644
index 000000000..9b173064d
--- /dev/null
+++ b/txt/fs-proc-task_mmu-convert-smaps_hugetlb_range-to-work-on-folios.txt
@@ -0,0 +1,16 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: fs/proc/task_mmu: convert smaps_hugetlb_range() to work on folios
+Date: Wed, 17 Apr 2024 11:23:13 +0200
+
+Let's get rid of another page_mapcount() check and simply use
+folio_likely_mapped_shared(), which is precise for hugetlb folios.
+
+While at it, use huge_ptep_get() + pte_page() instead of ptep_get() +
+vm_normal_page(), just like we do in pagemap_hugetlb_range().
+
+No functional change intended.
+
+Link: https://lkml.kernel.org/r/20240417092313.753919-3-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Cc: Muchun Song <muchun.song@linux.dev>
diff --git a/txt/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.txt b/txt/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.txt
new file mode 100644
index 000000000..1bda9cd54
--- /dev/null
+++ b/txt/lib-add-allocation-tagging-support-for-memory-allocation-profiling-fix-4.txt
@@ -0,0 +1,14 @@
+From: Suren Baghdasaryan <surenb@google.com>
+Subject: lib: fix alloc_tag_init() to prevent passing NULL to PTR_ERR()
+Date: Tue, 16 Apr 2024 17:33:49 -0700
+
+codetag_register_type() never returns NULL, yet IS_ERR_OR_NULL() is used
+to check its return value. This leads to a warning about possibility of
+passing NULL to PTR_ERR(). Fix that by using IS_ERR() to exclude NULL.
+
+Link: https://lkml.kernel.org/r/20240417003349.2520094-1-surenb@google.com
+Fixes: 6e8a230a6b1a ("lib: add allocation tagging support for memory allocation profiling")
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lore.kernel.org/r/202404051340.7Wo7oiJ5-lkp@intel.com/
diff --git a/txt/mm-always-initialise-folio-_deferred_list-fix.txt b/txt/mm-always-initialise-folio-_deferred_list-fix.txt
new file mode 100644
index 000000000..8f8fc4b1c
--- /dev/null
+++ b/txt/mm-always-initialise-folio-_deferred_list-fix.txt
@@ -0,0 +1,15 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: fixup! mm: always initialise folio->_deferred_list
+Date: Wed, 17 Apr 2024 17:18:34 -0400
+
+Current mm-unstable will hit this when running test_hugetlb_memcg. This
+fixes the crash for me.
+
+Link: https://lkml.kernel.org/r/20240417211836.2742593-2-peterx@redhat.com
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Vlastimil Babka <vbabka@suse.cz>
diff --git a/txt/mm-always-initialise-folio-_deferred_list.txt b/txt/mm-always-initialise-folio-_deferred_list.txt
index 29a6ac64d..450b78c2c 100644
--- a/txt/mm-always-initialise-folio-_deferred_list.txt
+++ b/txt/mm-always-initialise-folio-_deferred_list.txt
@@ -31,3 +31,4 @@ Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
diff --git a/txt/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.txt b/txt/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.txt
new file mode 100644
index 000000000..9d6abe706
--- /dev/null
+++ b/txt/mm-hugetlb-assert-hugetlb_lock-in-__hugetlb_cgroup_commit_charge.txt
@@ -0,0 +1,14 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: mm/hugetlb: assert hugetlb_lock in __hugetlb_cgroup_commit_charge
+Date: Wed, 17 Apr 2024 17:18:36 -0400
+
+This is similar to __hugetlb_cgroup_uncharge_folio() where it relies on
+holding hugetlb_lock. Add the similar assertion like the other one, since
+it looks like such things may help some day.
+
+Link: https://lkml.kernel.org/r/20240417211836.2742593-4-peterx@redhat.com
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Mina Almasry <almasrymina@google.com>
+Cc: Muchun Song <muchun.song@linux.dev>
diff --git a/txt/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.txt b/txt/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.txt
new file mode 100644
index 000000000..0de0ba9dc
--- /dev/null
+++ b/txt/mm-hugetlb-fix-missing-hugetlb_lock-for-resv-uncharge.txt
@@ -0,0 +1,24 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: mm/hugetlb: fix missing hugetlb_lock for resv uncharge
+Date: Wed, 17 Apr 2024 17:18:35 -0400
+
+There is a recent report on UFFDIO_COPY over hugetlb:
+
+https://lore.kernel.org/all/000000000000ee06de0616177560@google.com/
+
+350: lockdep_assert_held(&hugetlb_lock);
+
+Should be an issue in hugetlb but triggered in a userfault context, where
+it goes into the unlikely path where two threads modify the resv map
+together. Mike has a fix in that path for resv uncharge but it looks like
+the locking criteria was overlooked: hugetlb_cgroup_uncharge_folio_rsvd()
+will update the cgroup pointer, so it requires to be called with the lock
+held.
+
+Link: https://lkml.kernel.org/r/20240417211836.2742593-3-peterx@redhat.com
+Fixes: 79aa925bf239 ("hugetlb_cgroup: fix reservation accounting")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Reported-by: syzbot+4b8077a5fccc61c385a1@syzkaller.appspotmail.com
+Cc: Mina Almasry <almasrymina@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org>
diff --git a/txt/mm-page_table_check-support-userfault-wr-protect-entries.txt b/txt/mm-page_table_check-support-userfault-wr-protect-entries.txt
index fdacaf220..046beb6cb 100644
--- a/txt/mm-page_table_check-support-userfault-wr-protect-entries.txt
+++ b/txt/mm-page_table_check-support-userfault-wr-protect-entries.txt
@@ -1,9 +1,9 @@
From: Peter Xu <peterx@redhat.com>
Subject: mm/page_table_check: support userfault wr-protect entries
-Date: Mon, 15 Apr 2024 16:52:59 -0400
+Date: Wed, 17 Apr 2024 17:25:49 -0400
Allow page_table_check hooks to check over userfaultfd wr-protect criteria
-upon pgtable updates. The rule is no co-existence allowed for any
+upon pgtable updates. The rule is no co-existence allowed for any
writable flag against userfault wr-protect flag.
This should be better than c2da319c2e, where we used to only sanitize such
@@ -46,7 +46,7 @@ better now.
[1] https://lore.kernel.org/all/000000000000dce0530615c89210@google.com/
-Link: https://lkml.kernel.org/r/20240415205259.2535077-1-peterx@redhat.com
+Link: https://lkml.kernel.org/r/20240417212549.2766883-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
diff --git a/txt/null-pointer-dereference-while-shrinking-zswap.txt b/txt/null-pointer-dereference-while-shrinking-zswap.txt
new file mode 100644
index 000000000..f437585b2
--- /dev/null
+++ b/txt/null-pointer-dereference-while-shrinking-zswap.txt
@@ -0,0 +1,16 @@
+From: Johannes Weiner <hannes@cmpxchg.org>
+Subject: Re: Null pointer dereference while shrinking zswap
+Date: Wed, 17 Apr 2024 10:33:24 -0400
+
+temp version of this fix, for testing
+
+Link: https://lkml.kernel.org/r/20240417143324.GA1055428@cmpxchg.org
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Tested-by: Richard W.M. Jones <rjones@redhat.com>
+Tested-by: Christian Heusel <christian@heusel.eu>
+Cc: Chengming Zhou <chengming.zhou@linux.dev>
+Cc: Dan Streetman <ddstreet@ieee.org>
+Cc: Nhat Pham <nphamcs@gmail.com>
+Cc: Seth Jennings <sjenning@redhat.com>
+Cc: Vitaly Wool <vitaly.wool@konsulko.com>
+Cc: Yosry Ahmed <yosryahmed@google.com>
diff --git a/txt/old/mm-page_table_check-support-userfault-wr-protect-entries.txt b/txt/old/mm-page_table_check-support-userfault-wr-protect-entries.txt
new file mode 100644
index 000000000..fdacaf220
--- /dev/null
+++ b/txt/old/mm-page_table_check-support-userfault-wr-protect-entries.txt
@@ -0,0 +1,54 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: mm/page_table_check: support userfault wr-protect entries
+Date: Mon, 15 Apr 2024 16:52:59 -0400
+
+Allow page_table_check hooks to check over userfaultfd wr-protect criteria
+upon pgtable updates. The rule is no co-existence allowed for any
+writable flag against userfault wr-protect flag.
+
+This should be better than c2da319c2e, where we used to only sanitize such
+issues during a pgtable walk, but when hitting such issue we don't have a
+good chance to know where does that writable bit came from [1], so that
+even the pgtable walk exposes a kernel bug (which is still helpful on
+triaging) but not easy to track and debug.
+
+Now we switch to track the source. It's much easier too with the recent
+introduction of page table check.
+
+There are some limitations with using the page table check here for
+userfaultfd wr-protect purpose:
+
+ - It is only enabled with explicit enablement of page table check configs
+ and/or boot parameters, but should be good enough to track at least
+ syzbot issues, as syzbot should enable PAGE_TABLE_CHECK[_ENFORCED] for
+ x86 [1]. We used to have DEBUG_VM but it's now off for most distros,
+ while distros also normally not enable PAGE_TABLE_CHECK[_ENFORCED], which
+ is similar.
+
+ - It conditionally works with the ptep_modify_prot API. It will be
+ bypassed when e.g. XEN PV is enabled, however still work for most of the
+ rest scenarios, which should be the common cases so should be good
+ enough.
+
+ - Hugetlb check is a bit hairy, as the page table check cannot identify
+ hugetlb pte or normal pte via trapping at set_pte_at(), because of the
+ current design where hugetlb maps every layers to pte_t... For example,
+ the default set_huge_pte_at() can invoke set_pte_at() directly and lose
+ the hugetlb context, treating it the same as a normal pte_t. So far it's
+ fine because we have huge_pte_uffd_wp() always equals to pte_uffd_wp() as
+ long as supported (x86 only). It'll be a bigger problem when we'll
+ define _PAGE_UFFD_WP differently at various pgtable levels, because then
+ one huge_pte_uffd_wp() per-arch will stop making sense first.. as of now
+ we can leave this for later too.
+
+This patch also removes commit c2da319c2e altogether, as we have something
+better now.
+
+[1] https://lore.kernel.org/all/000000000000dce0530615c89210@google.com/
+
+Link: https://lkml.kernel.org/r/20240415205259.2535077-1-peterx@redhat.com
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Nadav Amit <nadav.amit@gmail.com>
diff --git a/txt/s390-netiucv-remove-function-pointer-cast.txt b/txt/s390-netiucv-remove-function-pointer-cast.txt
new file mode 100644
index 000000000..e407d4d6c
--- /dev/null
+++ b/txt/s390-netiucv-remove-function-pointer-cast.txt
@@ -0,0 +1,26 @@
+From: Nathan Chancellor <nathan@kernel.org>
+Subject: s390/netiucv: remove function pointer cast
+Date: Wed, 17 Apr 2024 11:24:37 -0700
+
+Clang warns (or errors with CONFIG_WERROR) after enabling
+-Wcast-function-type-strict by default:
+
+ drivers/s390/net/netiucv.c:1716:18: error: cast from 'void (*)(const void *)' to 'void (*)(struct device *)' converts to incompatible function type [-Werror,-Wcast-function-type-strict]
+ 1716 | dev->release = (void (*)(struct device *))kfree;
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 1 error generated.
+
+Add a standalone function to fix the warning properly, which addresses
+the root of the warning that these casts are not safe for kCFI. The
+comment is not really relevant after this change, so remove it.
+
+Link: https://lkml.kernel.org/r/20240417-s390-drivers-fix-cast-function-type-v1-3-fd048c9903b0@kernel.org
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Cc: Alexander Gordeev <agordeev@linux.ibm.com>
+Cc: Alexandra Winter <wintera@linux.ibm.com>
+Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Sven Schnelle <svens@linux.ibm.com>
+Cc: Thorsten Winkler <twinkler@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
diff --git a/txt/s390-smsgiucv_app-remove-function-pointer-cast.txt b/txt/s390-smsgiucv_app-remove-function-pointer-cast.txt
new file mode 100644
index 000000000..79ba19e54
--- /dev/null
+++ b/txt/s390-smsgiucv_app-remove-function-pointer-cast.txt
@@ -0,0 +1,25 @@
+From: Nathan Chancellor <nathan@kernel.org>
+Subject: s390/smsgiucv_app: remove function pointer cast
+Date: Wed, 17 Apr 2024 11:24:36 -0700
+
+Clang warns (or errors with CONFIG_WERROR) after enabling
+-Wcast-function-type-strict by default:
+
+ drivers/s390/net/smsgiucv_app.c:176:26: error: cast from 'void (*)(const void *)' to 'void (*)(struct device *)' converts to incompatible function type [-Werror,-Wcast-function-type-strict]
+ 176 | smsg_app_dev->release = (void (*)(struct device *)) kfree;
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 1 error generated.
+
+Add a standalone function to fix the warning properly, which addresses
+the root of the warning that these casts are not safe for kCFI.
+
+Link: https://lkml.kernel.org/r/20240417-s390-drivers-fix-cast-function-type-v1-2-fd048c9903b0@kernel.org
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Cc: Alexander Gordeev <agordeev@linux.ibm.com>
+Cc: Alexandra Winter <wintera@linux.ibm.com>
+Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Sven Schnelle <svens@linux.ibm.com>
+Cc: Thorsten Winkler <twinkler@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
diff --git a/txt/s390-vmlogrdr-remove-function-pointer-cast.txt b/txt/s390-vmlogrdr-remove-function-pointer-cast.txt
new file mode 100644
index 000000000..414408fdb
--- /dev/null
+++ b/txt/s390-vmlogrdr-remove-function-pointer-cast.txt
@@ -0,0 +1,36 @@
+From: Nathan Chancellor <nathan@kernel.org>
+Subject: s390/vmlogrdr: remove function pointer cast
+Date: Wed, 17 Apr 2024 11:24:35 -0700
+
+Patch series "drivers/s390: Fix instances of -Wcast-function-type-strict".
+
+This series resolves the instances of -Wcast-function-type-strict that
+show up in my s390 builds on -next, which has this warning enabled by
+default.
+
+
+This patch (of 3):
+
+Clang warns (or errors with CONFIG_WERROR) after enabling
+-Wcast-function-type-strict by default:
+
+ drivers/s390/char/vmlogrdr.c:746:18: error: cast from 'void (*)(const void *)' to 'void (*)(struct device *)' converts to incompatible function type [-Werror,-Wcast-function-type-strict]
+ 746 | dev->release = (void (*)(struct device *))kfree;
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 1 error generated.
+
+Add a standalone function to fix the warning properly, which addresses
+the root of the warning that these casts are not safe for kCFI. The
+comment is not really relevant after this change, so remove it.
+
+Link: https://lkml.kernel.org/r/20240417-s390-drivers-fix-cast-function-type-v1-0-fd048c9903b0@kernel.org
+Link: https://lkml.kernel.org/r/20240417-s390-drivers-fix-cast-function-type-v1-1-fd048c9903b0@kernel.org
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Cc: Alexander Gordeev <agordeev@linux.ibm.com>
+Cc: Alexandra Winter <wintera@linux.ibm.com>
+Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Sven Schnelle <svens@linux.ibm.com>
+Cc: Thorsten Winkler <twinkler@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
diff --git a/txt/selftest-mm-mseal-read-only-elf-memory-segment-fix.txt b/txt/selftest-mm-mseal-read-only-elf-memory-segment-fix.txt
new file mode 100644
index 000000000..62160d73e
--- /dev/null
+++ b/txt/selftest-mm-mseal-read-only-elf-memory-segment-fix.txt
@@ -0,0 +1,11 @@
+From: Jeff Xu <jeffxu@chromium.org>
+Subject: selftest mm/mseal: style change
+Date: Tue, 16 Apr 2024 22:09:44 +0000
+
+remove "assert" from testcase.
+remove "return 0"
+
+Link: https://lkml.kernel.org/r/20240416220944.2481203-2-jeffxu@chromium.org
+Signed-off-by: Jeff Xu <jeffxu@chromium.org>
+Suggested-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Reviewed-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
diff --git a/txt/selftests-harness-remove-use-of-line_max-fix-fix-fix.txt b/txt/selftests-harness-remove-use-of-line_max-fix-fix-fix.txt
new file mode 100644
index 000000000..893886673
--- /dev/null
+++ b/txt/selftests-harness-remove-use-of-line_max-fix-fix-fix.txt
@@ -0,0 +1,42 @@
+From: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Subject: selftests: mm: fix undeclared function error
+Date: Wed, 17 Apr 2024 12:55:30 +0500
+
+Fix the error reported by clang:
+
+In file included from mdwe_test.c:17:
+./../kselftest_harness.h:1169:2: error: call to undeclared function
+'asprintf'; ISO C99 and later do not support implicit function
+declarations [-Wimplicit-function-declaration]
+ 1169 | asprintf(&test_name, "%s%s%s.%s", f->name,
+ | ^
+1 error generated.
+
+The gcc reports it as warning:
+
+In file included from mdwe_test.c:17:
+../kselftest_harness.h: In function `__run_test':
+../kselftest_harness.h:1169:9: warning: implicit declaration of function
+`asprintf'; did you mean `vsprintf'? [-Wimplicit-function-declaration]
+ 1169 | asprintf(&test_name, "%s%s%s.%s", f->name,
+ | ^~~~~~~~
+ | vsprintf
+
+Fix this by defining the _GNU_SOURCE macro, which is required to expose
+the declaration of asprintf().
+
+Link: https://lkml.kernel.org/r/20240417075530.3807625-1-usama.anjum@collabora.com
+Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Bill Wendling <morbo@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Edward Liaw <edliaw@google.com>
+Cc: Justin Stitt <justinstitt@google.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: "Mike Rapoport (IBM)" <rppt@kernel.org>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Will Drewry <wad@chromium.org>
diff --git a/txt/selftests-harness-remove-use-of-line_max-fix-fix.txt b/txt/selftests-harness-remove-use-of-line_max-fix-fix.txt
index c4d3f4006..a577e05cd 100644
--- a/txt/selftests-harness-remove-use-of-line_max-fix-fix.txt
+++ b/txt/selftests-harness-remove-use-of-line_max-fix-fix.txt
@@ -5,3 +5,17 @@ Date: Tue Apr 16 11:39:55 AM PDT 2024
check asprintf() return
Reviewed-by: Edward Liaw <edliaw@google.com>
+Tested-by: Yujie Liu <yujie.liu@intel.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Bill Wendling <morbo@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Justin Stitt <justinstitt@google.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: "Mike Rapoport (IBM)" <rppt@kernel.org>
+Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Will Drewry <wad@chromium.org>
diff --git a/txt/selftests-harness-remove-use-of-line_max-fix.txt b/txt/selftests-harness-remove-use-of-line_max-fix.txt
index 2189c534d..6cd3723a1 100644
--- a/txt/selftests-harness-remove-use-of-line_max-fix.txt
+++ b/txt/selftests-harness-remove-use-of-line_max-fix.txt
@@ -5,3 +5,16 @@ Date: Fri Apr 12 03:53:51 PM PDT 2024
remove limits.h include, per Edward
Cc: Edward Liaw <edliaw@google.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Bill Wendling <morbo@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Justin Stitt <justinstitt@google.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: "Mike Rapoport (IBM)" <rppt@kernel.org>
+Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Will Drewry <wad@chromium.org>
diff --git a/txt/selftests-harness-remove-use-of-line_max.txt b/txt/selftests-harness-remove-use-of-line_max.txt
index d44a6733c..5c01fe21e 100644
--- a/txt/selftests-harness-remove-use-of-line_max.txt
+++ b/txt/selftests-harness-remove-use-of-line_max.txt
@@ -26,3 +26,4 @@ Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Will Drewry <wad@chromium.org>
+Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>