author     Andrew Morton <akpm@linux-foundation.org>   2024-04-11 14:30:26 -0700
committer  Andrew Morton <akpm@linux-foundation.org>   2024-04-11 14:30:26 -0700
commit     1783541c07397f89f2e84cb2dd860fb95c2c5767 (patch)
tree       623b3d59adfdc27329705c48ab37c17d55cab533
parent     e4ea0ed739f273e3fcca6ee4a1712fe6da601448 (diff)
download   25-new-1783541c07397f89f2e84cb2dd860fb95c2c5767.tar.gz
foo
-rw-r--r--  patches/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.patch | 46
-rw-r--r--  patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.patch | 111
-rw-r--r--  patches/mm-ksm-add-folio_set_stable_node.patch | 47
-rw-r--r--  patches/mm-ksm-add-ksm_get_folio.patch | 146
-rw-r--r--  patches/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.patch | 401
-rw-r--r--  patches/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.patch | 140
-rw-r--r--  patches/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.patch | 54
-rw-r--r--  patches/mm-ksm-use-folio-in-remove_rmap_item_from_tree.patch | 45
-rw-r--r--  patches/mm-ksm-use-folio-in-remove_stable_node.patch | 73
-rw-r--r--  patches/mm-ksm-use-folio-in-stable_node_dup.patch | 80
-rw-r--r--  patches/mm-ksm-use-folio-in-write_protect_page.patch | 103
-rw-r--r--  patches/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.patch | 43
-rw-r--r--  patches/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch (renamed from patches/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch) | 0
-rw-r--r--  patches/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch (renamed from patches/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch) | 0
-rw-r--r--  patches/old/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.patch (renamed from patches/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.patch) | 0
-rw-r--r--  patches/old/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.patch (renamed from patches/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.patch) | 0
-rw-r--r--  patches/old/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.patch (renamed from patches/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.patch) | 0
-rw-r--r--  pc/devel-series | 22
-rw-r--r--  pc/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.pc | 1
-rw-r--r--  pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.pc | 2
-rw-r--r--  pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.pc | 3
-rw-r--r--  pc/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.pc | 4
-rw-r--r--  pc/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.pc | 3
-rw-r--r--  pc/mm-ksm-add-folio_set_stable_node.pc | 1
-rw-r--r--  pc/mm-ksm-add-ksm_get_folio.pc | 1
-rw-r--r--  pc/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.pc | 2
-rw-r--r--  pc/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.pc | 1
-rw-r--r--  pc/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.pc | 1
-rw-r--r--  pc/mm-ksm-use-folio-in-remove_rmap_item_from_tree.pc | 1
-rw-r--r--  pc/mm-ksm-use-folio-in-remove_stable_node.pc | 1
-rw-r--r--  pc/mm-ksm-use-folio-in-stable_node_dup.pc | 1
-rw-r--r--  pc/mm-ksm-use-folio-in-write_protect_page.pc | 1
-rw-r--r--  pc/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.pc | 1
-rw-r--r--  pc/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.pc | 1
-rw-r--r--  pc/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.pc | 6
-rw-r--r--  txt/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt | 2
-rw-r--r--  txt/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.txt | 16
-rw-r--r--  txt/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.txt | 14
-rw-r--r--  txt/mm-ksm-add-folio_set_stable_node.txt | 16
-rw-r--r--  txt/mm-ksm-add-ksm_get_folio.txt | 35
-rw-r--r--  txt/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.txt | 18
-rw-r--r--  txt/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.txt | 16
-rw-r--r--  txt/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.txt | 16
-rw-r--r--  txt/mm-ksm-use-folio-in-remove_rmap_item_from_tree.txt | 14
-rw-r--r--  txt/mm-ksm-use-folio-in-remove_stable_node.txt | 15
-rw-r--r--  txt/mm-ksm-use-folio-in-stable_node_dup.txt | 14
-rw-r--r--  txt/mm-ksm-use-folio-in-write_protect_page.txt | 15
-rw-r--r--  txt/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.txt | 14
-rw-r--r--  txt/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.txt (renamed from txt/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.txt) | 0
-rw-r--r--  txt/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.txt (renamed from txt/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.txt) | 0
-rw-r--r--  txt/old/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.txt (renamed from txt/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.txt) | 0
-rw-r--r--  txt/old/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.txt (renamed from txt/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.txt) | 0
-rw-r--r--  txt/old/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.txt (renamed from txt/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.txt) | 0
53 files changed, 1523 insertions, 24 deletions
diff --git a/patches/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.patch b/patches/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.patch
new file mode 100644
index 000000000..bcc47b7b3
--- /dev/null
+++ b/patches/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.patch
@@ -0,0 +1,46 @@
+From: Lukas Bulwahn <lbulwahn@redhat.com>
+Subject: MAINTAINERS: improve entries in CODE TAGGING and MEMORY ALLOCATION PROFILING
+Date: Thu, 11 Apr 2024 08:47:17 +0200
+
+Commit 5acf2502db99 ("MAINTAINERS: add entries for code tagging and memory
+allocation profiling") adds the two new sections CODE TAGGING and MEMORY
+ALLOCATION PROFILING. The files listed in these sections, however, do not
+match the files added by the corresponding patch series.
+
+Improve the two entries to refer to all files added with that series and
+drop the entries for non-existent files.
+
+Link: https://lkml.kernel.org/r/20240411064717.51140-1-lukas.bulwahn@redhat.com
+Signed-off-by: Lukas Bulwahn <lukas.bulwahn@redhat.com>
+Acked-by: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ MAINTAINERS | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/MAINTAINERS~maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix
++++ a/MAINTAINERS
+@@ -5258,6 +5258,7 @@ CODE TAGGING
+ M: Suren Baghdasaryan <surenb@google.com>
+ M: Kent Overstreet <kent.overstreet@linux.dev>
+ S: Maintained
++F: include/asm-generic/codetag.lds.h
+ F: include/linux/codetag.h
+ F: lib/codetag.c
+
+@@ -14167,10 +14168,10 @@ M: Suren Baghdasaryan <surenb@google.com
+ M: Kent Overstreet <kent.overstreet@linux.dev>
+ L: linux-mm@kvack.org
+ S: Maintained
++F: Documentation/mm/allocation-profiling.rst
+ F: include/linux/alloc_tag.h
+-F: include/linux/codetag_ctx.h
++F: include/linux/pgalloc_tag.h
+ F: lib/alloc_tag.c
+-F: lib/pgalloc_tag.c
+
+ MEMORY CONTROLLER DRIVERS
+ M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+_
diff --git a/patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.patch b/patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.patch
new file mode 100644
index 000000000..f42d33122
--- /dev/null
+++ b/patches/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.patch
@@ -0,0 +1,111 @@
+From: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Subject: mm/hugetlb: convert dissolve_free_huge_pages() to folios
+Date: Thu, 11 Apr 2024 09:47:56 -0700
+
+Allows us to rename dissolve_free_huge_page() to
+dissolve_free_hugetlb_folio(). Convert one caller to pass in a folio
+directly and use page_folio() to convert the caller in mm/memory-failure.c.
+
+Link: https://lkml.kernel.org/r/20240411164756.261178-1-sidhartha.kumar@oracle.com
+Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Cc: Jane Chu <jane.chu@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/hugetlb.h | 4 ++--
+ mm/hugetlb.c | 15 +++++++--------
+ mm/memory-failure.c | 4 ++--
+ 3 files changed, 11 insertions(+), 12 deletions(-)
+
+--- a/include/linux/hugetlb.h~mm-hugetlb-convert-dissolve_free_huge_pages-to-folios
++++ a/include/linux/hugetlb.h
+@@ -861,7 +861,7 @@ static inline int hstate_index(struct hs
+ return h - hstates;
+ }
+
+-extern int dissolve_free_huge_page(struct page *page);
++extern int dissolve_free_hugetlb_folio(struct folio *folio);
+ extern int dissolve_free_huge_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
+
+@@ -1148,7 +1148,7 @@ static inline int hstate_index(struct hs
+ return 0;
+ }
+
+-static inline int dissolve_free_huge_page(struct page *page)
++static inline int dissolve_free_hugetlb_folio(struct folio *folio)
+ {
+ return 0;
+ }
+--- a/mm/hugetlb.c~mm-hugetlb-convert-dissolve_free_huge_pages-to-folios
++++ a/mm/hugetlb.c
+@@ -2377,8 +2377,8 @@ static struct folio *remove_pool_hugetlb
+ }
+
+ /*
+- * Dissolve a given free hugepage into free buddy pages. This function does
+- * nothing for in-use hugepages and non-hugepages.
++ * Dissolve a given free hugetlb folio into free buddy pages. This function
++ * does nothing for in-use hugepages and non-hugepages.
+ * This function returns values like below:
+ *
+ * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
+@@ -2390,10 +2390,9 @@ static struct folio *remove_pool_hugetlb
+ * 0: successfully dissolved free hugepages or the page is not a
+ * hugepage (considered as already dissolved)
+ */
+-int dissolve_free_huge_page(struct page *page)
++int dissolve_free_hugetlb_folio(struct folio *folio)
+ {
+ int rc = -EBUSY;
+- struct folio *folio = page_folio(page);
+
+ retry:
+ /* Not to disrupt normal path by vainly holding hugetlb_lock */
+@@ -2470,13 +2469,13 @@ out:
+ * make specified memory blocks removable from the system.
+ * Note that this will dissolve a free gigantic hugepage completely, if any
+ * part of it lies within the given range.
+- * Also note that if dissolve_free_huge_page() returns with an error, all
++ * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
+ * free hugepages that were dissolved before that error are lost.
+ */
+ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+ {
+ unsigned long pfn;
+- struct page *page;
++ struct folio *folio;
+ int rc = 0;
+ unsigned int order;
+ struct hstate *h;
+@@ -2489,8 +2488,8 @@ int dissolve_free_huge_pages(unsigned lo
+ order = min(order, huge_page_order(h));
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
+- page = pfn_to_page(pfn);
+- rc = dissolve_free_huge_page(page);
++ folio = pfn_folio(pfn);
++ rc = dissolve_free_hugetlb_folio(folio);
+ if (rc)
+ break;
+ }
+--- a/mm/memory-failure.c~mm-hugetlb-convert-dissolve_free_huge_pages-to-folios
++++ a/mm/memory-failure.c
+@@ -155,11 +155,11 @@ static int __page_handle_poison(struct p
+
+ /*
+ * zone_pcp_disable() can't be used here. It will hold pcp_batch_high_lock and
+- * dissolve_free_huge_page() might hold cpu_hotplug_lock via static_key_slow_dec()
++ * dissolve_free_hugetlb_folio() might hold cpu_hotplug_lock via static_key_slow_dec()
+ * when hugetlb vmemmap optimization is enabled. This will break current lock
+ * dependency chain and leads to deadlock.
+ */
+- ret = dissolve_free_huge_page(page);
++ ret = dissolve_free_hugetlb_folio(page_folio(page));
+ if (!ret) {
+ drain_all_pages(page_zone(page));
+ ret = take_page_off_buddy(page);
+_
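
As a rough illustration of the call-site pattern in the patch above, where a
page-based API becomes folio-based and the one remaining page-based caller
adapts through a page-to-folio lookup, here is a minimal self-contained C
sketch. All types and helper names in it (page_to_folio(), dissolve_folio())
are simplified stand-ins for the kernel's page_folio() and
dissolve_free_hugetlb_folio(), not real kernel code.

/*
 * Simplified, user-space illustration of converting an API from
 * "struct page *" to "struct folio *"; purely a stand-in sketch.
 */
#include <stdio.h>

struct folio;

struct page {
	struct folio *folio;	/* back-pointer standing in for page_folio() */
	int pfn;
};

struct folio {
	struct page page;	/* head page embedded in the folio */
	int refcount;
};

/* Stand-in for page_folio(): map a page back to its containing folio. */
static struct folio *page_to_folio(struct page *page)
{
	return page->folio;
}

/* New interface: works on folios only. */
static int dissolve_folio(struct folio *folio)
{
	printf("dissolving folio at pfn %d (refcount %d)\n",
	       folio->page.pfn, folio->refcount);
	return 0;
}

int main(void)
{
	struct folio f = { .page = { .pfn = 100 }, .refcount = 1 };
	struct page *legacy_page;

	f.page.folio = &f;
	legacy_page = &f.page;

	/* Converted caller: passes the folio directly. */
	dissolve_folio(&f);

	/* Legacy caller: still holds a page, adapts via the lookup helper. */
	dissolve_folio(page_to_folio(legacy_page));
	return 0;
}

The kernel derives folios from the memmap rather than from a back-pointer,
but the caller-side shape is the same as in the hunks above.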
diff --git a/patches/mm-ksm-add-folio_set_stable_node.patch b/patches/mm-ksm-add-folio_set_stable_node.patch
new file mode 100644
index 000000000..07fbba6db
--- /dev/null
+++ b/patches/mm-ksm-add-folio_set_stable_node.patch
@@ -0,0 +1,47 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: add folio_set_stable_node
+Date: Thu, 11 Apr 2024 14:17:04 +0800
+
+Add folio_set_stable_node() as a wrapper around set_page_stable_node(),
+then use it to replace calls to the latter. We will merge the two once
+all places are converted to folios.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-4-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/ksm.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/mm/ksm.c~mm-ksm-add-folio_set_stable_node
++++ a/mm/ksm.c
+@@ -1109,6 +1109,12 @@ static inline void set_page_stable_node(
+ page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+ }
+
++static inline void folio_set_stable_node(struct folio *folio,
++ struct ksm_stable_node *stable_node)
++{
++ set_page_stable_node(&folio->page, stable_node);
++}
++
+ #ifdef CONFIG_SYSFS
+ /*
+ * Only called through the sysfs control interface:
+@@ -3241,7 +3247,7 @@ void folio_migrate_ksm(struct folio *new
+ * has gone stale (or that folio_test_swapcache has been cleared).
+ */
+ smp_wmb();
+- set_page_stable_node(&folio->page, NULL);
++ folio_set_stable_node(folio, NULL);
+ }
+ }
+ #endif /* CONFIG_MIGRATION */
+_
diff --git a/patches/mm-ksm-add-ksm_get_folio.patch b/patches/mm-ksm-add-ksm_get_folio.patch
new file mode 100644
index 000000000..ed231be8b
--- /dev/null
+++ b/patches/mm-ksm-add-ksm_get_folio.patch
@@ -0,0 +1,146 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: add ksm_get_folio
+Date: Thu, 11 Apr 2024 14:17:02 +0800
+
+Patch series "transfer page to folio in KSM".
+
+This is the first part of the page-to-folio conversion in KSM. Since
+only single pages can be stored in KSM, we can safely convert the stable
+tree pages to folios.
+
+This patchset reduces ksm.o by 57 kbytes, from 2541776 bytes, on the
+latest akpm/mm-stable branch with CONFIG_DEBUG_VM enabled. It passes the
+KSM tests in LTP and the kernel selftests.
+
+Thanks to Matthew Wilcox and David Hildenbrand for their suggestions and
+comments!
+
+
+This patch (of 10):
+
+KSM only contains single pages, so we can add a new function,
+ksm_get_folio(), which does what get_ksm_page() does but uses folios
+instead of pages, saving a couple of compound_head() calls.
+
+After all callers are converted, get_ksm_page() will be removed.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-1-alexs@kernel.org
+Link: https://lkml.kernel.org/r/20240411061713.1847574-2-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/ksm.c | 42 +++++++++++++++++++++++++-----------------
+ 1 file changed, 25 insertions(+), 17 deletions(-)
+
+--- a/mm/ksm.c~mm-ksm-add-ksm_get_folio
++++ a/mm/ksm.c
+@@ -897,7 +897,7 @@ enum get_ksm_page_flags {
+ };
+
+ /*
+- * get_ksm_page: checks if the page indicated by the stable node
++ * ksm_get_folio: checks if the page indicated by the stable node
+ * is still its ksm page, despite having held no reference to it.
+ * In which case we can trust the content of the page, and it
+ * returns the gotten page; but if the page has now been zapped,
+@@ -915,10 +915,10 @@ enum get_ksm_page_flags {
+ * a page to put something that might look like our key in page->mapping.
+ * is on its way to being freed; but it is an anomaly to bear in mind.
+ */
+-static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
++static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
+ enum get_ksm_page_flags flags)
+ {
+- struct page *page;
++ struct folio *folio;
+ void *expected_mapping;
+ unsigned long kpfn;
+
+@@ -926,8 +926,8 @@ static struct page *get_ksm_page(struct
+ PAGE_MAPPING_KSM);
+ again:
+ kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
+- page = pfn_to_page(kpfn);
+- if (READ_ONCE(page->mapping) != expected_mapping)
++ folio = pfn_folio(kpfn);
++ if (READ_ONCE(folio->mapping) != expected_mapping)
+ goto stale;
+
+ /*
+@@ -940,41 +940,41 @@ again:
+ * in folio_migrate_mapping(), it might still be our page,
+ * in which case it's essential to keep the node.
+ */
+- while (!get_page_unless_zero(page)) {
++ while (!folio_try_get(folio)) {
+ /*
+ * Another check for page->mapping != expected_mapping would
+ * work here too. We have chosen the !PageSwapCache test to
+ * optimize the common case, when the page is or is about to
+ * be freed: PageSwapCache is cleared (under spin_lock_irq)
+ * in the ref_freeze section of __remove_mapping(); but Anon
+- * page->mapping reset to NULL later, in free_pages_prepare().
++ * folio->mapping reset to NULL later, in free_pages_prepare().
+ */
+- if (!PageSwapCache(page))
++ if (!folio_test_swapcache(folio))
+ goto stale;
+ cpu_relax();
+ }
+
+- if (READ_ONCE(page->mapping) != expected_mapping) {
+- put_page(page);
++ if (READ_ONCE(folio->mapping) != expected_mapping) {
++ folio_put(folio);
+ goto stale;
+ }
+
+ if (flags == GET_KSM_PAGE_TRYLOCK) {
+- if (!trylock_page(page)) {
+- put_page(page);
++ if (!folio_trylock(folio)) {
++ folio_put(folio);
+ return ERR_PTR(-EBUSY);
+ }
+ } else if (flags == GET_KSM_PAGE_LOCK)
+- lock_page(page);
++ folio_lock(folio);
+
+ if (flags != GET_KSM_PAGE_NOLOCK) {
+- if (READ_ONCE(page->mapping) != expected_mapping) {
+- unlock_page(page);
+- put_page(page);
++ if (READ_ONCE(folio->mapping) != expected_mapping) {
++ folio_unlock(folio);
++ folio_put(folio);
+ goto stale;
+ }
+ }
+- return page;
++ return folio;
+
+ stale:
+ /*
+@@ -990,6 +990,14 @@ stale:
+ return NULL;
+ }
+
++static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
++ enum get_ksm_page_flags flags)
++{
++ struct folio *folio = ksm_get_folio(stable_node, flags);
++
++ return &folio->page;
++}
++
+ /*
+ * Removing rmap_item from stable or unstable tree.
+ * This function will clean the information from the stable/unstable tree.
+_
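
For readers new to these conversions, the transitional shape used above can
be sketched in plain C: add a folio-returning helper first, keep the old
page-returning function as a thin wrapper, then convert callers one by one
and finally delete the wrapper (as the later patches in this series do for
get_ksm_page()). The names demo_get_folio() and demo_get_page() below are
hypothetical stand-ins for ksm_get_folio() and get_ksm_page(); this is an
illustrative sketch, not kernel code.

/*
 * Transitional wrapper pattern: the new helper returns the richer "folio"
 * type, the legacy page-based helper becomes a thin wrapper until the last
 * caller is converted, then the wrapper is removed.
 */
#include <stdio.h>

struct page  { int pfn; };
struct folio { struct page page; };	/* head page embedded first */

static struct folio stable_folio = { .page = { .pfn = 42 } };

/* New interface: converted callers use this and skip compound_head(). */
static struct folio *demo_get_folio(void)
{
	return &stable_folio;
}

/* Legacy interface: kept temporarily for not-yet-converted callers. */
static struct page *demo_get_page(void)
{
	struct folio *folio = demo_get_folio();

	return folio ? &folio->page : NULL;
}

int main(void)
{
	printf("folio pfn=%d, legacy page pfn=%d\n",
	       demo_get_folio()->page.pfn, demo_get_page()->pfn);
	return 0;
}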
diff --git a/patches/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.patch b/patches/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.patch
new file mode 100644
index 000000000..01afa6062
--- /dev/null
+++ b/patches/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.patch
@@ -0,0 +1,401 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: convert chain series funcs and replace get_ksm_page
+Date: Thu, 11 Apr 2024 14:17:09 +0800
+
+All pages in the KSM stable tree are single pages, so convert the chain
+series functions as well as stable_tree_insert()/stable_tree_search() to
+use folios, and replace get_ksm_page() with ksm_get_folio() since it is
+no longer needed.
+
+This could save a few compound_head() calls.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-9-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Cc: David Hildenbrand <david@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/ksm.c | 136 +++++++++++++++++++++++--------------------------
+ mm/migrate.c | 2
+ 2 files changed, 66 insertions(+), 72 deletions(-)
+
+--- a/mm/ksm.c~mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page
++++ a/mm/ksm.c
+@@ -990,14 +990,6 @@ stale:
+ return NULL;
+ }
+
+-static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
+- enum get_ksm_page_flags flags)
+-{
+- struct folio *folio = ksm_get_folio(stable_node, flags);
+-
+- return &folio->page;
+-}
+-
+ /*
+ * Removing rmap_item from stable or unstable tree.
+ * This function will clean the information from the stable/unstable tree.
+@@ -1632,10 +1624,10 @@ bool is_page_sharing_candidate(struct ks
+ return __is_page_sharing_candidate(stable_node, 0);
+ }
+
+-static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
+- struct ksm_stable_node **_stable_node,
+- struct rb_root *root,
+- bool prune_stale_stable_nodes)
++static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
++ struct ksm_stable_node **_stable_node,
++ struct rb_root *root,
++ bool prune_stale_stable_nodes)
+ {
+ struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
+ struct hlist_node *hlist_safe;
+@@ -1748,7 +1740,7 @@ static struct page *stable_node_dup(stru
+ }
+
+ *_stable_node_dup = found;
+- return &tree_folio->page;
++ return tree_folio;
+ }
+
+ static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
+@@ -1765,7 +1757,7 @@ static struct ksm_stable_node *stable_no
+ }
+
+ /*
+- * Like for get_ksm_page, this function can free the *_stable_node and
++ * Like for ksm_get_folio, this function can free the *_stable_node and
+ * *_stable_node_dup if the returned tree_page is NULL.
+ *
+ * It can also free and overwrite *_stable_node with the found
+@@ -1778,16 +1770,16 @@ static struct ksm_stable_node *stable_no
+ * function and will be overwritten in all cases, the caller doesn't
+ * need to initialize it.
+ */
+-static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
+- struct ksm_stable_node **_stable_node,
+- struct rb_root *root,
+- bool prune_stale_stable_nodes)
++static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
++ struct ksm_stable_node **_stable_node,
++ struct rb_root *root,
++ bool prune_stale_stable_nodes)
+ {
+ struct ksm_stable_node *stable_node = *_stable_node;
+ if (!is_stable_node_chain(stable_node)) {
+ if (is_page_sharing_candidate(stable_node)) {
+ *_stable_node_dup = stable_node;
+- return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
++ return ksm_get_folio(stable_node, GET_KSM_PAGE_NOLOCK);
+ }
+ /*
+ * _stable_node_dup set to NULL means the stable_node
+@@ -1800,24 +1792,24 @@ static struct page *__stable_node_chain(
+ prune_stale_stable_nodes);
+ }
+
+-static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d,
+- struct ksm_stable_node **s_n,
+- struct rb_root *root)
++static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d,
++ struct ksm_stable_node **s_n,
++ struct rb_root *root)
+ {
+ return __stable_node_chain(s_n_d, s_n, root, true);
+ }
+
+-static __always_inline struct page *chain(struct ksm_stable_node **s_n_d,
+- struct ksm_stable_node *s_n,
+- struct rb_root *root)
++static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d,
++ struct ksm_stable_node *s_n,
++ struct rb_root *root)
+ {
+ struct ksm_stable_node *old_stable_node = s_n;
+- struct page *tree_page;
++ struct folio *tree_folio;
+
+- tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
++ tree_folio = __stable_node_chain(s_n_d, &s_n, root, false);
+ /* not pruning dups so s_n cannot have changed */
+ VM_BUG_ON(s_n != old_stable_node);
+- return tree_page;
++ return tree_folio;
+ }
+
+ /*
+@@ -1837,28 +1829,30 @@ static struct page *stable_tree_search(s
+ struct rb_node *parent;
+ struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
+ struct ksm_stable_node *page_node;
++ struct folio *folio;
+
+- page_node = page_stable_node(page);
++ folio = page_folio(page);
++ page_node = folio_stable_node(folio);
+ if (page_node && page_node->head != &migrate_nodes) {
+ /* ksm page forked */
+- get_page(page);
+- return page;
++ folio_get(folio);
++ return &folio->page;
+ }
+
+- nid = get_kpfn_nid(page_to_pfn(page));
++ nid = get_kpfn_nid(folio_pfn(folio));
+ root = root_stable_tree + nid;
+ again:
+ new = &root->rb_node;
+ parent = NULL;
+
+ while (*new) {
+- struct page *tree_page;
++ struct folio *tree_folio;
+ int ret;
+
+ cond_resched();
+ stable_node = rb_entry(*new, struct ksm_stable_node, node);
+ stable_node_any = NULL;
+- tree_page = chain_prune(&stable_node_dup, &stable_node, root);
++ tree_folio = chain_prune(&stable_node_dup, &stable_node, root);
+ /*
+ * NOTE: stable_node may have been freed by
+ * chain_prune() if the returned stable_node_dup is
+@@ -1892,14 +1886,14 @@ again:
+ * write protected at all times. Any will work
+ * fine to continue the walk.
+ */
+- tree_page = get_ksm_page(stable_node_any,
+- GET_KSM_PAGE_NOLOCK);
++ tree_folio = ksm_get_folio(stable_node_any,
++ GET_KSM_PAGE_NOLOCK);
+ }
+ VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
+- if (!tree_page) {
++ if (!tree_folio) {
+ /*
+ * If we walked over a stale stable_node,
+- * get_ksm_page() will call rb_erase() and it
++ * ksm_get_folio() will call rb_erase() and it
+ * may rebalance the tree from under us. So
+ * restart the search from scratch. Returning
+ * NULL would be safe too, but we'd generate
+@@ -1909,8 +1903,8 @@ again:
+ goto again;
+ }
+
+- ret = memcmp_pages(page, tree_page);
+- put_page(tree_page);
++ ret = memcmp_pages(page, &tree_folio->page);
++ folio_put(tree_folio);
+
+ parent = *new;
+ if (ret < 0)
+@@ -1953,26 +1947,26 @@ again:
+ * It would be more elegant to return stable_node
+ * than kpage, but that involves more changes.
+ */
+- tree_page = get_ksm_page(stable_node_dup,
+- GET_KSM_PAGE_TRYLOCK);
++ tree_folio = ksm_get_folio(stable_node_dup,
++ GET_KSM_PAGE_TRYLOCK);
+
+- if (PTR_ERR(tree_page) == -EBUSY)
++ if (PTR_ERR(tree_folio) == -EBUSY)
+ return ERR_PTR(-EBUSY);
+
+- if (unlikely(!tree_page))
++ if (unlikely(!tree_folio))
+ /*
+ * The tree may have been rebalanced,
+ * so re-evaluate parent and new.
+ */
+ goto again;
+- unlock_page(tree_page);
++ folio_unlock(tree_folio);
+
+ if (get_kpfn_nid(stable_node_dup->kpfn) !=
+ NUMA(stable_node_dup->nid)) {
+- put_page(tree_page);
++ folio_put(tree_folio);
+ goto replace;
+ }
+- return tree_page;
++ return &tree_folio->page;
+ }
+ }
+
+@@ -1985,8 +1979,8 @@ again:
+ rb_insert_color(&page_node->node, root);
+ out:
+ if (is_page_sharing_candidate(page_node)) {
+- get_page(page);
+- return page;
++ folio_get(folio);
++ return &folio->page;
+ } else
+ return NULL;
+
+@@ -2011,12 +2005,12 @@ replace:
+ &page_node->node,
+ root);
+ if (is_page_sharing_candidate(page_node))
+- get_page(page);
++ folio_get(folio);
+ else
+- page = NULL;
++ folio = NULL;
+ } else {
+ rb_erase(&stable_node_dup->node, root);
+- page = NULL;
++ folio = NULL;
+ }
+ } else {
+ VM_BUG_ON(!is_stable_node_chain(stable_node));
+@@ -2027,16 +2021,16 @@ replace:
+ DO_NUMA(page_node->nid = nid);
+ stable_node_chain_add_dup(page_node, stable_node);
+ if (is_page_sharing_candidate(page_node))
+- get_page(page);
++ folio_get(folio);
+ else
+- page = NULL;
++ folio = NULL;
+ } else {
+- page = NULL;
++ folio = NULL;
+ }
+ }
+ stable_node_dup->head = &migrate_nodes;
+ list_add(&stable_node_dup->list, stable_node_dup->head);
+- return page;
++ return &folio->page;
+
+ chain_append:
+ /* stable_node_dup could be null if it reached the limit */
+@@ -2079,7 +2073,7 @@ chain_append:
+ * This function returns the stable tree node just allocated on success,
+ * NULL otherwise.
+ */
+-static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
++static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
+ {
+ int nid;
+ unsigned long kpfn;
+@@ -2089,7 +2083,7 @@ static struct ksm_stable_node *stable_tr
+ struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
+ bool need_chain = false;
+
+- kpfn = page_to_pfn(kpage);
++ kpfn = folio_pfn(kfolio);
+ nid = get_kpfn_nid(kpfn);
+ root = root_stable_tree + nid;
+ again:
+@@ -2097,13 +2091,13 @@ again:
+ new = &root->rb_node;
+
+ while (*new) {
+- struct page *tree_page;
++ struct folio *tree_folio;
+ int ret;
+
+ cond_resched();
+ stable_node = rb_entry(*new, struct ksm_stable_node, node);
+ stable_node_any = NULL;
+- tree_page = chain(&stable_node_dup, stable_node, root);
++ tree_folio = chain(&stable_node_dup, stable_node, root);
+ if (!stable_node_dup) {
+ /*
+ * Either all stable_node dups were full in
+@@ -2125,14 +2119,14 @@ again:
+ * write protected at all times. Any will work
+ * fine to continue the walk.
+ */
+- tree_page = get_ksm_page(stable_node_any,
+- GET_KSM_PAGE_NOLOCK);
++ tree_folio = ksm_get_folio(stable_node_any,
++ GET_KSM_PAGE_NOLOCK);
+ }
+ VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
+- if (!tree_page) {
++ if (!tree_folio) {
+ /*
+ * If we walked over a stale stable_node,
+- * get_ksm_page() will call rb_erase() and it
++ * ksm_get_folio() will call rb_erase() and it
+ * may rebalance the tree from under us. So
+ * restart the search from scratch. Returning
+ * NULL would be safe too, but we'd generate
+@@ -2142,8 +2136,8 @@ again:
+ goto again;
+ }
+
+- ret = memcmp_pages(kpage, tree_page);
+- put_page(tree_page);
++ ret = memcmp_pages(&kfolio->page, &tree_folio->page);
++ folio_put(tree_folio);
+
+ parent = *new;
+ if (ret < 0)
+@@ -2162,7 +2156,7 @@ again:
+
+ INIT_HLIST_HEAD(&stable_node_dup->hlist);
+ stable_node_dup->kpfn = kpfn;
+- set_page_stable_node(kpage, stable_node_dup);
++ folio_set_stable_node(kfolio, stable_node_dup);
+ stable_node_dup->rmap_hlist_len = 0;
+ DO_NUMA(stable_node_dup->nid = nid);
+ if (!need_chain) {
+@@ -2440,7 +2434,7 @@ static void cmp_and_merge_page(struct pa
+ * node in the stable tree and add both rmap_items.
+ */
+ lock_page(kpage);
+- stable_node = stable_tree_insert(kpage);
++ stable_node = stable_tree_insert(page_folio(kpage));
+ if (stable_node) {
+ stable_tree_append(tree_rmap_item, stable_node,
+ false);
+@@ -3244,7 +3238,7 @@ void folio_migrate_ksm(struct folio *new
+ /*
+ * newfolio->mapping was set in advance; now we need smp_wmb()
+ * to make sure that the new stable_node->kpfn is visible
+- * to get_ksm_page() before it can see that folio->mapping
++ * to ksm_get_folio() before it can see that folio->mapping
+ * has gone stale (or that folio_test_swapcache has been cleared).
+ */
+ smp_wmb();
+@@ -3271,7 +3265,7 @@ static bool stable_node_dup_remove_range
+ if (stable_node->kpfn >= start_pfn &&
+ stable_node->kpfn < end_pfn) {
+ /*
+- * Don't get_ksm_page, page has already gone:
++ * Don't ksm_get_folio, page has already gone:
+ * which is why we keep kpfn instead of page*
+ */
+ remove_node_from_stable_tree(stable_node);
+@@ -3359,7 +3353,7 @@ static int ksm_memory_callback(struct no
+ * Most of the work is done by page migration; but there might
+ * be a few stable_nodes left over, still pointing to struct
+ * pages which have been offlined: prune those from the tree,
+- * otherwise get_ksm_page() might later try to access a
++ * otherwise ksm_get_folio() might later try to access a
+ * non-existent struct page.
+ */
+ ksm_check_stable_tree(mn->start_pfn,
+--- a/mm/migrate.c~mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page
++++ a/mm/migrate.c
+@@ -616,7 +616,7 @@ void folio_migrate_flags(struct folio *n
+ folio_migrate_ksm(newfolio, folio);
+ /*
+ * Please do not reorder this without considering how mm/ksm.c's
+- * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
++ * ksm_get_folio() depends upon ksm_migrate_page() and PageSwapCache().
+ */
+ if (folio_test_swapcache(folio))
+ folio_clear_swapcache(folio);
+_
diff --git a/patches/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.patch b/patches/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.patch
new file mode 100644
index 000000000..0f9c3e1e6
--- /dev/null
+++ b/patches/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.patch
@@ -0,0 +1,140 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: mm/ksm: rename get_ksm_page_flags to ksm_get_folio_flags
+Date: Thu, 11 Apr 2024 14:17:10 +0800
+
+As get_ksm_page() is being removed, rename the get_ksm_page_flags enum and
+its values to match the new function name, ksm_get_folio().
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-10-alexs@kernel.org
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Alex Shi <alexs@kernel.org>
+Reviewed-by: Alex Shi <alexs@kernel.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/ksm.c | 32 ++++++++++++++++----------------
+ 1 file changed, 16 insertions(+), 16 deletions(-)
+
+--- a/mm/ksm.c~mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags
++++ a/mm/ksm.c
+@@ -890,10 +890,10 @@ static void remove_node_from_stable_tree
+ free_stable_node(stable_node);
+ }
+
+-enum get_ksm_page_flags {
+- GET_KSM_PAGE_NOLOCK,
+- GET_KSM_PAGE_LOCK,
+- GET_KSM_PAGE_TRYLOCK
++enum ksm_get_folio_flags {
++ KSM_GET_FOLIO_NOLOCK,
++ KSM_GET_FOLIO_LOCK,
++ KSM_GET_FOLIO_TRYLOCK
+ };
+
+ /*
+@@ -916,7 +916,7 @@ enum get_ksm_page_flags {
+ * is on its way to being freed; but it is an anomaly to bear in mind.
+ */
+ static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
+- enum get_ksm_page_flags flags)
++ enum ksm_get_folio_flags flags)
+ {
+ struct folio *folio;
+ void *expected_mapping;
+@@ -959,15 +959,15 @@ again:
+ goto stale;
+ }
+
+- if (flags == GET_KSM_PAGE_TRYLOCK) {
++ if (flags == KSM_GET_FOLIO_TRYLOCK) {
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
+ return ERR_PTR(-EBUSY);
+ }
+- } else if (flags == GET_KSM_PAGE_LOCK)
++ } else if (flags == KSM_GET_FOLIO_LOCK)
+ folio_lock(folio);
+
+- if (flags != GET_KSM_PAGE_NOLOCK) {
++ if (flags != KSM_GET_FOLIO_NOLOCK) {
+ if (READ_ONCE(folio->mapping) != expected_mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+@@ -1001,7 +1001,7 @@ static void remove_rmap_item_from_tree(s
+ struct folio *folio;
+
+ stable_node = rmap_item->head;
+- folio = ksm_get_folio(stable_node, GET_KSM_PAGE_LOCK);
++ folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
+ if (!folio)
+ goto out;
+
+@@ -1116,7 +1116,7 @@ static int remove_stable_node(struct ksm
+ struct folio *folio;
+ int err;
+
+- folio = ksm_get_folio(stable_node, GET_KSM_PAGE_LOCK);
++ folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
+ if (!folio) {
+ /*
+ * ksm_get_folio did remove_node_from_stable_tree itself.
+@@ -1656,7 +1656,7 @@ static struct folio *stable_node_dup(str
+ * stable_node parameter itself will be freed from
+ * under us if it returns NULL.
+ */
+- folio = ksm_get_folio(dup, GET_KSM_PAGE_NOLOCK);
++ folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK);
+ if (!folio)
+ continue;
+ nr += 1;
+@@ -1779,7 +1779,7 @@ static struct folio *__stable_node_chain
+ if (!is_stable_node_chain(stable_node)) {
+ if (is_page_sharing_candidate(stable_node)) {
+ *_stable_node_dup = stable_node;
+- return ksm_get_folio(stable_node, GET_KSM_PAGE_NOLOCK);
++ return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK);
+ }
+ /*
+ * _stable_node_dup set to NULL means the stable_node
+@@ -1887,7 +1887,7 @@ again:
+ * fine to continue the walk.
+ */
+ tree_folio = ksm_get_folio(stable_node_any,
+- GET_KSM_PAGE_NOLOCK);
++ KSM_GET_FOLIO_NOLOCK);
+ }
+ VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
+ if (!tree_folio) {
+@@ -1948,7 +1948,7 @@ again:
+ * than kpage, but that involves more changes.
+ */
+ tree_folio = ksm_get_folio(stable_node_dup,
+- GET_KSM_PAGE_TRYLOCK);
++ KSM_GET_FOLIO_TRYLOCK);
+
+ if (PTR_ERR(tree_folio) == -EBUSY)
+ return ERR_PTR(-EBUSY);
+@@ -2120,7 +2120,7 @@ again:
+ * fine to continue the walk.
+ */
+ tree_folio = ksm_get_folio(stable_node_any,
+- GET_KSM_PAGE_NOLOCK);
++ KSM_GET_FOLIO_NOLOCK);
+ }
+ VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
+ if (!tree_folio) {
+@@ -2611,7 +2611,7 @@ static struct ksm_rmap_item *scan_get_ne
+ list_for_each_entry_safe(stable_node, next,
+ &migrate_nodes, list) {
+ folio = ksm_get_folio(stable_node,
+- GET_KSM_PAGE_NOLOCK);
++ KSM_GET_FOLIO_NOLOCK);
+ if (folio)
+ folio_put(folio);
+ cond_resched();
+_
diff --git a/patches/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.patch b/patches/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.patch
new file mode 100644
index 000000000..c0a8f604b
--- /dev/null
+++ b/patches/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.patch
@@ -0,0 +1,54 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: replace set_page_stable_node by folio_set_stable_node
+Date: Thu, 11 Apr 2024 14:17:11 +0800
+
+Only a single page can be reached at the place where we set the stable
+node after write protection, so use the folio-converted function there and
+remove the now-unused set_page_stable_node().
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-11-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/ksm.c | 12 +++---------
+ 1 file changed, 3 insertions(+), 9 deletions(-)
+
+--- a/mm/ksm.c~mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node
++++ a/mm/ksm.c
+@@ -1094,17 +1094,11 @@ static inline struct ksm_stable_node *pa
+ return folio_stable_node(page_folio(page));
+ }
+
+-static inline void set_page_stable_node(struct page *page,
+- struct ksm_stable_node *stable_node)
+-{
+- VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page);
+- page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+-}
+-
+ static inline void folio_set_stable_node(struct folio *folio,
+ struct ksm_stable_node *stable_node)
+ {
+- set_page_stable_node(&folio->page, stable_node);
++ VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio);
++ folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+ }
+
+ #ifdef CONFIG_SYSFS
+@@ -1519,7 +1513,7 @@ static int try_to_merge_one_page(struct
+ * PageAnon+anon_vma to PageKsm+NULL stable_node:
+ * stable_tree_insert() will update stable_node.
+ */
+- set_page_stable_node(page, NULL);
++ folio_set_stable_node(page_folio(page), NULL);
+ mark_page_accessed(page);
+ /*
+ * Page reclaim just frees a clean page with no dirty
+_
diff --git a/patches/mm-ksm-use-folio-in-remove_rmap_item_from_tree.patch b/patches/mm-ksm-use-folio-in-remove_rmap_item_from_tree.patch
new file mode 100644
index 000000000..4638cd785
--- /dev/null
+++ b/patches/mm-ksm-use-folio-in-remove_rmap_item_from_tree.patch
@@ -0,0 +1,45 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: use folio in remove_rmap_item_from_tree
+Date: Thu, 11 Apr 2024 14:17:03 +0800
+
+To save 2 compound_head calls.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-3-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/ksm.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/mm/ksm.c~mm-ksm-use-folio-in-remove_rmap_item_from_tree
++++ a/mm/ksm.c
+@@ -1006,16 +1006,16 @@ static void remove_rmap_item_from_tree(s
+ {
+ if (rmap_item->address & STABLE_FLAG) {
+ struct ksm_stable_node *stable_node;
+- struct page *page;
++ struct folio *folio;
+
+ stable_node = rmap_item->head;
+- page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
+- if (!page)
++ folio = ksm_get_folio(stable_node, GET_KSM_PAGE_LOCK);
++ if (!folio)
+ goto out;
+
+ hlist_del(&rmap_item->hlist);
+- unlock_page(page);
+- put_page(page);
++ folio_unlock(folio);
++ folio_put(folio);
+
+ if (!hlist_empty(&stable_node->hlist))
+ ksm_pages_sharing--;
+_
diff --git a/patches/mm-ksm-use-folio-in-remove_stable_node.patch b/patches/mm-ksm-use-folio-in-remove_stable_node.patch
new file mode 100644
index 000000000..d20bb33c6
--- /dev/null
+++ b/patches/mm-ksm-use-folio-in-remove_stable_node.patch
@@ -0,0 +1,73 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: use folio in remove_stable_node
+Date: Thu, 11 Apr 2024 14:17:05 +0800
+
+Pages in the stable tree are all single normal pages, so use ksm_get_folio()
+and folio_set_stable_node(); this also saves 3 calls to compound_head().
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-5-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/ksm.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/mm/ksm.c~mm-ksm-use-folio-in-remove_stable_node
++++ a/mm/ksm.c
+@@ -1121,13 +1121,13 @@ static inline void folio_set_stable_node
+ */
+ static int remove_stable_node(struct ksm_stable_node *stable_node)
+ {
+- struct page *page;
++ struct folio *folio;
+ int err;
+
+- page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
+- if (!page) {
++ folio = ksm_get_folio(stable_node, GET_KSM_PAGE_LOCK);
++ if (!folio) {
+ /*
+- * get_ksm_page did remove_node_from_stable_tree itself.
++ * ksm_get_folio did remove_node_from_stable_tree itself.
+ */
+ return 0;
+ }
+@@ -1138,22 +1138,22 @@ static int remove_stable_node(struct ksm
+ * merge_across_nodes/max_page_sharing be switched.
+ */
+ err = -EBUSY;
+- if (!page_mapped(page)) {
++ if (!folio_mapped(folio)) {
+ /*
+- * The stable node did not yet appear stale to get_ksm_page(),
+- * since that allows for an unmapped ksm page to be recognized
++ * The stable node did not yet appear stale to ksm_get_folio(),
++ * since that allows for an unmapped ksm folio to be recognized
+ * right up until it is freed; but the node is safe to remove.
+- * This page might be in an LRU cache waiting to be freed,
+- * or it might be PageSwapCache (perhaps under writeback),
++ * This folio might be in an LRU cache waiting to be freed,
++ * or it might be in the swapcache (perhaps under writeback),
+ * or it might have been removed from swapcache a moment ago.
+ */
+- set_page_stable_node(page, NULL);
++ folio_set_stable_node(folio, NULL);
+ remove_node_from_stable_tree(stable_node);
+ err = 0;
+ }
+
+- unlock_page(page);
+- put_page(page);
++ folio_unlock(folio);
++ folio_put(folio);
+ return err;
+ }
+
+_
diff --git a/patches/mm-ksm-use-folio-in-stable_node_dup.patch b/patches/mm-ksm-use-folio-in-stable_node_dup.patch
new file mode 100644
index 000000000..f8c870a63
--- /dev/null
+++ b/patches/mm-ksm-use-folio-in-stable_node_dup.patch
@@ -0,0 +1,80 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: use folio in stable_node_dup
+Date: Thu, 11 Apr 2024 14:17:06 +0800
+
+Use ksm_get_folio() and save 2 compound_head calls.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-6-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/ksm.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/mm/ksm.c~mm-ksm-use-folio-in-stable_node_dup
++++ a/mm/ksm.c
+@@ -1638,7 +1638,7 @@ static struct page *stable_node_dup(stru
+ {
+ struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
+ struct hlist_node *hlist_safe;
+- struct page *_tree_page, *tree_page = NULL;
++ struct folio *folio, *tree_folio = NULL;
+ int nr = 0;
+ int found_rmap_hlist_len;
+
+@@ -1657,24 +1657,24 @@ static struct page *stable_node_dup(stru
+ * We must walk all stable_node_dup to prune the stale
+ * stable nodes during lookup.
+ *
+- * get_ksm_page can drop the nodes from the
++ * ksm_get_folio can drop the nodes from the
+ * stable_node->hlist if they point to freed pages
+ * (that's why we do a _safe walk). The "dup"
+ * stable_node parameter itself will be freed from
+ * under us if it returns NULL.
+ */
+- _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
+- if (!_tree_page)
++ folio = ksm_get_folio(dup, GET_KSM_PAGE_NOLOCK);
++ if (!folio)
+ continue;
+ nr += 1;
+ if (is_page_sharing_candidate(dup)) {
+ if (!found ||
+ dup->rmap_hlist_len > found_rmap_hlist_len) {
+ if (found)
+- put_page(tree_page);
++ folio_put(tree_folio);
+ found = dup;
+ found_rmap_hlist_len = found->rmap_hlist_len;
+- tree_page = _tree_page;
++ tree_folio = folio;
+
+ /* skip put_page for found dup */
+ if (!prune_stale_stable_nodes)
+@@ -1682,7 +1682,7 @@ static struct page *stable_node_dup(stru
+ continue;
+ }
+ }
+- put_page(_tree_page);
++ folio_put(folio);
+ }
+
+ if (found) {
+@@ -1747,7 +1747,7 @@ static struct page *stable_node_dup(stru
+ }
+
+ *_stable_node_dup = found;
+- return tree_page;
++ return &tree_folio->page;
+ }
+
+ static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
+_
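
The reference handling in stable_node_dup() above follows a common pattern:
take a reference on each candidate while walking the list, keep the
reference only on the current best candidate, and drop the reference on the
previous best as well as on every candidate that is not kept. A small
self-contained C sketch of that bookkeeping follows; the types, names and
manual refcount are purely illustrative, not kernel code.

/*
 * Keep a reference only on the best candidate seen so far; drop the
 * reference on the old best when a better one appears, and drop the
 * reference on every candidate that is not kept.
 */
#include <stdio.h>

struct candidate {
	int score;
	int refcount;
};

static struct candidate *cand_get(struct candidate *c)
{
	c->refcount++;
	return c;
}

static void cand_put(struct candidate *c)
{
	c->refcount--;
}

int main(void)
{
	struct candidate cands[] = { { 3, 0 }, { 7, 0 }, { 5, 0 } };
	struct candidate *best = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct candidate *c = cand_get(&cands[i]);

		if (!best || c->score > best->score) {
			if (best)
				cand_put(best);	/* drop ref on the old best */
			best = c;		/* keep the ref we just took */
		} else {
			cand_put(c);		/* not kept: drop it right away */
		}
	}

	for (i = 0; i < 3; i++)
		printf("candidate %d: score %d refcount %d\n",
		       i, cands[i].score, cands[i].refcount);
	/* Only the best candidate (score 7) ends with refcount 1. */
	return 0;
}

In the kernel the references are folio references taken via ksm_get_folio()
and released with folio_put(); the sketch only mirrors the bookkeeping.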
diff --git a/patches/mm-ksm-use-folio-in-write_protect_page.patch b/patches/mm-ksm-use-folio-in-write_protect_page.patch
new file mode 100644
index 000000000..fa29769c7
--- /dev/null
+++ b/patches/mm-ksm-use-folio-in-write_protect_page.patch
@@ -0,0 +1,103 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: use folio in write_protect_page
+Date: Thu, 11 Apr 2024 14:17:08 +0800
+
+Compound pages are checked for and skipped before write_protect_page() is
+called, so use a folio here to save a few compound_head() checks.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-8-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/ksm.c | 25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+--- a/mm/ksm.c~mm-ksm-use-folio-in-write_protect_page
++++ a/mm/ksm.c
+@@ -1289,23 +1289,24 @@ static u32 calc_checksum(struct page *pa
+ return checksum;
+ }
+
+-static int write_protect_page(struct vm_area_struct *vma, struct page *page,
++static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
+ pte_t *orig_pte)
+ {
+ struct mm_struct *mm = vma->vm_mm;
+- DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
++ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);
+ int swapped;
+ int err = -EFAULT;
+ struct mmu_notifier_range range;
+ bool anon_exclusive;
+ pte_t entry;
+
+- pvmw.address = page_address_in_vma(page, vma);
++ if (WARN_ON_ONCE(folio_test_large(folio)))
++ return err;
++
++ pvmw.address = page_address_in_vma(&folio->page, vma);
+ if (pvmw.address == -EFAULT)
+ goto out;
+
+- BUG_ON(PageTransCompound(page));
+-
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
+ pvmw.address + PAGE_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
+@@ -1315,12 +1316,12 @@ static int write_protect_page(struct vm_
+ if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
+ goto out_unlock;
+
+- anon_exclusive = PageAnonExclusive(page);
++ anon_exclusive = PageAnonExclusive(&folio->page);
+ entry = ptep_get(pvmw.pte);
+ if (pte_write(entry) || pte_dirty(entry) ||
+ anon_exclusive || mm_tlb_flush_pending(mm)) {
+- swapped = PageSwapCache(page);
+- flush_cache_page(vma, pvmw.address, page_to_pfn(page));
++ swapped = folio_test_swapcache(folio);
++ flush_cache_page(vma, pvmw.address, folio_pfn(folio));
+ /*
+ * Ok this is tricky, when get_user_pages_fast() run it doesn't
+ * take any lock, therefore the check that we are going to make
+@@ -1340,20 +1341,20 @@ static int write_protect_page(struct vm_
+ * Check that no O_DIRECT or similar I/O is in progress on the
+ * page
+ */
+- if (page_mapcount(page) + 1 + swapped != page_count(page)) {
++ if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) {
+ set_pte_at(mm, pvmw.address, pvmw.pte, entry);
+ goto out_unlock;
+ }
+
+ /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
+ if (anon_exclusive &&
+- folio_try_share_anon_rmap_pte(page_folio(page), page)) {
++ folio_try_share_anon_rmap_pte(folio, &folio->page)) {
+ set_pte_at(mm, pvmw.address, pvmw.pte, entry);
+ goto out_unlock;
+ }
+
+ if (pte_dirty(entry))
+- set_page_dirty(page);
++ folio_mark_dirty(folio);
+ entry = pte_mkclean(entry);
+
+ if (pte_write(entry))
+@@ -1519,7 +1520,7 @@ static int try_to_merge_one_page(struct
+ * ptes are necessarily already write-protected. But in either
+ * case, we need to lock and check page_count is not raised.
+ */
+- if (write_protect_page(vma, page, &orig_pte) == 0) {
++ if (write_protect_page(vma, page_folio(page), &orig_pte) == 0) {
+ if (!kpage) {
+ /*
+ * While we hold page lock, upgrade page from
+_
diff --git a/patches/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.patch b/patches/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.patch
new file mode 100644
index 000000000..da9f30bb1
--- /dev/null
+++ b/patches/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.patch
@@ -0,0 +1,43 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: use ksm_get_folio in scan_get_next_rmap_item
+Date: Thu, 11 Apr 2024 14:17:07 +0800
+
+Save a few compound_head() calls.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-7-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/ksm.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/mm/ksm.c~mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item
++++ a/mm/ksm.c
+@@ -2611,14 +2611,14 @@ static struct ksm_rmap_item *scan_get_ne
+ */
+ if (!ksm_merge_across_nodes) {
+ struct ksm_stable_node *stable_node, *next;
+- struct page *page;
++ struct folio *folio;
+
+ list_for_each_entry_safe(stable_node, next,
+ &migrate_nodes, list) {
+- page = get_ksm_page(stable_node,
+- GET_KSM_PAGE_NOLOCK);
+- if (page)
+- put_page(page);
++ folio = ksm_get_folio(stable_node,
++ GET_KSM_PAGE_NOLOCK);
++ if (folio)
++ folio_put(folio);
+ cond_resched();
+ }
+ }
+_
diff --git a/patches/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch b/patches/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch
index 0fd79e34c..0fd79e34c 100644
--- a/patches/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch
+++ b/patches/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch
diff --git a/patches/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch b/patches/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch
index 7ad2424fe..7ad2424fe 100644
--- a/patches/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch
+++ b/patches/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch
diff --git a/patches/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.patch b/patches/old/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.patch
index b09222878..b09222878 100644
--- a/patches/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.patch
+++ b/patches/old/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.patch
diff --git a/patches/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.patch b/patches/old/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.patch
index ca656faab..ca656faab 100644
--- a/patches/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.patch
+++ b/patches/old/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.patch
diff --git a/patches/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.patch b/patches/old/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.patch
index c3a43e0c2..c3a43e0c2 100644
--- a/patches/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.patch
+++ b/patches/old/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.patch
diff --git a/pc/devel-series b/pc/devel-series
index c58498fb1..fa3eb22bf 100644
--- a/pc/devel-series
+++ b/pc/devel-series
@@ -211,6 +211,7 @@ codetag-debug-skip-objext-checking-when-its-for-objext-itself.patch
codetag-debug-mark-codetags-for-reserved-pages-as-empty.patch
codetag-debug-introduce-objexts_alloc_fail-to-mark-failed-slab_ext-allocations.patch
maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling.patch
+maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.patch
memprofiling-documentation.patch
#
#
@@ -230,9 +231,6 @@ hugetlb-remove-mention-of-destructors.patch
selftests-mm-confirm-va-exhaustion-without-reliance-on-correctness-of-mmap.patch
selftests-mm-confirm-va-exhaustion-without-reliance-on-correctness-of-mmap-v2.patch
#
-#mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.patch: TBU
-mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.patch
-s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.patch
#
mm-page-flags-make-__pagemovable-return-bool.patch
#
@@ -520,10 +518,6 @@ mm-convert-free_zone_device_page-to-free_zone_device_folio.patch
#
mm-set-pageblock_order-to-hpage_pmd_order-in-case-with-config_hugetlb_page-but-thp-enabled.patch
#
-#mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch+1: docs? acks? TBU?
-mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch
-mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.patch
-mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.patch
#
memory-tier-dax-kmem-introduce-an-abstract-layer-for-finding-allocating-and-putting-memory-types.patch
#memory-tier-create-cpuless-memory-tiers-after-obtaining-hmat-info.patch: https://lkml.kernel.org/r/20240405150244.00004b49@Huawei.com TBU? check review
@@ -562,12 +556,26 @@ documentation-admin-guide-cgroup-v1-memoryrst-dont-reference-page_mapcount.patch
#
#mm-madvise-optimize-lazyfreeing-with-mthp-in-madvise_free.patch: usual async concerns TBU
mm-madvise-optimize-lazyfreeing-with-mthp-in-madvise_free.patch
+#mm-arm64-override-mkold_clean_ptes-batch-helper.patch: https://lkml.kernel.org/r/3cd1036d-3814-4a10-b6d2-099937ceabc8@arm.com
mm-arm64-override-mkold_clean_ptes-batch-helper.patch
#
arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch
arm-mm-drop-vm_fault_badmap-vm_fault_badaccess.patch
arm-mm-drop-vm_fault_badmap-vm_fault_badaccess-checkpatch-fixes.patch
#
+mm-ksm-add-ksm_get_folio.patch
+mm-ksm-use-folio-in-remove_rmap_item_from_tree.patch
+mm-ksm-add-folio_set_stable_node.patch
+mm-ksm-use-folio-in-remove_stable_node.patch
+mm-ksm-use-folio-in-stable_node_dup.patch
+mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.patch
+mm-ksm-use-folio-in-write_protect_page.patch
+mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.patch
+mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.patch
+mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.patch
+#
+mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.patch
+#
#
#
#
diff --git a/pc/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.pc b/pc/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.pc
new file mode 100644
index 000000000..a04c4ff68
--- /dev/null
+++ b/pc/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.pc
@@ -0,0 +1 @@
+MAINTAINERS
diff --git a/pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.pc b/pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.pc
deleted file mode 100644
index 751f878cc..000000000
--- a/pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.pc
+++ /dev/null
@@ -1,2 +0,0 @@
-include/linux/huge_mm.h
-mm/huge_memory.c
diff --git a/pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.pc b/pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.pc
deleted file mode 100644
index c3c5586ce..000000000
--- a/pc/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.pc
+++ /dev/null
@@ -1,3 +0,0 @@
-include/linux/huge_mm.h
-mm/huge_memory.c
-mm/memory.c
diff --git a/pc/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.pc b/pc/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.pc
deleted file mode 100644
index f238d5812..000000000
--- a/pc/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.pc
+++ /dev/null
@@ -1,4 +0,0 @@
-include/linux/huge_mm.h
-mm/huge_memory.c
-mm/page_io.c
-mm/vmscan.c
diff --git a/pc/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.pc b/pc/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.pc
new file mode 100644
index 000000000..771c93a72
--- /dev/null
+++ b/pc/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.pc
@@ -0,0 +1,3 @@
+include/linux/hugetlb.h
+mm/hugetlb.c
+mm/memory-failure.c
diff --git a/pc/mm-ksm-add-folio_set_stable_node.pc b/pc/mm-ksm-add-folio_set_stable_node.pc
new file mode 100644
index 000000000..712364693
--- /dev/null
+++ b/pc/mm-ksm-add-folio_set_stable_node.pc
@@ -0,0 +1 @@
+mm/ksm.c
diff --git a/pc/mm-ksm-add-ksm_get_folio.pc b/pc/mm-ksm-add-ksm_get_folio.pc
new file mode 100644
index 000000000..712364693
--- /dev/null
+++ b/pc/mm-ksm-add-ksm_get_folio.pc
@@ -0,0 +1 @@
+mm/ksm.c
diff --git a/pc/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.pc b/pc/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.pc
new file mode 100644
index 000000000..7325e53e3
--- /dev/null
+++ b/pc/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.pc
@@ -0,0 +1,2 @@
+mm/ksm.c
+mm/migrate.c
diff --git a/pc/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.pc b/pc/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.pc
new file mode 100644
index 000000000..712364693
--- /dev/null
+++ b/pc/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.pc
@@ -0,0 +1 @@
+mm/ksm.c
diff --git a/pc/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.pc b/pc/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.pc
new file mode 100644
index 000000000..712364693
--- /dev/null
+++ b/pc/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.pc
@@ -0,0 +1 @@
+mm/ksm.c
diff --git a/pc/mm-ksm-use-folio-in-remove_rmap_item_from_tree.pc b/pc/mm-ksm-use-folio-in-remove_rmap_item_from_tree.pc
new file mode 100644
index 000000000..712364693
--- /dev/null
+++ b/pc/mm-ksm-use-folio-in-remove_rmap_item_from_tree.pc
@@ -0,0 +1 @@
+mm/ksm.c
diff --git a/pc/mm-ksm-use-folio-in-remove_stable_node.pc b/pc/mm-ksm-use-folio-in-remove_stable_node.pc
new file mode 100644
index 000000000..712364693
--- /dev/null
+++ b/pc/mm-ksm-use-folio-in-remove_stable_node.pc
@@ -0,0 +1 @@
+mm/ksm.c
diff --git a/pc/mm-ksm-use-folio-in-stable_node_dup.pc b/pc/mm-ksm-use-folio-in-stable_node_dup.pc
new file mode 100644
index 000000000..712364693
--- /dev/null
+++ b/pc/mm-ksm-use-folio-in-stable_node_dup.pc
@@ -0,0 +1 @@
+mm/ksm.c
diff --git a/pc/mm-ksm-use-folio-in-write_protect_page.pc b/pc/mm-ksm-use-folio-in-write_protect_page.pc
new file mode 100644
index 000000000..712364693
--- /dev/null
+++ b/pc/mm-ksm-use-folio-in-write_protect_page.pc
@@ -0,0 +1 @@
+mm/ksm.c
diff --git a/pc/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.pc b/pc/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.pc
new file mode 100644
index 000000000..712364693
--- /dev/null
+++ b/pc/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.pc
@@ -0,0 +1 @@
+mm/ksm.c
diff --git a/pc/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.pc b/pc/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.pc
deleted file mode 100644
index d6f75b4ac..000000000
--- a/pc/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.pc
+++ /dev/null
@@ -1 +0,0 @@
-mm/userfaultfd.c
diff --git a/pc/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.pc b/pc/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.pc
deleted file mode 100644
index 3eff14551..000000000
--- a/pc/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.pc
+++ /dev/null
@@ -1,6 +0,0 @@
-arch/s390/include/asm/gmap.h
-arch/s390/include/asm/mmu_context.h
-arch/s390/include/asm/mmu.h
-arch/s390/include/asm/pgtable.h
-arch/s390/kvm/kvm-s390.c
-arch/s390/mm/gmap.c
diff --git a/txt/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt b/txt/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt
index 5e3b9e53b..37c030927 100644
--- a/txt/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt
+++ b/txt/arm64-mm-drop-vm_fault_badmap-vm_fault_badaccess.txt
@@ -17,8 +17,8 @@ arch's special vm fault reason.
Link: https://lkml.kernel.org/r/20240411130925.73281-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240411130925.73281-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Aishwarya TCV <aishwarya.tcv@arm.com>
-Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Cristian Marussi <cristian.marussi@arm.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
diff --git a/txt/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.txt b/txt/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.txt
new file mode 100644
index 000000000..cf3ac0e8c
--- /dev/null
+++ b/txt/maintainers-add-entries-for-code-tagging-and-memory-allocation-profiling-fix.txt
@@ -0,0 +1,16 @@
+From: Lukas Bulwahn <lbulwahn@redhat.com>
+Subject: MAINTAINERS: improve entries in CODE TAGGING and MEMORY ALLOCATION PROFILING
+Date: Thu, 11 Apr 2024 08:47:17 +0200
+
+Commit 5acf2502db99 ("MAINTAINERS: add entries for code tagging and memory
+allocation profiling") adds the two new sections CODE TAGGING and MEMORY
+ALLOCATION PROFILING. The files in these sections, however, do not match
+the files added in the corresponding patch series.
+
+Improve the two entries to refer to all files added by that series and
+drop the entries for non-existent files.
+
+Link: https://lkml.kernel.org/r/20240411064717.51140-1-lukas.bulwahn@redhat.com
+Signed-off-by: Lukas Bulwahn <lukas.bulwahn@redhat.com>
+Acked-by: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
diff --git a/txt/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.txt b/txt/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.txt
new file mode 100644
index 000000000..87b440018
--- /dev/null
+++ b/txt/mm-hugetlb-convert-dissolve_free_huge_pages-to-folios.txt
@@ -0,0 +1,14 @@
+From: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Subject: mm/hugetlb: convert dissolve_free_huge_pages() to folios
+Date: Thu, 11 Apr 2024 09:47:56 -0700
+
+Allows us to rename dissolve_free_huge_pages() to
+dissolve_free_hugetlb_folio(). Convert one caller to pass in a folio
+directly, and use page_folio() to convert the caller in mm/memory-failure.c.
+
+Link: https://lkml.kernel.org/r/20240411164756.261178-1-sidhartha.kumar@oracle.com
+Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Cc: Jane Chu <jane.chu@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Muchun Song <muchun.song@linux.dev>
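
For illustration only, a minimal sketch of the call-site conversion the
changelog above describes, assuming dissolve_free_hugetlb_folio() keeps an
int return type; the wrapper name try_dissolve() is hypothetical and not
part of the patch:

    /*
     * Hypothetical caller, mm/memory-failure.c style: the page is turned
     * into a folio once at the boundary with page_folio(), so the renamed
     * dissolve_free_hugetlb_folio() can work on folios throughout.
     */
    static int try_dissolve(struct page *page)
    {
            struct folio *folio = page_folio(page);

            return dissolve_free_hugetlb_folio(folio);
    }
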
diff --git a/txt/mm-ksm-add-folio_set_stable_node.txt b/txt/mm-ksm-add-folio_set_stable_node.txt
new file mode 100644
index 000000000..af914e0a4
--- /dev/null
+++ b/txt/mm-ksm-add-folio_set_stable_node.txt
@@ -0,0 +1,16 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: add folio_set_stable_node
+Date: Thu, 11 Apr 2024 14:17:04 +0800
+
+Add folio_set_stable_node() as a wrapper around set_page_stable_node(),
+and use it in place of the latter. The two will be merged once all call
+sites are converted to folios.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-4-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
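
As a sketch of the transitional wrapper described above (the body shown is
an assumption; only the two function names come from the patch):

    /*
     * Minimal sketch: the new folio helper forwards to the existing page
     * helper until every caller is converted, after which the two can be
     * merged into a single folio-only implementation.
     */
    static inline void folio_set_stable_node(struct folio *folio,
                                             struct ksm_stable_node *stable_node)
    {
            set_page_stable_node(&folio->page, stable_node);
    }
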
diff --git a/txt/mm-ksm-add-ksm_get_folio.txt b/txt/mm-ksm-add-ksm_get_folio.txt
new file mode 100644
index 000000000..ff5536c5e
--- /dev/null
+++ b/txt/mm-ksm-add-ksm_get_folio.txt
@@ -0,0 +1,35 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: add ksm_get_folio
+Date: Thu, 11 Apr 2024 14:17:02 +0800
+
+Patch series "transfer page to folio in KSM".
+
+This is the first part of the page-to-folio conversion in KSM. Since
+only single pages can be stored in KSM, we can safely convert the stable
+tree pages to folios.
+
+This patchset reduces ksm.o by 57 kbytes from 2541776 bytes on the latest
+akpm/mm-stable branch with CONFIG_DEBUG_VM enabled. It passes the KSM
+tests in LTP and the kernel selftests.
+
+Thanks to Matthew Wilcox and David Hildenbrand for their suggestions and
+comments!
+
+
+This patch (of 10):
+
+KSM only handles single pages, so we can add a new function
+ksm_get_folio() as a folio version of get_ksm_page(), saving a couple of
+compound_head() calls.
+
+Once all callers are converted, get_ksm_page() will be removed.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-1-alexs@kernel.org
+Link: https://lkml.kernel.org/r/20240411061713.1847574-2-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
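
A minimal sketch of the transition this enables: the page-based helper can
be kept as a thin wrapper around the new folio lookup until its last caller
is gone. The NULL handling shown is an assumption, not copied from the
patch:

    static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
                                     enum get_ksm_page_flags flags)
    {
            struct folio *folio = ksm_get_folio(stable_node, flags);

            /* Unconverted callers still expect a page (or NULL). */
            return folio ? &folio->page : NULL;
    }
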
diff --git a/txt/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.txt b/txt/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.txt
new file mode 100644
index 000000000..34937acc6
--- /dev/null
+++ b/txt/mm-ksm-convert-chain-series-funcs-and-replace-get_ksm_page.txt
@@ -0,0 +1,18 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: convert chain series funcs and replace get_ksm_page
+Date: Thu, 11 Apr 2024 14:17:09 +0800
+
+All pages in the KSM stable tree are single pages, so convert the chain
+functions and stable_tree_insert()/stable_tree_search() to use folios,
+and replace the no longer needed get_ksm_page() with ksm_get_folio().
+
+This saves a few compound_head() calls.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-9-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Cc: David Hildenbrand <david@redhat.com>
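
A sketch of the call-site pattern this conversion implies inside the
stable-tree walk; the wrapper function and its control flow are simplified
assumptions for illustration:

    static struct folio *stable_tree_lookup_sketch(struct ksm_stable_node *dup)
    {
            struct folio *tree_folio;

            /*
             * The walk used to fetch a page via get_ksm_page() and pay
             * compound_head() on each later use; keeping a folio pays
             * that cost only once.
             */
            tree_folio = ksm_get_folio(dup, GET_KSM_PAGE_NOLOCK);
            if (!tree_folio)
                    return NULL;

            return tree_folio;
    }
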
diff --git a/txt/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.txt b/txt/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.txt
new file mode 100644
index 000000000..96089a991
--- /dev/null
+++ b/txt/mm-ksm-rename-get_ksm_page_flags-to-ksm_get_folio_flags.txt
@@ -0,0 +1,16 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: mm/ksm: rename get_ksm_page_flags to ksm_get_folio_flags
+Date: Thu, 11 Apr 2024 14:17:10 +0800
+
+As we are removing get_ksm_page_flags(), make the flags match the new
+function name.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-10-alexs@kernel.org
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Alex Shi <alexs@kernel.org>
+Reviewed-by: Alex Shi <alexs@kernel.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
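
What the rename amounts to, sketched below; the individual flag names are
assumptions extrapolated from the old get_ksm_page naming:

    /* Old naming, tied to the function being removed: */
    enum get_ksm_page_flags {
            GET_KSM_PAGE_NOLOCK,
            GET_KSM_PAGE_LOCK,
            GET_KSM_PAGE_TRYLOCK
    };

    /* New naming, matching ksm_get_folio(): */
    enum ksm_get_folio_flags {
            KSM_GET_FOLIO_NOLOCK,
            KSM_GET_FOLIO_LOCK,
            KSM_GET_FOLIO_TRYLOCK
    };
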
diff --git a/txt/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.txt b/txt/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.txt
new file mode 100644
index 000000000..077d37750
--- /dev/null
+++ b/txt/mm-ksm-replace-set_page_stable_node-by-folio_set_stable_node.txt
@@ -0,0 +1,16 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: replace set_page_stable_node by folio_set_stable_node
+Date: Thu, 11 Apr 2024 14:17:11 +0800
+
+Only a single page can be reached at the point where we set the stable
+node after write protection, so use the folio-converted function instead
+of the page one, and remove the now unused set_page_stable_node().
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-11-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
diff --git a/txt/mm-ksm-use-folio-in-remove_rmap_item_from_tree.txt b/txt/mm-ksm-use-folio-in-remove_rmap_item_from_tree.txt
new file mode 100644
index 000000000..9b4f15d2f
--- /dev/null
+++ b/txt/mm-ksm-use-folio-in-remove_rmap_item_from_tree.txt
@@ -0,0 +1,14 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: use folio in remove_rmap_item_from_tree
+Date: Thu, 11 Apr 2024 14:17:03 +0800
+
+Use a folio here to save two compound_head() calls.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-3-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
diff --git a/txt/mm-ksm-use-folio-in-remove_stable_node.txt b/txt/mm-ksm-use-folio-in-remove_stable_node.txt
new file mode 100644
index 000000000..94918f4be
--- /dev/null
+++ b/txt/mm-ksm-use-folio-in-remove_stable_node.txt
@@ -0,0 +1,15 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: use folio in remove_stable_node
+Date: Thu, 11 Apr 2024 14:17:05 +0800
+
+Pages in the stable tree are all single, normal pages, so use
+ksm_get_folio() and folio_set_stable_node(); this also saves three calls
+to compound_head().
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-5-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
diff --git a/txt/mm-ksm-use-folio-in-stable_node_dup.txt b/txt/mm-ksm-use-folio-in-stable_node_dup.txt
new file mode 100644
index 000000000..5f3f9e013
--- /dev/null
+++ b/txt/mm-ksm-use-folio-in-stable_node_dup.txt
@@ -0,0 +1,14 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: use folio in stable_node_dup
+Date: Thu, 11 Apr 2024 14:17:06 +0800
+
+Use ksm_get_folio() and save two compound_head() calls.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-6-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
diff --git a/txt/mm-ksm-use-folio-in-write_protect_page.txt b/txt/mm-ksm-use-folio-in-write_protect_page.txt
new file mode 100644
index 000000000..8213bfd5b
--- /dev/null
+++ b/txt/mm-ksm-use-folio-in-write_protect_page.txt
@@ -0,0 +1,15 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: use folio in write_protect_page
+Date: Thu, 11 Apr 2024 14:17:08 +0800
+
+Compound pages are checked and skipped before write_protect_page() is
+called, so use a folio to save a few compound_head() checks.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-8-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
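
A minimal sketch of the caller-side conversion implied above; the wrapper
name and the exact write_protect_page() signature are illustrative
assumptions:

    /*
     * Compound pages were already rejected before this point, so a single
     * page_folio() at the boundary is safe and write_protect_page() can
     * use folio accessors without repeated compound_head() lookups.
     */
    static int try_write_protect(struct vm_area_struct *vma,
                                 struct page *page, pte_t *orig_pte)
    {
            struct folio *folio = page_folio(page);

            return write_protect_page(vma, folio, orig_pte);
    }
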
diff --git a/txt/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.txt b/txt/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.txt
new file mode 100644
index 000000000..f7290f426
--- /dev/null
+++ b/txt/mm-ksm-use-ksm_get_folio-in-scan_get_next_rmap_item.txt
@@ -0,0 +1,14 @@
+From: "Alex Shi (tencent)" <alexs@kernel.org>
+Subject: mm/ksm: use ksm_get_folio in scan_get_next_rmap_item
+Date: Thu, 11 Apr 2024 14:17:07 +0800
+
+Use ksm_get_folio() here to save a compound_head() call.
+
+Link: https://lkml.kernel.org/r/20240411061713.1847574-7-alexs@kernel.org
+Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Izik Eidus <izik.eidus@ravellosystems.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Chris Wright <chrisw@sous-sol.org>
diff --git a/txt/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.txt b/txt/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.txt
index fe1b0f84b..fe1b0f84b 100644
--- a/txt/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.txt
+++ b/txt/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters-fix.txt
diff --git a/txt/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.txt b/txt/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.txt
index f1625f7c3..f1625f7c3 100644
--- a/txt/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.txt
+++ b/txt/old/mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.txt
diff --git a/txt/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.txt b/txt/old/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.txt
index b21321f7f..b21321f7f 100644
--- a/txt/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.txt
+++ b/txt/old/mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.txt
diff --git a/txt/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.txt b/txt/old/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.txt
index f5030d6d7..f5030d6d7 100644
--- a/txt/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.txt
+++ b/txt/old/mm-userfaultfd-dont-place-zeropages-when-zeropages-are-disallowed.txt
diff --git a/txt/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.txt b/txt/old/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.txt
index defda06e2..defda06e2 100644
--- a/txt/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.txt
+++ b/txt/old/s390-mm-re-enable-the-shared-zeropage-for-pv-and-skeys-kvm-guests.txt