author     Andrew Morton <akpm@linux-foundation.org>  2024-04-05 17:58:21 -0700
committer  Andrew Morton <akpm@linux-foundation.org>  2024-04-05 17:58:21 -0700
commit     a6c8e4a1cb40266ca9b7133430cf1d9ef598b80a (patch)
tree       412e7a720f69ace74db396f146a9cdc998e492a0
parent     e460e6d35e659ea5506474de220e638365211271 (diff)
download   25-new-a6c8e4a1cb40266ca9b7133430cf1d9ef598b80a.tar.gz
foo
-rw-r--r--  patches/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault.patch   1
-rw-r--r--  patches/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault.patch   1
-rw-r--r--  patches/memory-remove-the-now-superfluous-sentinel-element-from-ctl_table-array.patch   2
-rw-r--r--  patches/mm-add-defines-for-min-max-swappiness.patch   8
-rw-r--r--  patches/mm-add-swapiness=-arg-to-memoryreclaim.patch   6
-rw-r--r--  patches/mm-combine-free_the_page-and-free_unref_page.patch  10
-rw-r--r--  patches/mm-make-is_free_buddy_page-take-a-const-argument.patch   2
-rw-r--r--  patches/mm-optimization-on-page-allocation-when-cma-enabled.patch   4
-rw-r--r--  patches/mm-page_alloc-close-migratetype-race-between-freeing-and-stealing.patch  12
-rw-r--r--  patches/mm-page_alloc-consolidate-free-page-accounting-fix-2.patch   4
-rw-r--r--  patches/mm-page_alloc-consolidate-free-page-accounting.patch  18
-rw-r--r--  patches/mm-page_alloc-fix-freelist-movement-during-block-conversion.patch   1
-rw-r--r--  patches/mm-page_alloc-set-migratetype-inside-move_freepages.patch   2
-rw-r--r--  patches/mm-page_alloc-use-the-correct-thp-order-for-thp-pcp.patch   1
-rw-r--r--  patches/mm-page_allocc-dont-show-protection-in-zones-lowmem_reserve-for-empty-zone.patch   2
-rw-r--r--  patches/mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists-v2.patch   2
-rw-r--r--  patches/mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists.patch   2
-rw-r--r--  patches/mm-page_isolation-prepare-for-hygienic-freelists.patch   4
-rw-r--r--  patches/mm-record-the-migration-reason-for-struct-migration_target_control.patch   2
-rw-r--r--  patches/mm-swap-remove-cluster_flag_huge-from-swap_cluster_info-flags.patch   2
-rw-r--r--  patches/mm-vmalloc-eliminated-the-lock-contention-from-twice-to-once.patch  14
-rw-r--r--  patches/mm-vmalloc-enable-memory-allocation-profiling-fix-2.patch  14
-rw-r--r--  patches/mm-vmalloc-enable-memory-allocation-profiling.patch  34
-rw-r--r--  patches/mm-vmallocc-optimize-to-reduce-arguments-of-alloc_vmap_area.patch  10
-rw-r--r--  pc/devel-series   4
-rw-r--r--  txt/khugepaged-inline-hpage_collapse_alloc_folio.txt   1
-rw-r--r--  txt/khugepaged-pass-a-folio-to-__collapse_huge_page_copy.txt   1
-rw-r--r--  txt/khugepaged-remove-hpage-from-collapse_file.txt   1
-rw-r--r--  txt/khugepaged-remove-hpage-from-collapse_huge_page.txt   1
-rw-r--r--  txt/khugepaged-use-a-folio-throughout-hpage_collapse_scan_file.txt   1
30 files changed, 89 insertions(+), 78 deletions(-)
diff --git a/patches/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault.patch b/patches/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault.patch
index 7ecff0c6d..a442e9f98 100644
--- a/patches/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault.patch
+++ b/patches/hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault.patch
@@ -8,6 +8,7 @@ variables into a single struct.
Link: https://lkml.kernel.org/r/20240401202651.31440-3-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/patches/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault.patch b/patches/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault.patch
index 31f19251c..ceeb17dc5 100644
--- a/patches/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault.patch
+++ b/patches/hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault.patch
@@ -8,6 +8,7 @@ struct.
Link: https://lkml.kernel.org/r/20240401202651.31440-4-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/patches/memory-remove-the-now-superfluous-sentinel-element-from-ctl_table-array.patch b/patches/memory-remove-the-now-superfluous-sentinel-element-from-ctl_table-array.patch
index e28bca5c5..e4cb58a49 100644
--- a/patches/memory-remove-the-now-superfluous-sentinel-element-from-ctl_table-array.patch
+++ b/patches/memory-remove-the-now-superfluous-sentinel-element-from-ctl_table-array.patch
@@ -78,7 +78,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/page_alloc.c~memory-remove-the-now-superfluous-sentinel-element-from-ctl_table-array
+++ a/mm/page_alloc.c
-@@ -6293,7 +6293,6 @@ static struct ctl_table page_alloc_sysct
+@@ -6294,7 +6294,6 @@ static struct ctl_table page_alloc_sysct
.extra2 = SYSCTL_ONE_HUNDRED,
},
#endif
diff --git a/patches/mm-add-defines-for-min-max-swappiness.patch b/patches/mm-add-defines-for-min-max-swappiness.patch
index 588d05431..20ccd2e0e 100644
--- a/patches/mm-add-defines-for-min-max-swappiness.patch
+++ b/patches/mm-add-defines-for-min-max-swappiness.patch
@@ -150,7 +150,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
*/
int vm_swappiness = 60;
-@@ -2433,7 +2433,7 @@ static void get_scan_count(struct lruvec
+@@ -2436,7 +2436,7 @@ static void get_scan_count(struct lruvec
ap = swappiness * (total_cost + 1);
ap /= anon_cost + 1;
@@ -159,7 +159,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fp /= file_cost + 1;
fraction[0] = ap;
-@@ -4453,7 +4453,7 @@ static int get_type_to_scan(struct lruve
+@@ -4456,7 +4456,7 @@ static int get_type_to_scan(struct lruve
{
int type, tier;
struct ctrl_pos sp, pv;
@@ -168,7 +168,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/*
* Compare the first tier of anon with that of file to determine which
-@@ -4500,7 +4500,7 @@ static int isolate_folios(struct lruvec
+@@ -4503,7 +4503,7 @@ static int isolate_folios(struct lruvec
type = LRU_GEN_ANON;
else if (swappiness == 1)
type = LRU_GEN_FILE;
@@ -177,7 +177,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
type = LRU_GEN_ANON;
else if (!(sc->gfp_mask & __GFP_IO))
type = LRU_GEN_FILE;
-@@ -5434,9 +5434,9 @@ static int run_cmd(char cmd, int memcg_i
+@@ -5437,9 +5437,9 @@ static int run_cmd(char cmd, int memcg_i
lruvec = get_lruvec(memcg, nid);
diff --git a/patches/mm-add-swapiness=-arg-to-memoryreclaim.patch b/patches/mm-add-swapiness=-arg-to-memoryreclaim.patch
index 25ca2b111..244dc0905 100644
--- a/patches/mm-add-swapiness=-arg-to-memoryreclaim.patch
+++ b/patches/mm-add-swapiness=-arg-to-memoryreclaim.patch
@@ -305,7 +305,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#endif
static void set_task_reclaim_state(struct task_struct *task,
-@@ -2357,7 +2374,7 @@ static void get_scan_count(struct lruvec
+@@ -2360,7 +2377,7 @@ static void get_scan_count(struct lruvec
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
unsigned long anon_cost, file_cost, total_cost;
@@ -314,7 +314,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
u64 fraction[ANON_AND_FILE];
u64 denominator = 0; /* gcc */
enum scan_balance scan_balance;
-@@ -2638,7 +2655,7 @@ static int get_swappiness(struct lruvec
+@@ -2641,7 +2658,7 @@ static int get_swappiness(struct lruvec
mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
return 0;
@@ -323,7 +323,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
static int get_nr_gens(struct lruvec *lruvec, int type)
-@@ -6519,12 +6536,14 @@ unsigned long mem_cgroup_shrink_node(str
+@@ -6522,12 +6539,14 @@ unsigned long mem_cgroup_shrink_node(str
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
diff --git a/patches/mm-combine-free_the_page-and-free_unref_page.patch b/patches/mm-combine-free_the_page-and-free_unref_page.patch
index 334ce13cc..4e01eeb9b 100644
--- a/patches/mm-combine-free_the_page-and-free_unref_page.patch
+++ b/patches/mm-combine-free_the_page-and-free_unref_page.patch
@@ -40,7 +40,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
static inline void set_buddy_order(struct page *page, unsigned int order)
-@@ -2578,6 +2570,11 @@ void free_unref_page(struct page *page,
+@@ -2579,6 +2571,11 @@ void free_unref_page(struct page *page,
unsigned long pfn = page_to_pfn(page);
int migratetype;
@@ -52,7 +52,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (!free_pages_prepare(page, order))
return;
-@@ -4760,11 +4757,11 @@ void __free_pages(struct page *page, uns
+@@ -4761,11 +4758,11 @@ void __free_pages(struct page *page, uns
struct alloc_tag *tag = pgalloc_tag_get(page);
if (put_page_testzero(page))
@@ -66,7 +66,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
}
EXPORT_SYMBOL(__free_pages);
-@@ -4826,7 +4823,7 @@ void __page_frag_cache_drain(struct page
+@@ -4827,7 +4824,7 @@ void __page_frag_cache_drain(struct page
VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
if (page_ref_sub_and_test(page, count))
@@ -75,7 +75,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
EXPORT_SYMBOL(__page_frag_cache_drain);
-@@ -4867,7 +4864,7 @@ refill:
+@@ -4868,7 +4865,7 @@ refill:
goto refill;
if (unlikely(nc->pfmemalloc)) {
@@ -84,7 +84,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
goto refill;
}
-@@ -4911,7 +4908,7 @@ void page_frag_free(void *addr)
+@@ -4912,7 +4909,7 @@ void page_frag_free(void *addr)
struct page *page = virt_to_head_page(addr);
if (unlikely(put_page_testzero(page)))
diff --git a/patches/mm-make-is_free_buddy_page-take-a-const-argument.patch b/patches/mm-make-is_free_buddy_page-take-a-const-argument.patch
index 04e6e03a0..7cdc2d1b1 100644
--- a/patches/mm-make-is_free_buddy_page-take-a-const-argument.patch
+++ b/patches/mm-make-is_free_buddy_page-take-a-const-argument.patch
@@ -27,7 +27,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/page_alloc.c~mm-make-is_free_buddy_page-take-a-const-argument
+++ a/mm/page_alloc.c
-@@ -6750,16 +6750,16 @@ void __offline_isolated_pages(unsigned l
+@@ -6751,16 +6751,16 @@ void __offline_isolated_pages(unsigned l
/*
* This function returns a stable result only if called under zone lock.
*/
diff --git a/patches/mm-optimization-on-page-allocation-when-cma-enabled.patch b/patches/mm-optimization-on-page-allocation-when-cma-enabled.patch
index 34614521c..def3e7380 100644
--- a/patches/mm-optimization-on-page-allocation-when-cma-enabled.patch
+++ b/patches/mm-optimization-on-page-allocation-when-cma-enabled.patch
@@ -35,7 +35,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/page_alloc.c~mm-optimization-on-page-allocation-when-cma-enabled
+++ a/mm/page_alloc.c
-@@ -2189,6 +2189,43 @@ do_steal:
+@@ -2167,6 +2167,43 @@ do_steal:
return page;
}
@@ -79,7 +79,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/*
* Do the hard work of removing an element from the buddy allocator.
* Call me with the zone->lock already held.
-@@ -2202,12 +2239,11 @@ __rmqueue(struct zone *zone, unsigned in
+@@ -2180,12 +2217,11 @@ __rmqueue(struct zone *zone, unsigned in
if (IS_ENABLED(CONFIG_CMA)) {
/*
* Balance movable allocations between regular and CMA areas by
diff --git a/patches/mm-page_alloc-close-migratetype-race-between-freeing-and-stealing.patch b/patches/mm-page_alloc-close-migratetype-race-between-freeing-and-stealing.patch
index 10bd6873e..07b4933b3 100644
--- a/patches/mm-page_alloc-close-migratetype-race-between-freeing-and-stealing.patch
+++ b/patches/mm-page_alloc-close-migratetype-race-between-freeing-and-stealing.patch
@@ -89,7 +89,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
__count_vm_events(PGFREE, 1 << order);
}
-@@ -2502,7 +2491,7 @@ void free_unref_page(struct page *page,
+@@ -2503,7 +2492,7 @@ void free_unref_page(struct page *page,
struct per_cpu_pages *pcp;
struct zone *zone;
unsigned long pfn = page_to_pfn(page);
@@ -98,7 +98,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (!free_pages_prepare(page, order))
return;
-@@ -2514,23 +2503,23 @@ void free_unref_page(struct page *page,
+@@ -2515,23 +2504,23 @@ void free_unref_page(struct page *page,
* get those areas back if necessary. Otherwise, we may have to free
* excessively into the page allocator
*/
@@ -127,7 +127,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
pcp_trylock_finish(UP_flags);
}
-@@ -2560,12 +2549,8 @@ void free_unref_folios(struct folio_batc
+@@ -2561,12 +2550,8 @@ void free_unref_folios(struct folio_batc
* allocator.
*/
if (!pcp_allowed_order(order)) {
@@ -142,7 +142,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
continue;
}
folio->private = (void *)(unsigned long)order;
-@@ -2601,7 +2586,7 @@ void free_unref_folios(struct folio_batc
+@@ -2602,7 +2587,7 @@ void free_unref_folios(struct folio_batc
*/
if (is_migrate_isolate(migratetype)) {
free_one_page(zone, &folio->page, pfn,
@@ -151,7 +151,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
continue;
}
-@@ -2614,7 +2599,7 @@ void free_unref_folios(struct folio_batc
+@@ -2615,7 +2600,7 @@ void free_unref_folios(struct folio_batc
if (unlikely(!pcp)) {
pcp_trylock_finish(UP_flags);
free_one_page(zone, &folio->page, pfn,
@@ -160,7 +160,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
continue;
}
locked_zone = zone;
-@@ -6797,13 +6782,14 @@ bool take_page_off_buddy(struct page *pa
+@@ -6798,13 +6783,14 @@ bool take_page_off_buddy(struct page *pa
bool put_page_back_buddy(struct page *page)
{
struct zone *zone = page_zone(page);
diff --git a/patches/mm-page_alloc-consolidate-free-page-accounting-fix-2.patch b/patches/mm-page_alloc-consolidate-free-page-accounting-fix-2.patch
index b1cfa7e1d..a80066451 100644
--- a/patches/mm-page_alloc-consolidate-free-page-accounting-fix-2.patch
+++ b/patches/mm-page_alloc-consolidate-free-page-accounting-fix-2.patch
@@ -68,7 +68,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
while (order < MAX_PAGE_ORDER) {
int buddy_mt = migratetype;
-@@ -6930,7 +6930,7 @@ static bool try_to_accept_memory_one(str
+@@ -6931,7 +6931,7 @@ static bool try_to_accept_memory_one(str
list_del(&page->lru);
last = list_empty(&zone->unaccepted_pages);
@@ -77,7 +77,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
spin_unlock_irqrestore(&zone->lock, flags);
-@@ -6982,7 +6982,7 @@ static bool __free_unaccepted(struct pag
+@@ -6983,7 +6983,7 @@ static bool __free_unaccepted(struct pag
spin_lock_irqsave(&zone->lock, flags);
first = list_empty(&zone->unaccepted_pages);
list_add_tail(&page->lru, &zone->unaccepted_pages);
diff --git a/patches/mm-page_alloc-consolidate-free-page-accounting.patch b/patches/mm-page_alloc-consolidate-free-page-accounting.patch
index a235f24c5..9f0e6ceec 100644
--- a/patches/mm-page_alloc-consolidate-free-page-accounting.patch
+++ b/patches/mm-page_alloc-consolidate-free-page-accounting.patch
@@ -504,7 +504,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/*
* Reserving this block already succeeded, so this should
* not fail on zone boundaries.
-@@ -2224,12 +2255,7 @@ static int rmqueue_bulk(struct zone *zon
+@@ -2225,12 +2256,7 @@ static int rmqueue_bulk(struct zone *zon
* pages are ordered properly.
*/
list_add_tail(&page->pcp_list, list);
@@ -517,7 +517,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
spin_unlock_irqrestore(&zone->lock, flags);
return i;
-@@ -2722,11 +2748,9 @@ int __isolate_free_page(struct page *pag
+@@ -2723,11 +2749,9 @@ int __isolate_free_page(struct page *pag
watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
return 0;
@@ -530,7 +530,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/*
* Set the pageblock if the isolated page is at least half of a
-@@ -2741,7 +2765,7 @@ int __isolate_free_page(struct page *pag
+@@ -2742,7 +2766,7 @@ int __isolate_free_page(struct page *pag
* with others)
*/
if (migratetype_is_mergeable(mt))
@@ -539,7 +539,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
MIGRATE_MOVABLE);
}
}
-@@ -2826,8 +2850,6 @@ struct page *rmqueue_buddy(struct zone *
+@@ -2827,8 +2851,6 @@ struct page *rmqueue_buddy(struct zone *
return NULL;
}
}
@@ -548,7 +548,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
spin_unlock_irqrestore(&zone->lock, flags);
} while (check_new_pages(page, order));
-@@ -6715,8 +6737,9 @@ void __offline_isolated_pages(unsigned l
+@@ -6716,8 +6738,9 @@ void __offline_isolated_pages(unsigned l
BUG_ON(page_count(page));
BUG_ON(!PageBuddy(page));
@@ -559,7 +559,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
pfn += (1 << order);
}
spin_unlock_irqrestore(&zone->lock, flags);
-@@ -6766,10 +6789,10 @@ static void break_down_buddy_pages(struc
+@@ -6767,10 +6790,10 @@ static void break_down_buddy_pages(struc
current_buddy = page + size;
}
@@ -572,7 +572,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
set_buddy_order(current_buddy, high);
}
}
-@@ -6795,12 +6818,11 @@ bool take_page_off_buddy(struct page *pa
+@@ -6796,12 +6819,11 @@ bool take_page_off_buddy(struct page *pa
int migratetype = get_pfnblock_migratetype(page_head,
pfn_head);
@@ -587,7 +587,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
ret = true;
break;
}
-@@ -6908,7 +6930,7 @@ static bool try_to_accept_memory_one(str
+@@ -6909,7 +6931,7 @@ static bool try_to_accept_memory_one(str
list_del(&page->lru);
last = list_empty(&zone->unaccepted_pages);
@@ -596,7 +596,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
spin_unlock_irqrestore(&zone->lock, flags);
-@@ -6960,7 +6982,7 @@ static bool __free_unaccepted(struct pag
+@@ -6961,7 +6983,7 @@ static bool __free_unaccepted(struct pag
spin_lock_irqsave(&zone->lock, flags);
first = list_empty(&zone->unaccepted_pages);
list_add_tail(&page->lru, &zone->unaccepted_pages);
diff --git a/patches/mm-page_alloc-fix-freelist-movement-during-block-conversion.patch b/patches/mm-page_alloc-fix-freelist-movement-during-block-conversion.patch
index 6b8173fa2..d9d53b4c2 100644
--- a/patches/mm-page_alloc-fix-freelist-movement-during-block-conversion.patch
+++ b/patches/mm-page_alloc-fix-freelist-movement-during-block-conversion.patch
@@ -29,6 +29,7 @@ Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Zi Yan <ziy@nvidia.com>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
diff --git a/patches/mm-page_alloc-set-migratetype-inside-move_freepages.patch b/patches/mm-page_alloc-set-migratetype-inside-move_freepages.patch
index f9a5384a1..f2d0c65e2 100644
--- a/patches/mm-page_alloc-set-migratetype-inside-move_freepages.patch
+++ b/patches/mm-page_alloc-set-migratetype-inside-move_freepages.patch
@@ -86,7 +86,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (ret > 0) {
spin_unlock_irqrestore(&zone->lock, flags);
return ret;
-@@ -2681,10 +2681,9 @@ int __isolate_free_page(struct page *pag
+@@ -2682,10 +2682,9 @@ int __isolate_free_page(struct page *pag
* Only change normal pageblocks (i.e., they can merge
* with others)
*/
diff --git a/patches/mm-page_alloc-use-the-correct-thp-order-for-thp-pcp.patch b/patches/mm-page_alloc-use-the-correct-thp-order-for-thp-pcp.patch
index 385cbf1a3..4c6864ba8 100644
--- a/patches/mm-page_alloc-use-the-correct-thp-order-for-thp-pcp.patch
+++ b/patches/mm-page_alloc-use-the-correct-thp-order-for-thp-pcp.patch
@@ -17,6 +17,7 @@ Fixes: 44042b449872 ("mm/page_alloc: allow high-order pages to be stored on the
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
+Reviewed-by: Barry Song <baohua@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
diff --git a/patches/mm-page_allocc-dont-show-protection-in-zones-lowmem_reserve-for-empty-zone.patch b/patches/mm-page_allocc-dont-show-protection-in-zones-lowmem_reserve-for-empty-zone.patch
index 473fb331f..26392b8c6 100644
--- a/patches/mm-page_allocc-dont-show-protection-in-zones-lowmem_reserve-for-empty-zone.patch
+++ b/patches/mm-page_allocc-dont-show-protection-in-zones-lowmem_reserve-for-empty-zone.patch
@@ -101,7 +101,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
#else
static inline bool deferred_pages_enabled(void)
-@@ -5908,10 +5908,11 @@ static void setup_per_zone_lowmem_reserv
+@@ -5909,10 +5909,11 @@ static void setup_per_zone_lowmem_reserv
for (j = i + 1; j < MAX_NR_ZONES; j++) {
struct zone *upper_zone = &pgdat->node_zones[j];
diff --git a/patches/mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists-v2.patch b/patches/mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists-v2.patch
index 23118d6b3..ea073a8ec 100644
--- a/patches/mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists-v2.patch
+++ b/patches/mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists-v2.patch
@@ -14,7 +14,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/page_alloc.c~mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists-v2
+++ a/mm/page_alloc.c
-@@ -5285,12 +5285,9 @@ static void setup_min_slab_ratio(void);
+@@ -5286,12 +5286,9 @@ static void setup_min_slab_ratio(void);
static void build_zonelists(pg_data_t *pgdat)
{
diff --git a/patches/mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists.patch b/patches/mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists.patch
index 1f5772037..3685fd5c5 100644
--- a/patches/mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists.patch
+++ b/patches/mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists.patch
@@ -21,7 +21,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/page_alloc.c~mm-page_allocc-remove-unneeded-codes-in-numa-version-of-build_zonelists
+++ a/mm/page_alloc.c
-@@ -5295,27 +5295,6 @@ static void build_zonelists(pg_data_t *p
+@@ -5296,27 +5296,6 @@ static void build_zonelists(pg_data_t *p
nr_zones = build_zonerefs_node(pgdat, zonerefs);
zonerefs += nr_zones;
diff --git a/patches/mm-page_isolation-prepare-for-hygienic-freelists.patch b/patches/mm-page_isolation-prepare-for-hygienic-freelists.patch
index e8b729ee2..c45261208 100644
--- a/patches/mm-page_isolation-prepare-for-hygienic-freelists.patch
+++ b/patches/mm-page_isolation-prepare-for-hygienic-freelists.patch
@@ -280,7 +280,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static void change_pageblock_range(struct page *pageblock_page,
int start_order, int migratetype)
{
-@@ -6364,7 +6419,6 @@ int alloc_contig_range_noprof(unsigned l
+@@ -6365,7 +6420,6 @@ int alloc_contig_range_noprof(unsigned l
unsigned migratetype, gfp_t gfp_mask)
{
unsigned long outer_start, outer_end;
@@ -288,7 +288,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
int ret = 0;
struct compact_control cc = {
-@@ -6437,29 +6491,7 @@ int alloc_contig_range_noprof(unsigned l
+@@ -6438,29 +6492,7 @@ int alloc_contig_range_noprof(unsigned l
* We don't have to hold zone->lock here because the pages are
* isolated thus they won't get removed from buddy.
*/
diff --git a/patches/mm-record-the-migration-reason-for-struct-migration_target_control.patch b/patches/mm-record-the-migration-reason-for-struct-migration_target_control.patch
index 604dcbaa2..4c02963a2 100644
--- a/patches/mm-record-the-migration-reason-for-struct-migration_target_control.patch
+++ b/patches/mm-record-the-migration-reason-for-struct-migration_target_control.patch
@@ -110,7 +110,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
err = migrate_pages(pagelist, alloc_migration_target, NULL,
--- a/mm/page_alloc.c~mm-record-the-migration-reason-for-struct-migration_target_control
+++ a/mm/page_alloc.c
-@@ -6356,6 +6356,7 @@ int __alloc_contig_migrate_range(struct
+@@ -6357,6 +6357,7 @@ int __alloc_contig_migrate_range(struct
struct migration_target_control mtc = {
.nid = zone_to_nid(cc->zone),
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
diff --git a/patches/mm-swap-remove-cluster_flag_huge-from-swap_cluster_info-flags.patch b/patches/mm-swap-remove-cluster_flag_huge-from-swap_cluster_info-flags.patch
index 7ab185524..aba87fc6c 100644
--- a/patches/mm-swap-remove-cluster_flag_huge-from-swap_cluster_info-flags.patch
+++ b/patches/mm-swap-remove-cluster_flag_huge-from-swap_cluster_info-flags.patch
@@ -86,8 +86,8 @@ Link: https://lkml.kernel.org/r/20240403114032.1162100-2-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Acked-by: Chris Li <chrisl@kernel.org>
+Acked-by: David Hildenbrand <david@redhat.com>
Cc: Barry Song <21cnbao@gmail.com>
-Cc: David Hildenbrand <david@redhat.com>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Lance Yang <ioworker0@gmail.com>
diff --git a/patches/mm-vmalloc-eliminated-the-lock-contention-from-twice-to-once.patch b/patches/mm-vmalloc-eliminated-the-lock-contention-from-twice-to-once.patch
index c9a6c5be0..887e49046 100644
--- a/patches/mm-vmalloc-eliminated-the-lock-contention-from-twice-to-once.patch
+++ b/patches/mm-vmalloc-eliminated-the-lock-contention-from-twice-to-once.patch
@@ -39,7 +39,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmalloc.c~mm-vmalloc-eliminated-the-lock-contention-from-twice-to-once
+++ a/mm/vmalloc.c
-@@ -1927,15 +1927,26 @@ node_alloc(unsigned long size, unsigned
+@@ -1926,15 +1926,26 @@ node_alloc(unsigned long size, unsigned
return va;
}
@@ -68,7 +68,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
struct vmap_node *vn;
struct vmap_area *va;
-@@ -1998,6 +2009,9 @@ retry:
+@@ -1997,6 +2008,9 @@ retry:
va->vm = NULL;
va->flags = (va_flags | vn_id);
@@ -78,7 +78,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
vn = addr_to_node(va->va_start);
spin_lock(&vn->busy.lock);
-@@ -2575,7 +2589,8 @@ static void *new_vmap_block(unsigned int
+@@ -2574,7 +2588,8 @@ static void *new_vmap_block(unsigned int
va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
VMALLOC_START, VMALLOC_END,
node, gfp_mask,
@@ -88,7 +88,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (IS_ERR(va)) {
kfree(vb);
return ERR_CAST(va);
-@@ -2932,7 +2947,8 @@ void *vm_map_ram(struct page **pages, un
+@@ -2931,7 +2946,8 @@ void *vm_map_ram(struct page **pages, un
struct vmap_area *va;
va = alloc_vmap_area(size, PAGE_SIZE,
VMALLOC_START, VMALLOC_END,
@@ -98,7 +98,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (IS_ERR(va))
return NULL;
-@@ -3035,26 +3051,6 @@ void __init vm_area_register_early(struc
+@@ -3034,26 +3050,6 @@ void __init vm_area_register_early(struc
kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
}
@@ -125,7 +125,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
/*
-@@ -3091,14 +3087,12 @@ static struct vm_struct *__get_vm_area_n
+@@ -3090,14 +3086,12 @@ static struct vm_struct *__get_vm_area_n
if (!(flags & VM_NO_GUARD))
size += PAGE_SIZE;
@@ -141,7 +141,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/*
* Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
* best-effort approach, as they can be mapped outside of vmalloc code.
-@@ -4673,7 +4667,7 @@ retry:
+@@ -4672,7 +4666,7 @@ retry:
spin_lock(&vn->busy.lock);
insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
diff --git a/patches/mm-vmalloc-enable-memory-allocation-profiling-fix-2.patch b/patches/mm-vmalloc-enable-memory-allocation-profiling-fix-2.patch
index dc73531dc..ffe1efac4 100644
--- a/patches/mm-vmalloc-enable-memory-allocation-profiling-fix-2.patch
+++ b/patches/mm-vmalloc-enable-memory-allocation-profiling-fix-2.patch
@@ -87,7 +87,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
*/
--- a/mm/vmalloc.c~mm-vmalloc-enable-memory-allocation-profiling-fix-2
+++ a/mm/vmalloc.c
-@@ -3704,7 +3704,7 @@ fail:
+@@ -3703,7 +3703,7 @@ fail:
}
/**
@@ -96,7 +96,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
* @align: desired alignment
* @start: vm area range start
-@@ -3860,7 +3860,7 @@ fail:
+@@ -3859,7 +3859,7 @@ fail:
}
/**
@@ -105,7 +105,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
* @align: desired alignment
* @gfp_mask: flags for the page level allocator
-@@ -3940,7 +3940,7 @@ void *vmalloc_huge_noprof(unsigned long
+@@ -3939,7 +3939,7 @@ void *vmalloc_huge_noprof(unsigned long
EXPORT_SYMBOL_GPL(vmalloc_huge_noprof);
/**
@@ -114,7 +114,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
*
* Allocate enough pages to cover @size from the page level
-@@ -3978,7 +3978,7 @@ void *vmalloc_user_noprof(unsigned long
+@@ -3977,7 +3977,7 @@ void *vmalloc_user_noprof(unsigned long
EXPORT_SYMBOL(vmalloc_user_noprof);
/**
@@ -123,7 +123,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
* @node: numa node
*
-@@ -3998,7 +3998,7 @@ void *vmalloc_node_noprof(unsigned long
+@@ -3997,7 +3997,7 @@ void *vmalloc_node_noprof(unsigned long
EXPORT_SYMBOL(vmalloc_node_noprof);
/**
@@ -132,7 +132,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
* @node: numa node
*
-@@ -4028,7 +4028,7 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
+@@ -4027,7 +4027,7 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
#endif
/**
@@ -141,7 +141,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
*
* Allocate enough 32bit PA addressable pages to cover @size from the
-@@ -4044,7 +4044,7 @@ void *vmalloc_32_noprof(unsigned long si
+@@ -4043,7 +4043,7 @@ void *vmalloc_32_noprof(unsigned long si
EXPORT_SYMBOL(vmalloc_32_noprof);
/**
diff --git a/patches/mm-vmalloc-enable-memory-allocation-profiling.patch b/patches/mm-vmalloc-enable-memory-allocation-profiling.patch
index 043cbbe9c..996cf9cf4 100644
--- a/patches/mm-vmalloc-enable-memory-allocation-profiling.patch
+++ b/patches/mm-vmalloc-enable-memory-allocation-profiling.patch
@@ -383,7 +383,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
--- a/mm/vmalloc.c~mm-vmalloc-enable-memory-allocation-profiling
+++ a/mm/vmalloc.c
-@@ -3524,12 +3524,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
+@@ -3523,12 +3523,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
* but mempolicy wants to alloc memory by interleaving.
*/
if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
@@ -398,7 +398,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
nr_pages_request,
pages + nr_allocated);
-@@ -3559,9 +3559,9 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
+@@ -3558,9 +3558,9 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
break;
if (nid == NUMA_NO_NODE)
@@ -410,7 +410,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (unlikely(!page)) {
if (!nofail)
break;
-@@ -3618,10 +3618,10 @@ static void *__vmalloc_area_node(struct
+@@ -3617,10 +3617,10 @@ static void *__vmalloc_area_node(struct
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
@@ -423,7 +423,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
if (!area->pages) {
-@@ -3704,7 +3704,7 @@ fail:
+@@ -3703,7 +3703,7 @@ fail:
}
/**
@@ -432,7 +432,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
* @align: desired alignment
* @start: vm area range start
-@@ -3731,7 +3731,7 @@ fail:
+@@ -3730,7 +3730,7 @@ fail:
*
* Return: the address of the area or %NULL on failure
*/
@@ -441,7 +441,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller)
-@@ -3860,7 +3860,7 @@ fail:
+@@ -3859,7 +3859,7 @@ fail:
}
/**
@@ -450,7 +450,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
* @align: desired alignment
* @gfp_mask: flags for the page level allocator
-@@ -3878,10 +3878,10 @@ fail:
+@@ -3877,10 +3877,10 @@ fail:
*
* Return: pointer to the allocated memory or %NULL on error
*/
@@ -463,7 +463,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
gfp_mask, PAGE_KERNEL, 0, node, caller);
}
/*
-@@ -3890,15 +3890,15 @@ void *__vmalloc_node(unsigned long size,
+@@ -3889,15 +3889,15 @@ void *__vmalloc_node(unsigned long size,
* than that.
*/
#ifdef CONFIG_TEST_VMALLOC_MODULE
@@ -483,7 +483,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/**
* vmalloc - allocate virtually contiguous memory
-@@ -3912,12 +3912,12 @@ EXPORT_SYMBOL(__vmalloc);
+@@ -3911,12 +3911,12 @@ EXPORT_SYMBOL(__vmalloc);
*
* Return: pointer to the allocated memory or %NULL on error
*/
@@ -499,7 +499,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/**
* vmalloc_huge - allocate virtually contiguous memory, allow huge pages
-@@ -3931,16 +3931,16 @@ EXPORT_SYMBOL(vmalloc);
+@@ -3930,16 +3930,16 @@ EXPORT_SYMBOL(vmalloc);
*
* Return: pointer to the allocated memory or %NULL on error
*/
@@ -520,7 +520,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
*
* Allocate enough pages to cover @size from the page level
-@@ -3952,12 +3952,12 @@ EXPORT_SYMBOL_GPL(vmalloc_huge);
+@@ -3951,12 +3951,12 @@ EXPORT_SYMBOL_GPL(vmalloc_huge);
*
* Return: pointer to the allocated memory or %NULL on error
*/
@@ -536,7 +536,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/**
* vmalloc_user - allocate zeroed virtually contiguous memory for userspace
-@@ -3968,17 +3968,17 @@ EXPORT_SYMBOL(vzalloc);
+@@ -3967,17 +3967,17 @@ EXPORT_SYMBOL(vzalloc);
*
* Return: pointer to the allocated memory or %NULL on error
*/
@@ -558,7 +558,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
* @node: numa node
*
-@@ -3990,15 +3990,15 @@ EXPORT_SYMBOL(vmalloc_user);
+@@ -3989,15 +3989,15 @@ EXPORT_SYMBOL(vmalloc_user);
*
* Return: pointer to the allocated memory or %NULL on error
*/
@@ -578,7 +578,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
* @node: numa node
*
-@@ -4008,12 +4008,12 @@ EXPORT_SYMBOL(vmalloc_node);
+@@ -4007,12 +4007,12 @@ EXPORT_SYMBOL(vmalloc_node);
*
* Return: pointer to the allocated memory or %NULL on error
*/
@@ -594,7 +594,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
-@@ -4028,7 +4028,7 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -4027,7 +4027,7 @@ EXPORT_SYMBOL(vzalloc_node);
#endif
/**
@@ -603,7 +603,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
*
* Allocate enough 32bit PA addressable pages to cover @size from the
-@@ -4036,15 +4036,15 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -4035,15 +4035,15 @@ EXPORT_SYMBOL(vzalloc_node);
*
* Return: pointer to the allocated memory or %NULL on error
*/
@@ -623,7 +623,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @size: allocation size
*
* The resulting memory area is 32bit addressable and zeroed so it can be
-@@ -4052,14 +4052,14 @@ EXPORT_SYMBOL(vmalloc_32);
+@@ -4051,14 +4051,14 @@ EXPORT_SYMBOL(vmalloc_32);
*
* Return: pointer to the allocated memory or %NULL on error
*/
diff --git a/patches/mm-vmallocc-optimize-to-reduce-arguments-of-alloc_vmap_area.patch b/patches/mm-vmallocc-optimize-to-reduce-arguments-of-alloc_vmap_area.patch
index 05657680f..0151928b0 100644
--- a/patches/mm-vmallocc-optimize-to-reduce-arguments-of-alloc_vmap_area.patch
+++ b/patches/mm-vmallocc-optimize-to-reduce-arguments-of-alloc_vmap_area.patch
@@ -20,7 +20,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmalloc.c~mm-vmallocc-optimize-to-reduce-arguments-of-alloc_vmap_area
+++ a/mm/vmalloc.c
-@@ -1945,8 +1945,7 @@ static struct vmap_area *alloc_vmap_area
+@@ -1944,8 +1944,7 @@ static struct vmap_area *alloc_vmap_area
unsigned long align,
unsigned long vstart, unsigned long vend,
int node, gfp_t gfp_mask,
@@ -30,7 +30,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
struct vmap_node *vn;
struct vmap_area *va;
-@@ -2009,8 +2008,11 @@ retry:
+@@ -2008,8 +2007,11 @@ retry:
va->vm = NULL;
va->flags = (va_flags | vn_id);
@@ -44,7 +44,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
vn = addr_to_node(va->va_start);
-@@ -2589,8 +2591,7 @@ static void *new_vmap_block(unsigned int
+@@ -2588,8 +2590,7 @@ static void *new_vmap_block(unsigned int
va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
VMALLOC_START, VMALLOC_END,
node, gfp_mask,
@@ -54,7 +54,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (IS_ERR(va)) {
kfree(vb);
return ERR_CAST(va);
-@@ -2948,7 +2949,7 @@ void *vm_map_ram(struct page **pages, un
+@@ -2947,7 +2948,7 @@ void *vm_map_ram(struct page **pages, un
va = alloc_vmap_area(size, PAGE_SIZE,
VMALLOC_START, VMALLOC_END,
node, GFP_KERNEL, VMAP_RAM,
@@ -63,7 +63,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (IS_ERR(va))
return NULL;
-@@ -3087,7 +3088,10 @@ static struct vm_struct *__get_vm_area_n
+@@ -3086,7 +3087,10 @@ static struct vm_struct *__get_vm_area_n
if (!(flags & VM_NO_GUARD))
size += PAGE_SIZE;
diff --git a/pc/devel-series b/pc/devel-series
index 8a9b9746c..64f59cbd6 100644
--- a/pc/devel-series
+++ b/pc/devel-series
@@ -424,10 +424,12 @@ selftests-mm-mremap_test-optimize-execution-time-from-minutes-to-seconds-using-c
selftests-mm-mremap_test-use-sscanf-to-parse-proc-self-maps.patch
#
khugepaged-inline-hpage_collapse_alloc_folio.patch
+#khugepaged-convert-alloc_charge_hpage-to-alloc_charge_folio.patch: https://lkml.kernel.org/r/ZhBpotSop-UJIC2n@fedora
khugepaged-convert-alloc_charge_hpage-to-alloc_charge_folio.patch
khugepaged-remove-hpage-from-collapse_huge_page.patch
khugepaged-pass-a-folio-to-__collapse_huge_page_copy.patch
khugepaged-remove-hpage-from-collapse_file.patch
+#khugepaged-use-a-folio-throughout-collapse_file.patch: https://lkml.kernel.org/r/ZhBrHNET9X5RiBuF@fedora
khugepaged-use-a-folio-throughout-collapse_file.patch
khugepaged-use-a-folio-throughout-hpage_collapse_scan_file.patch
#
@@ -493,7 +495,7 @@ mm-convert-free_zone_device_page-to-free_zone_device_folio.patch
#
mm-set-pageblock_order-to-hpage_pmd_order-in-case-with-config_hugetlb_page-but-thp-enabled.patch
#
-#mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch+1: docs? acks?
+#mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch+1: docs? acks? TBU?
mm-add-per-order-mthp-anon_alloc-and-anon_alloc_fallback-counters.patch
mm-add-per-order-mthp-anon_swpout-and-anon_swpout_fallback-counters.patch
#
diff --git a/txt/khugepaged-inline-hpage_collapse_alloc_folio.txt b/txt/khugepaged-inline-hpage_collapse_alloc_folio.txt
index b3dea29fd..6604d324d 100644
--- a/txt/khugepaged-inline-hpage_collapse_alloc_folio.txt
+++ b/txt/khugepaged-inline-hpage_collapse_alloc_folio.txt
@@ -19,3 +19,4 @@ read, reason about and modify.
Link: https://lkml.kernel.org/r/20240403171838.1445826-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20240403171838.1445826-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
diff --git a/txt/khugepaged-pass-a-folio-to-__collapse_huge_page_copy.txt b/txt/khugepaged-pass-a-folio-to-__collapse_huge_page_copy.txt
index 57801fe5e..ee27b375e 100644
--- a/txt/khugepaged-pass-a-folio-to-__collapse_huge_page_copy.txt
+++ b/txt/khugepaged-pass-a-folio-to-__collapse_huge_page_copy.txt
@@ -7,3 +7,4 @@ it.
Link: https://lkml.kernel.org/r/20240403171838.1445826-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
diff --git a/txt/khugepaged-remove-hpage-from-collapse_file.txt b/txt/khugepaged-remove-hpage-from-collapse_file.txt
index a99070976..870a811bb 100644
--- a/txt/khugepaged-remove-hpage-from-collapse_file.txt
+++ b/txt/khugepaged-remove-hpage-from-collapse_file.txt
@@ -6,3 +6,4 @@ Use new_folio throughout where we had been using hpage.
Link: https://lkml.kernel.org/r/20240403171838.1445826-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
diff --git a/txt/khugepaged-remove-hpage-from-collapse_huge_page.txt b/txt/khugepaged-remove-hpage-from-collapse_huge_page.txt
index 9f9ab451d..a48ac51df 100644
--- a/txt/khugepaged-remove-hpage-from-collapse_huge_page.txt
+++ b/txt/khugepaged-remove-hpage-from-collapse_huge_page.txt
@@ -7,3 +7,4 @@ in put_page().
Link: https://lkml.kernel.org/r/20240403171838.1445826-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
diff --git a/txt/khugepaged-use-a-folio-throughout-hpage_collapse_scan_file.txt b/txt/khugepaged-use-a-folio-throughout-hpage_collapse_scan_file.txt
index f1fb6be8c..ee6c64888 100644
--- a/txt/khugepaged-use-a-folio-throughout-hpage_collapse_scan_file.txt
+++ b/txt/khugepaged-use-a-folio-throughout-hpage_collapse_scan_file.txt
@@ -8,3 +8,4 @@ compound_head() and removes some uses of obsolete functions.
Link: https://lkml.kernel.org/r/20240403171838.1445826-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>