author		Matthew Wilcox (Oracle) <willy@infradead.org>	2024-02-27 17:42:43 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2024-03-04 17:01:24 -0800
commit		31b2ff82aefb33ce92496a1becddd6ce51060db2 (patch)
tree		2ba6d731853cab9b0913088f0006bd3a4ba329be /mm
parent		f1ee018baee9f4e724e08859c2559323be768be3 (diff)
download	linux-31b2ff82aefb33ce92496a1becddd6ce51060db2.tar.gz
mm: handle large folios in free_unref_folios()
Call folio_undo_large_rmappable() if needed.  free_unref_page_prepare()
destroys the ability to call folio_order(), so stash the order in
folio->private for the benefit of the second loop.

Link: https://lkml.kernel.org/r/20240227174254.710559-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
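To make the "stash the order" step concrete, here is a minimal standalone sketch (plain C with illustrative names, not kernel code): a small integer is parked in a pointer-sized private field before a destructive step and read back afterwards, which is what the patch does with folio->private between the two loops.

#include <stdio.h>

struct obj {
	void *private;		/* spare pointer-sized slot, like folio->private */
};

int main(void)
{
	struct obj o;
	unsigned int order = 3;

	/* first loop: the order is still derivable here, so stash it */
	o.private = (void *)(unsigned long)order;

	/* ... a destructive prepare step would run here ... */

	/* second loop: recover the stashed order and clear the slot */
	order = (unsigned long)o.private;
	o.private = NULL;
	printf("order = %u\n", order);
	return 0;
}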
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	25
1 file changed, 17 insertions(+), 8 deletions(-)
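For orientation before the diff: a hypothetical mm-internal caller (the helper name drop_folios() is made up for illustration; refcounting and locking details are omitted) would feed free_unref_folios() from a folio_batch roughly like this, and after this patch the batch may contain large folios as well as order-0 ones.

static void drop_folios(struct folio **folios, unsigned int nr)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < nr; i++) {
		/* folio_batch_add() returns the slots left; 0 means the batch is full */
		if (!folio_batch_add(&fbatch, folios[i])) {
			free_unref_folios(&fbatch);
			folio_batch_reinit(&fbatch);
		}
	}
	if (folio_batch_count(&fbatch))
		free_unref_folios(&fbatch);
}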
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 31d97322feea03..025ad1a7df7bc6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2516,7 +2516,7 @@ void free_unref_page(struct page *page, unsigned int order)
}
/*
- * Free a batch of 0-order pages
+ * Free a batch of folios
*/
void free_unref_folios(struct folio_batch *folios)
{
@@ -2529,19 +2529,25 @@ void free_unref_folios(struct folio_batch *folios)
for (i = 0, j = 0; i < folios->nr; i++) {
struct folio *folio = folios->folios[i];
unsigned long pfn = folio_pfn(folio);
- if (!free_unref_page_prepare(&folio->page, pfn, 0))
+ unsigned int order = folio_order(folio);
+
+ if (order > 0 && folio_test_large_rmappable(folio))
+ folio_undo_large_rmappable(folio);
+ if (!free_unref_page_prepare(&folio->page, pfn, order))
continue;
/*
- * Free isolated folios directly to the allocator, see
- * comment in free_unref_page.
+ * Free isolated folios and orders not handled on the PCP
+ * directly to the allocator, see comment in free_unref_page.
*/
migratetype = get_pcppage_migratetype(&folio->page);
- if (unlikely(is_migrate_isolate(migratetype))) {
+ if (!pcp_allowed_order(order) ||
+ is_migrate_isolate(migratetype)) {
free_one_page(folio_zone(folio), &folio->page, pfn,
- 0, migratetype, FPI_NONE);
+ order, migratetype, FPI_NONE);
continue;
}
+ folio->private = (void *)(unsigned long)order;
if (j != i)
folios->folios[j] = folio;
j++;
@@ -2551,7 +2557,9 @@ void free_unref_folios(struct folio_batch *folios)
for (i = 0; i < folios->nr; i++) {
struct folio *folio = folios->folios[i];
struct zone *zone = folio_zone(folio);
+ unsigned int order = (unsigned long)folio->private;
+ folio->private = NULL;
migratetype = get_pcppage_migratetype(&folio->page);
/* Different zone requires a different pcp lock */
@@ -2570,7 +2578,7 @@ void free_unref_folios(struct folio_batch *folios)
if (unlikely(!pcp)) {
pcp_trylock_finish(UP_flags);
free_one_page(zone, &folio->page,
- folio_pfn(folio), 0,
+ folio_pfn(folio), order,
migratetype, FPI_NONE);
locked_zone = NULL;
continue;
@@ -2586,7 +2594,8 @@ void free_unref_folios(struct folio_batch *folios)
migratetype = MIGRATE_MOVABLE;
trace_mm_page_free_batched(&folio->page);
- free_unref_page_commit(zone, pcp, &folio->page, migratetype, 0);
+ free_unref_page_commit(zone, pcp, &folio->page, migratetype,
+ order);
}
if (pcp) {