author      Luis Chamberlain <mcgrof@kernel.org>    2024-04-23 17:43:08 -0700
committer   Luis Chamberlain <mcgrof@kernel.org>    2024-04-23 19:24:53 -0700
commit      5df9e5b00d5e43e2ba445ab1099df2eab8ab4162 (patch)
tree        1047b1a9918af91a797777742de80c9b2e1a2d01
parent      f2593908ff286890e347d5fc433a9750dba25647 (diff)
download    linux-20240423-lbs-clean.tar.gz
mm: split a folio in minimum folio order chunks
split_folio() and split_folio_to_list() assume order 0. To support
minorder we must expand these to check the minimum folio order of the
mapping and use that.
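The check relies on mapping_min_folio_order() from the preceding
patches in this series; it is not part of this diff. A minimal sketch
of the assumed definition (the AS_FOLIO_ORDER_MIN flag encoding is an
assumption borrowed from those patches) may help when reading the
hunks below:

static inline unsigned int mapping_min_folio_order(const struct address_space *mapping)
{
	/* Assumed encoding: the minimum folio order supported by this
	 * mapping is stored in a bitfield of mapping->flags, set by the
	 * filesystem when it opts in to large folios. */
	return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
}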
Set new_order to be at least the minimum folio order, if one is set, in
split_huge_page_to_list_to_order() so that we can maintain the minimum
folio order requirement in the page cache.
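The resulting order selection, pulled out here as an illustrative
sketch rather than a function in this patch: explicit requests below
the mapping's min-order fail, while split_folio_to_list() targets the
min-order itself.

static int pick_split_order(struct folio *folio, unsigned int new_order,
			    unsigned int *order_out)
{
	unsigned int min_order = 0;

	/* Anonymous memory has no mapping-imposed floor on the order. */
	if (!folio_test_anon(folio))
		min_order = mapping_min_folio_order(folio->mapping);

	/* Mirrors the VM_WARN_ONCE() + -EINVAL path added below. */
	if (new_order < min_order)
		return -EINVAL;

	*order_out = new_order;
	return 0;
}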
Update the debugfs split_huge_pages write handlers used for testing to
ensure the order is respected as well.
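For reference, a userspace sketch that exercises the debugfs knob
touched here; the file path, offsets, and order are hypothetical, and
the "<path>,<off_start>,<off_end>,<new_order>" request format follows
the parser feeding split_huge_pages_in_file():

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Request order-0 splits of a file's page cache; with this patch
	 * the kernel raises the target to the mapping's min-order. */
	const char *req = "/mnt/test/file,0x0,0x200,0";
	int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, req, strlen(req)) < 0)
		perror("write");
	close(fd);
	return 0;
}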
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
-rw-r--r--   include/linux/huge_mm.h | 12 ++++++++----
-rw-r--r--   mm/huge_memory.c        | 40 ++++++++++++++++++++++++++++++++++++--
2 files changed, 46 insertions(+), 6 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index de0c891050769..06748a8fa43be 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -87,6 +87,8 @@ extern struct kobj_attribute shmem_enabled_attr;
 #define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
 	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
 
+#define split_folio(f) split_folio_to_list(f, NULL)
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define HPAGE_PMD_SHIFT PMD_SHIFT
 #define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
@@ -267,9 +269,10 @@ void folio_prep_large_rmappable(struct folio *folio);
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		unsigned int new_order);
+int split_folio_to_list(struct folio *folio, struct list_head *list);
 static inline int split_huge_page(struct page *page)
 {
-	return split_huge_page_to_list_to_order(page, NULL, 0);
+	return split_folio(page_folio(page));
 }
 void deferred_split_folio(struct folio *folio);
 
@@ -432,6 +435,10 @@ static inline int split_huge_page(struct page *page)
 {
 	return 0;
 }
+static inline int split_folio_to_list(struct page *page, struct list_head *list)
+{
+	return 0;
+}
 static inline void deferred_split_folio(struct folio *folio) {}
 #define split_huge_pmd(__vma, __pmd, __address)	\
 	do { } while (0)
@@ -532,9 +539,6 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
 	return split_folio_to_list_to_order(folio, NULL, new_order);
 }
 
-#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
-#define split_folio(f) split_folio_to_order(f, 0)
-
 /*
  * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
  * limitations in the implementation like arm64 MTE can override this to
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8d186a5db996f..432ab95828b64 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3035,6 +3035,9 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
  * Returns 0 if the hugepage is split successfully.
  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
  * us.
+ *
+ * Callers should ensure that the order respects the address space mapping
+ * min-order if one is set.
  */
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 				     unsigned int new_order)
@@ -3059,11 +3062,20 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		return -EBUSY;
 
 	if (!folio_test_anon(folio)) {
+		unsigned int min_order;
+
 		/* Truncated ? */
 		if (!folio->mapping) {
 			ret = -EBUSY;
 			goto out;
 		}
+		min_order = mapping_min_folio_order(folio->mapping);
+		if (new_order < min_order) {
+			VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
+				     min_order);
+			ret = -EINVAL;
+			goto out;
+		}
 	} else if (new_order == 1) {
 		/* Cannot split anonymous THP to order-1 */
 		VM_WARN_ONCE(1, "Cannot split to order-1 folio");
@@ -3235,6 +3247,16 @@ out:
 	return ret;
 }
 
+int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+	unsigned int min_order = 0;
+
+	if (!folio_test_anon(folio))
+		min_order = mapping_min_folio_order(folio->mapping);
+
+	return split_huge_page_to_list_to_order(&folio->page, list, min_order);
+}
+
 void folio_undo_large_rmappable(struct folio *folio)
 {
 	struct deferred_split *ds_queue;
@@ -3497,6 +3519,15 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 		if (new_order >= folio_order(folio))
 			goto next;
 
+		if (!folio_test_anon(folio)) {
+			unsigned int min_order = mapping_min_folio_order(folio->mapping);
+			if (min_order > new_order) {
+				pr_debug("cannot split below min_order: %u\n",
+					 min_order);
+				goto next;
+			}
+		}
+
 		total++;
 		/*
 		 * For folios with private, split_huge_page_to_list_to_order()
@@ -3537,6 +3568,7 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
 	pgoff_t index, max_idx;
 	int nr_pages = 1;
 	unsigned long total = 0, split = 0;
+	unsigned int min_order;
 
 	file = getname_kernel(file_path);
 	if (IS_ERR(file))
@@ -3551,8 +3583,12 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
 	if (off_end > max_idx)
 		off_end = max_idx;
 
-	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
-		 file_path, off_start, off_end);
+	min_order = mapping_min_folio_order(mapping);
+	if (new_order < min_order)
+		new_order = min_order;
+
+	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx] with order: %u\n",
+		 file_path, off_start, off_end, new_order);
 
 	for (index = off_start; index < off_end; index += nr_pages) {
 		struct folio *folio = filemap_get_folio(mapping, index);
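A worked example of what splitting in minimum folio order chunks
means, assuming 4 KiB base pages (the orders and sizes here are
illustrative, not from the patch):

#include <stdio.h>

int main(void)
{
	/* An order-9 (2 MiB) page-cache folio on a mapping with
	 * min-order 2 (16 KiB) now splits into 2^(9-2) = 128 order-2
	 * folios instead of 512 order-0 pages. */
	unsigned int folio_order = 9, min_order = 2;

	printf("%u chunks of order %u\n",
	       1u << (folio_order - min_order), min_order);
	return 0;
}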