author	Luis Chamberlain <mcgrof@kernel.org>	2024-04-23 17:43:08 -0700
committer	Luis Chamberlain <mcgrof@kernel.org>	2024-04-23 19:03:55 -0700
commit	9cf528e9795349b84e2be8f2d01b234877894980 (patch)
tree	c270877b912fad4d2d5586bef725ad091e5b34b9
parent	5e45638123b62316ddc511f3f4bf7d3957f13bdc (diff)
mm: split a folio in minimum folio order chunks
Set new_order to be at least the minimum folio order if one is set on the mapping in split_huge_page_to_list_to_order(), so that we maintain the minimum folio order requirement in the page cache. The debugfs split_huge_pages interface is updated to respect the mapping's minimum order as well.

Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
-rw-r--r--	include/linux/huge_mm.h	12
-rw-r--r--	mm/huge_memory.c	62
-rw-r--r--	mm/page-writeback.c	1
3 files changed, 60 insertions(+), 15 deletions(-)
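Below is a minimal caller-side sketch of the contract this change establishes; the helper name example_split_to_order() is illustrative and not part of this patch, and it assumes mapping_min_folio_order() from the earlier large block size preparation patches:

	/*
	 * split_huge_page_to_list_to_order() now refuses (-EINVAL) orders
	 * below the mapping's minimum folio order for file-backed folios,
	 * so direct callers should clamp the target order first; anonymous
	 * folios are unaffected. The new split_folio_to_list() does this by
	 * always using min_order for file-backed folios.
	 */
	static int example_split_to_order(struct folio *folio,
					  struct list_head *list,
					  unsigned int new_order)
	{
		if (!folio_test_anon(folio))
			new_order = max(new_order,
					mapping_min_folio_order(folio->mapping));

		return split_huge_page_to_list_to_order(&folio->page, list,
							new_order);
	}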
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index de0c891050769..06748a8fa43be 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -87,6 +87,8 @@ extern struct kobj_attribute shmem_enabled_attr;
#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+#define split_folio(f) split_folio_to_list(f, NULL)
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
@@ -267,9 +269,10 @@ void folio_prep_large_rmappable(struct folio *folio);
bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
+int split_folio_to_list(struct folio *folio, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
- return split_huge_page_to_list_to_order(page, NULL, 0);
+ return split_folio(page_folio(page));
}
void deferred_split_folio(struct folio *folio);
@@ -432,6 +435,10 @@ static inline int split_huge_page(struct page *page)
{
return 0;
}
+static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ return 0;
+}
static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
@@ -532,9 +539,6 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
return split_folio_to_list_to_order(folio, NULL, new_order);
}
-#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
-#define split_folio(f) split_folio_to_order(f, 0)
-
/*
* archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
* limitations in the implementation like arm64 MTE can override this to
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e5ec74ca57cd3..0119f05ca3d95 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3035,6 +3035,9 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
* Returns 0 if the hugepage is split successfully.
* Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
* us.
+ *
+ * Callers should ensure that the order respects the address space mapping
+ * min-order if one is set.
*/
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order)
@@ -3055,6 +3058,27 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (new_order >= folio_order(folio))
return -EINVAL;
+ if (folio_test_writeback(folio))
+ return -EBUSY;
+
+ if (!folio_test_anon(folio)) {
+ unsigned int min_order;
+
+ /* Truncated ? */
+ if (!folio->mapping) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ min_order = mapping_min_folio_order(folio->mapping);
+ if (new_order < min_order) {
+ VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
+ min_order);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
/* Cannot split anonymous THP to order-1 */
if (new_order == 1 && folio_test_anon(folio)) {
VM_WARN_ONCE(1, "Cannot split to order-1 folio");
@@ -3086,9 +3110,6 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
return -EBUSY;
}
- if (folio_test_writeback(folio))
- return -EBUSY;
-
if (folio_test_anon(folio)) {
/*
* The caller does not necessarily hold an mmap_lock that would
@@ -3111,12 +3132,6 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
mapping = folio->mapping;
- /* Truncated ? */
- if (!mapping) {
- ret = -EBUSY;
- goto out;
- }
-
/*
* Do not split if mapping has minimum folio order
* requirement.
@@ -3198,6 +3213,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
int nr = folio_nr_pages(folio);
xas_split(&xas, folio, folio_order(folio));
+ BUG_ON(folio_test_hugetlb(folio));
if (folio_test_pmd_mappable(folio) &&
new_order < HPAGE_PMD_ORDER) {
if (folio_test_swapbacked(folio)) {
@@ -3236,6 +3252,16 @@ out:
return ret;
}
+int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ unsigned int min_order = 0;
+
+ if (!folio_test_anon(folio))
+ min_order = mapping_min_folio_order(folio->mapping);
+
+ return split_huge_page_to_list_to_order(&folio->page, list, min_order);
+}
+
void folio_undo_large_rmappable(struct folio *folio)
{
struct deferred_split *ds_queue;
@@ -3498,6 +3524,15 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
if (new_order >= folio_order(folio))
goto next;
+ if (!folio_test_anon(folio)) {
+ unsigned int min_order = mapping_min_folio_order(folio->mapping);
+ if (min_order > new_order) {
+ pr_debug("cannot split below min_order: %u\n",
+ min_order);
+ goto next;
+ }
+ }
+
total++;
/*
* For folios with private, split_huge_page_to_list_to_order()
@@ -3538,6 +3573,7 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
pgoff_t index, max_idx;
int nr_pages = 1;
unsigned long total = 0, split = 0;
+ unsigned int min_order;
file = getname_kernel(file_path);
if (IS_ERR(file))
@@ -3552,8 +3588,12 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
if (off_end > max_idx)
off_end = max_idx;
- pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
- file_path, off_start, off_end);
+ min_order = mapping_min_folio_order(mapping);
+ if (new_order < min_order)
+ new_order = min_order;
+
+ pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx] with order: %u\n",
+ file_path, off_start, off_end, new_order);
for (index = off_start; index < off_end; index += nr_pages) {
struct folio *folio = filemap_get_folio(mapping, index);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3e19b87049db1..2c554ce039ab3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2385,6 +2385,7 @@ static bool folio_prepare_writeback(struct address_space *mapping,
folio_wait_writeback(folio);
}
BUG_ON(folio_test_writeback(folio));
+ BUG_ON(folio->mapping != mapping);
if (!folio_clear_dirty_for_io(folio))
return false;