-rw-r--r--  include/linux/huge_mm.h |  4
-rw-r--r--  mm/huge_memory.c        |  4
-rw-r--r--  mm/khugepaged.c         |  2
-rw-r--r--  mm/mempolicy.c          | 15
-rw-r--r--  mm/page_alloc.c         |  7
5 files changed, 16 insertions(+), 16 deletions(-)
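Taken together, the diff converts prep_transhuge_page() to folio_prep_large_rmappable(): the helper now takes the folio type it actually operates on, and the page-to-folio cast moves out of the callee and into each caller. The short sketches interleaved below model the recurring patterns in plain user-space C; every struct page/struct folio in them is a simplified stand-in for illustration, not the kernel's real definition.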
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ceda26a208306..fa0350b0812ab 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -140,7 +140,7 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags);
 
-void prep_transhuge_page(struct page *page);
+void folio_prep_large_rmappable(struct folio *folio);
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
@@ -280,7 +280,7 @@ static inline bool hugepage_vma_check(struct vm_area_struct *vma,
 	return false;
 }
 
-static inline void prep_transhuge_page(struct page *page) {}
+static inline void folio_prep_large_rmappable(struct folio *folio) {}
 
 #define transparent_hugepage_flags 0UL
 
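The header change captures the whole shape of the refactor: the parameter type changes from struct page * to struct folio *, so the cast happens once at each call site instead of inside the function. A minimal sketch of that move, with hypothetical prep_old/prep_new names and simplified stand-in types:

#include <stdio.h>

/*
 * Simplified stand-ins. In the kernel, struct folio is laid out so that a
 * pointer to a head struct page can legally be cast to struct folio *.
 */
struct page { unsigned long flags; };
struct folio { struct page page; };

/* Before: the callee took a page and did the cast internally. */
static void prep_old(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	printf("prepped folio %p\n", (void *)folio);
}

/* After: the callee takes a folio; each caller casts once. */
static void prep_new(struct folio *folio)
{
	printf("prepped folio %p\n", (void *)folio);
}

int main(void)
{
	struct page p = { 0 };

	prep_old(&p);			/* old calling convention */
	prep_new((struct folio *)&p);	/* new: cast at the call site */
	return 0;
}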
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b33456683b935..5817bf77f1f07 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -577,10 +577,8 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
 }
 #endif
 
-void prep_transhuge_page(struct page *page)
+void folio_prep_large_rmappable(struct folio *folio)
 {
-	struct folio *folio = (struct folio *)page;
-
 	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
 	INIT_LIST_HEAD(&folio->_deferred_list);
 	folio_set_compound_dtor(folio, TRANSHUGE_PAGE_DTOR);
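For reference, what the renamed helper does: it asserts the folio is at least order 2 (the deferred-split list head lives in a tail page of the compound page, so smaller folios have nowhere to keep it), initializes that list head, and sets the THP compound destructor. A user-space sketch of the same logic under those assumptions, with a hypothetical simplified struct folio:

#include <assert.h>
#include <stdio.h>

/* Minimal circular-list stand-in for the kernel's struct list_head. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

/*
 * Hypothetical simplified folio: the real _deferred_list is stored in a
 * tail page, which is why order >= 2 is required.
 */
struct folio {
	unsigned int order;
	struct list_head _deferred_list;
};

static void folio_prep_large_rmappable(struct folio *folio)
{
	assert(folio->order >= 2);	/* models VM_BUG_ON_FOLIO() */
	INIT_LIST_HEAD(&folio->_deferred_list);
	/* the kernel additionally sets TRANSHUGE_PAGE_DTOR here */
}

int main(void)
{
	struct folio f = { .order = 9 };	/* e.g. a 2MB THP on x86-64 */

	folio_prep_large_rmappable(&f);
	printf("list empty: %d\n", f._deferred_list.next == &f._deferred_list);
	return 0;
}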
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9a6e0d5077593..40d43eccdee86 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -896,7 +896,7 @@ static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
 		return false;
 	}
 
-	prep_transhuge_page(*hpage);
+	folio_prep_large_rmappable((struct folio *)*hpage);
 	count_vm_event(THP_COLLAPSE_ALLOC);
 	return true;
 }
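The khugepaged call site is the one caller that still holds a bare page, so it casts inline; note the dereference first, since hpage is a struct page ** out-parameter. A small sketch of that shape (fake_alloc_page and alloc_and_prep are made-up names for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long flags; };
struct folio { struct page page; };

/* Hypothetical allocator stand-in; returns NULL on failure like the real one. */
static struct page *fake_alloc_page(void)
{
	return calloc(1, sizeof(struct folio));
}

/*
 * Models hpage_collapse_alloc_page(): the page comes back through an
 * out-parameter, so the caller dereferences *hpage before casting.
 */
static bool alloc_and_prep(struct page **hpage)
{
	struct folio *folio;

	*hpage = fake_alloc_page();
	if (!*hpage)
		return false;

	folio = (struct folio *)*hpage;	/* dereference, then cast */
	(void)folio;	/* kernel: folio_prep_large_rmappable(folio) */
	return true;
}

int main(void)
{
	struct page *hpage;

	printf("ok: %d\n", alloc_and_prep(&hpage));
	free(hpage);
	return 0;
}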
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ec2eaceffd74b..42b5567e37738 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2195,9 +2195,9 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		mpol_cond_put(pol);
 		gfp |= __GFP_COMP;
 		page = alloc_page_interleave(gfp, order, nid);
-		if (page && order > 1)
-			prep_transhuge_page(page);
 		folio = (struct folio *)page;
+		if (folio && order > 1)
+			folio_prep_large_rmappable(folio);
 		goto out;
 	}
 
@@ -2208,9 +2208,9 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		gfp |= __GFP_COMP;
 		page = alloc_pages_preferred_many(gfp, order, node, pol);
 		mpol_cond_put(pol);
-		if (page && order > 1)
-			prep_transhuge_page(page);
 		folio = (struct folio *)page;
+		if (folio && order > 1)
+			folio_prep_large_rmappable(folio);
 		goto out;
 	}
 
@@ -2306,10 +2306,11 @@ EXPORT_SYMBOL(alloc_pages);
 struct folio *folio_alloc(gfp_t gfp, unsigned order)
 {
 	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
+	struct folio *folio = (struct folio *)page;
 
-	if (page && order > 1)
-		prep_transhuge_page(page);
-	return (struct folio *)page;
+	if (folio && order > 1)
+		folio_prep_large_rmappable(folio);
+	return folio;
 }
 EXPORT_SYMBOL(folio_alloc);
 
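All three mempolicy.c hunks hoist the cast above the NULL check: folio = (struct folio *)page now runs unconditionally, and the check tests folio instead of page. That is safe because a pointer cast preserves NULL-ness, as this stand-in-type sketch spells out:

#include <stdio.h>

struct page { unsigned long flags; };
struct folio { struct page page; };

int main(void)
{
	struct page *page = NULL;	/* model a failed allocation */
	struct folio *folio = (struct folio *)page;

	/*
	 * Casting never changes NULL-ness, so "if (folio && order > 1)"
	 * is exactly equivalent to the old "if (page && order > 1)".
	 */
	printf("folio is %s\n", folio ? "non-NULL" : "NULL");
	return 0;
}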
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4047b58974430..a97d6fa9cea0b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4489,10 +4489,11 @@ struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
 {
 	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
 			preferred_nid, nodemask);
+	struct folio *folio = (struct folio *)page;
 
-	if (page && order > 1)
-		prep_transhuge_page(page);
-	return (struct folio *)page;
+	if (folio && order > 1)
+		folio_prep_large_rmappable(folio);
+	return folio;
 }
 EXPORT_SYMBOL(__folio_alloc);
 
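folio_alloc() and __folio_alloc() end up with the same shape: allocate with __GFP_COMP, cast exactly once, prep only multi-order folios, and return the folio variable rather than re-casting on the way out. A compact user-space model of that post-patch wrapper (all fake_* names are hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long flags; };
struct folio { struct page page; };

/* Hypothetical stand-ins for the page allocator and the prep hook. */
static struct page *fake_alloc_pages(unsigned int order)
{
	return calloc(1UL << order, sizeof(struct folio));
}

static void fake_prep_large_rmappable(struct folio *folio)
{
	folio->page.flags |= 1;	/* pretend to mark the folio prepped */
}

/*
 * Models the post-patch shape of folio_alloc()/__folio_alloc():
 * allocate, cast exactly once, prep only multi-order folios, return.
 */
static struct folio *folio_alloc_model(unsigned int order)
{
	struct page *page = fake_alloc_pages(order);
	struct folio *folio = (struct folio *)page;

	if (folio && order > 1)
		fake_prep_large_rmappable(folio);
	return folio;
}

int main(void)
{
	struct folio *folio = folio_alloc_model(2);

	printf("prepped: %lu\n", folio ? folio->page.flags : 0);
	free(folio);
	return 0;
}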