author     Matthew Wilcox (Oracle) <willy@infradead.org>    2021-03-10 10:34:00 -0500
committer  Matthew Wilcox (Oracle) <willy@infradead.org>    2022-01-04 13:15:33 -0500
commit     a5d4ad098528f5753340e7df1bf903fa2c03f599 (patch)
tree       42d0f8786a169b4e4f4aa80834bb1b697e006d82    /mm/filemap.c
parent     9d427b4eb45689c5fca08ef0afe9215e72545df2 (diff)
filemap: Convert filemap_create_page to folio
This is all internal to filemap and saves 100 bytes of text.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--    mm/filemap.c    39
1 file changed, 20 insertions(+), 19 deletions(-)
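
In short, the patch swaps the page-based page-cache helpers for their folio equivalents. A minimal sketch of the substitution, drawn from the hunk below (the surrounding kernel context -- mapping, index, error -- is assumed from the existing function):

	/* Before: allocate a single page and insert it into the page cache / LRU */
	struct page *page = page_cache_alloc(mapping);
	error = add_to_page_cache_lru(page, mapping, index,
			mapping_gfp_constraint(mapping, GFP_KERNEL));

	/* After: allocate an order-0 folio and insert it via the folio API */
	struct folio *folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
	error = filemap_add_folio(mapping, folio, index,
			mapping_gfp_constraint(mapping, GFP_KERNEL));

The error paths change accordingly: put_page(page) becomes folio_put(folio), and filemap_read_folio() is passed the folio directly instead of page_folio(page).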
diff --git a/mm/filemap.c b/mm/filemap.c
index 581f9fdb340662..b044afef78efcb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2475,47 +2475,48 @@ unlock_mapping:
return error;
}
-static int filemap_create_page(struct file *file,
+static int filemap_create_folio(struct file *file,
struct address_space *mapping, pgoff_t index,
struct pagevec *pvec)
{
- struct page *page;
+ struct folio *folio;
int error;
- page = page_cache_alloc(mapping);
- if (!page)
+ folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
+ if (!folio)
return -ENOMEM;
/*
- * Protect against truncate / hole punch. Grabbing invalidate_lock here
- * assures we cannot instantiate and bring uptodate new pagecache pages
- * after evicting page cache during truncate and before actually
- * freeing blocks. Note that we could release invalidate_lock after
- * inserting the page into page cache as the locked page would then be
- * enough to synchronize with hole punching. But there are code paths
- * such as filemap_update_page() filling in partially uptodate pages or
- * ->readpages() that need to hold invalidate_lock while mapping blocks
- * for IO so let's hold the lock here as well to keep locking rules
- * simple.
+ * Protect against truncate / hole punch. Grabbing invalidate_lock
+ * here assures we cannot instantiate and bring uptodate new
+ * pagecache folios after evicting page cache during truncate
+ * and before actually freeing blocks. Note that we could
+ * release invalidate_lock after inserting the folio into
+ * the page cache as the locked folio would then be enough to
+ * synchronize with hole punching. But there are code paths
+ * such as filemap_update_page() filling in partially uptodate
+ * pages or ->readpages() that need to hold invalidate_lock
+ * while mapping blocks for IO so let's hold the lock here as
+ * well to keep locking rules simple.
*/
filemap_invalidate_lock_shared(mapping);
- error = add_to_page_cache_lru(page, mapping, index,
+ error = filemap_add_folio(mapping, folio, index,
mapping_gfp_constraint(mapping, GFP_KERNEL));
if (error == -EEXIST)
error = AOP_TRUNCATED_PAGE;
if (error)
goto error;
- error = filemap_read_folio(file, mapping, page_folio(page));
+ error = filemap_read_folio(file, mapping, folio);
if (error)
goto error;
filemap_invalidate_unlock_shared(mapping);
- pagevec_add(pvec, page);
+ pagevec_add(pvec, &folio->page);
return 0;
error:
filemap_invalidate_unlock_shared(mapping);
- put_page(page);
+ folio_put(folio);
return error;
}
@@ -2557,7 +2558,7 @@ retry:
if (!pagevec_count(pvec)) {
if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
return -EAGAIN;
- err = filemap_create_page(filp, mapping,
+ err = filemap_create_folio(filp, mapping,
iocb->ki_pos >> PAGE_SHIFT, pvec);
if (err == AOP_TRUNCATED_PAGE)
goto retry;
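
For readability, here is the converted helper assembled from the '+' and context lines of the first hunk. This is a reconstruction for reference, not a verbatim copy of upstream mm/filemap.c; the locking comment is abbreviated (see the full text in the hunk above):

	static int filemap_create_folio(struct file *file,
			struct address_space *mapping, pgoff_t index,
			struct pagevec *pvec)
	{
		struct folio *folio;
		int error;

		folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
		if (!folio)
			return -ENOMEM;

		/*
		 * Protect against truncate / hole punch; see the full
		 * rationale in the comment above.  invalidate_lock is held
		 * across insertion and read to keep the locking rules simple.
		 */
		filemap_invalidate_lock_shared(mapping);
		error = filemap_add_folio(mapping, folio, index,
				mapping_gfp_constraint(mapping, GFP_KERNEL));
		if (error == -EEXIST)
			error = AOP_TRUNCATED_PAGE;
		if (error)
			goto error;

		error = filemap_read_folio(file, mapping, folio);
		if (error)
			goto error;

		filemap_invalidate_unlock_shared(mapping);
		pagevec_add(pvec, &folio->page);
		return 0;
	error:
		filemap_invalidate_unlock_shared(mapping);
		folio_put(folio);
		return error;
	}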