author     Linus Torvalds <torvalds@linux-foundation.org>  2022-05-24 19:55:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-05-24 19:55:07 -0700
commit     fdaf9a5840acaab18694a19e0eb0aa51162eeeed (patch)
tree       a027770138bccf9114cc83bafaa57accc13c91a6
parent     8642174b52214dde4d8113f28fb4c9be5a432126 (diff)
parent     516edb456f121e819d2130571004ed82f9566c4d (diff)
download   nf-fdaf9a5840acaab18694a19e0eb0aa51162eeeed.tar.gz
Merge tag 'folio-5.19' of git://git.infradead.org/users/willy/pagecache

Pull page cache updates from Matthew Wilcox:

 - Appoint myself page cache maintainer

 - Fix how scsicam uses the page cache

 - Use the memalloc_nofs_save() API to replace AOP_FLAG_NOFS

 - Remove the AOP flags entirely

 - Remove pagecache_write_begin() and pagecache_write_end()

 - Documentation updates

 - Convert several address_space operations to use folios:
     - is_dirty_writeback
     - readpage becomes read_folio
     - releasepage becomes release_folio
     - freepage becomes free_folio

 - Change filler_t to require a struct file pointer be the first
   argument like ->read_folio

* tag 'folio-5.19' of git://git.infradead.org/users/willy/pagecache: (107 commits)
  nilfs2: Fix some kernel-doc comments
  Appoint myself page cache maintainer
  fs: Remove aops->freepage
  secretmem: Convert to free_folio
  nfs: Convert to free_folio
  orangefs: Convert to free_folio
  fs: Add free_folio address space operation
  fs: Convert drop_buffers() to use a folio
  fs: Change try_to_free_buffers() to take a folio
  jbd2: Convert release_buffer_page() to use a folio
  jbd2: Convert jbd2_journal_try_to_free_buffers to take a folio
  reiserfs: Convert release_buffer_page() to use a folio
  fs: Remove last vestiges of releasepage
  ubifs: Convert to release_folio
  reiserfs: Convert to release_folio
  orangefs: Convert to release_folio
  ocfs2: Convert to release_folio
  nilfs2: Remove comment about releasepage
  nfs: Convert to release_folio
  jfs: Convert to release_folio
  ...
-rw-r--r--  Documentation/filesystems/caching/netfs-api.rst  4
-rw-r--r--  Documentation/filesystems/fscrypt.rst  2
-rw-r--r--  Documentation/filesystems/fsverity.rst  2
-rw-r--r--  Documentation/filesystems/locking.rst  36
-rw-r--r--  Documentation/filesystems/netfs_library.rst  9
-rw-r--r--  Documentation/filesystems/porting.rst  2
-rw-r--r--  Documentation/filesystems/vfs.rst  86
-rw-r--r--  MAINTAINERS  13
-rw-r--r--  block/fops.c  12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c  23
-rw-r--r--  drivers/scsi/scsicam.c  11
-rw-r--r--  fs/9p/vfs_addr.c  23
-rw-r--r--  fs/adfs/inode.c  10
-rw-r--r--  fs/affs/file.c  21
-rw-r--r--  fs/affs/symlink.c  5
-rw-r--r--  fs/afs/dir.c  7
-rw-r--r--  fs/afs/file.c  28
-rw-r--r--  fs/afs/internal.h  4
-rw-r--r--  fs/afs/write.c  4
-rw-r--r--  fs/befs/linuxvfs.c  17
-rw-r--r--  fs/bfs/file.c  11
-rw-r--r--  fs/btrfs/disk-io.c  12
-rw-r--r--  fs/btrfs/extent_io.c  17
-rw-r--r--  fs/btrfs/extent_io.h  2
-rw-r--r--  fs/btrfs/file.c  9
-rw-r--r--  fs/btrfs/free-space-cache.c  2
-rw-r--r--  fs/btrfs/inode.c  28
-rw-r--r--  fs/btrfs/ioctl.c  2
-rw-r--r--  fs/btrfs/relocation.c  13
-rw-r--r--  fs/btrfs/send.c  6
-rw-r--r--  fs/buffer.c  214
-rw-r--r--  fs/ceph/addr.c  32
-rw-r--r--  fs/cifs/file.c  31
-rw-r--r--  fs/coda/symlink.c  7
-rw-r--r--  fs/cramfs/README  8
-rw-r--r--  fs/cramfs/inode.c  7
-rw-r--r--  fs/ecryptfs/mmap.c  15
-rw-r--r--  fs/efs/inode.c  8
-rw-r--r--  fs/efs/symlink.c  5
-rw-r--r--  fs/erofs/data.c  6
-rw-r--r--  fs/erofs/fscache.c  16
-rw-r--r--  fs/erofs/super.c  16
-rw-r--r--  fs/erofs/zdata.c  7
-rw-r--r--  fs/exfat/inode.c  10
-rw-r--r--  fs/ext2/inode.c  19
-rw-r--r--  fs/ext4/ext4.h  2
-rw-r--r--  fs/ext4/inline.c  41
-rw-r--r--  fs/ext4/inode.c  48
-rw-r--r--  fs/ext4/move_extent.c  17
-rw-r--r--  fs/ext4/readpage.c  4
-rw-r--r--  fs/ext4/verity.c  9
-rw-r--r--  fs/f2fs/checkpoint.c  2
-rw-r--r--  fs/f2fs/compress.c  2
-rw-r--r--  fs/f2fs/data.c  42
-rw-r--r--  fs/f2fs/f2fs.h  11
-rw-r--r--  fs/f2fs/node.c  2
-rw-r--r--  fs/f2fs/super.c  2
-rw-r--r--  fs/f2fs/verity.c  9
-rw-r--r--  fs/fat/inode.c  10
-rw-r--r--  fs/freevxfs/vxfs_immed.c  15
-rw-r--r--  fs/freevxfs/vxfs_subr.c  17
-rw-r--r--  fs/fuse/dir.c  10
-rw-r--r--  fs/fuse/file.c  12
-rw-r--r--  fs/gfs2/aops.c  81
-rw-r--r--  fs/gfs2/inode.h  2
-rw-r--r--  fs/gfs2/meta_io.c  4
-rw-r--r--  fs/hfs/extent.c  6
-rw-r--r--  fs/hfs/hfs_fs.h  2
-rw-r--r--  fs/hfs/inode.c  38
-rw-r--r--  fs/hfsplus/extents.c  8
-rw-r--r--  fs/hfsplus/hfsplus_fs.h  2
-rw-r--r--  fs/hfsplus/inode.c  38
-rw-r--r--  fs/hostfs/hostfs_kern.c  9
-rw-r--r--  fs/hpfs/file.c  10
-rw-r--r--  fs/hpfs/namei.c  5
-rw-r--r--  fs/hugetlbfs/inode.c  2
-rw-r--r--  fs/iomap/buffered-io.c  38
-rw-r--r--  fs/iomap/trace.h  2
-rw-r--r--  fs/isofs/compress.c  5
-rw-r--r--  fs/isofs/inode.c  6
-rw-r--r--  fs/isofs/rock.c  7
-rw-r--r--  fs/jbd2/commit.c  14
-rw-r--r--  fs/jbd2/transaction.c  14
-rw-r--r--  fs/jffs2/file.c  23
-rw-r--r--  fs/jffs2/fs.c  2
-rw-r--r--  fs/jffs2/gc.c  2
-rw-r--r--  fs/jffs2/os-linux.h  2
-rw-r--r--  fs/jfs/inode.c  11
-rw-r--r--  fs/jfs/jfs_metapage.c  21
-rw-r--r--  fs/libfs.c  18
-rw-r--r--  fs/minix/inode.c  11
-rw-r--r--  fs/mpage.c  20
-rw-r--r--  fs/namei.c  28
-rw-r--r--  fs/netfs/buffered_read.c  25
-rw-r--r--  fs/nfs/dir.c  9
-rw-r--r--  fs/nfs/file.c  51
-rw-r--r--  fs/nfs/fscache.h  14
-rw-r--r--  fs/nfs/read.c  3
-rw-r--r--  fs/nfs/symlink.c  16
-rw-r--r--  fs/nilfs2/inode.c  27
-rw-r--r--  fs/nilfs2/recovery.c  2
-rw-r--r--  fs/ntfs/aops.c  40
-rw-r--r--  fs/ntfs/aops.h  6
-rw-r--r--  fs/ntfs/attrib.c  2
-rw-r--r--  fs/ntfs/compress.c  4
-rw-r--r--  fs/ntfs/file.c  4
-rw-r--r--  fs/ntfs/inode.c  4
-rw-r--r--  fs/ntfs/mft.h  2
-rw-r--r--  fs/ntfs3/file.c  7
-rw-r--r--  fs/ntfs3/inode.c  27
-rw-r--r--  fs/ntfs3/ntfs_fs.h  5
-rw-r--r--  fs/ocfs2/alloc.c  2
-rw-r--r--  fs/ocfs2/aops.c  23
-rw-r--r--  fs/ocfs2/file.c  2
-rw-r--r--  fs/ocfs2/refcounttree.c  6
-rw-r--r--  fs/ocfs2/symlink.c  5
-rw-r--r--  fs/omfs/file.c  11
-rw-r--r--  fs/orangefs/inode.c  52
-rw-r--r--  fs/qnx4/inode.c  7
-rw-r--r--  fs/qnx6/inode.c  6
-rw-r--r--  fs/reiserfs/file.c  2
-rw-r--r--  fs/reiserfs/inode.c  36
-rw-r--r--  fs/reiserfs/journal.c  14
-rw-r--r--  fs/romfs/super.c  9
-rw-r--r--  fs/squashfs/file.c  5
-rw-r--r--  fs/squashfs/super.c  2
-rw-r--r--  fs/squashfs/symlink.c  5
-rw-r--r--  fs/sysv/itree.c  10
-rw-r--r--  fs/ubifs/file.c  41
-rw-r--r--  fs/ubifs/super.c  2
-rw-r--r--  fs/udf/file.c  14
-rw-r--r--  fs/udf/inode.c  10
-rw-r--r--  fs/udf/symlink.c  5
-rw-r--r--  fs/ufs/inode.c  13
-rw-r--r--  fs/vboxsf/file.c  5
-rw-r--r--  fs/verity/enable.c  29
-rw-r--r--  fs/xfs/xfs_aops.c  10
-rw-r--r--  fs/zonefs/super.c  8
-rw-r--r--  include/linux/buffer_head.h  14
-rw-r--r--  include/linux/fs.h  32
-rw-r--r--  include/linux/iomap.h  4
-rw-r--r--  include/linux/jbd2.h  2
-rw-r--r--  include/linux/mpage.h  2
-rw-r--r--  include/linux/netfs.h  4
-rw-r--r--  include/linux/nfs_fs.h  2
-rw-r--r--  include/linux/page-flags.h  2
-rw-r--r--  include/linux/pagemap.h  78
-rw-r--r--  include/trace/events/ext4.h  21
-rw-r--r--  include/trace/events/f2fs.h  12
-rw-r--r--  kernel/events/uprobes.c  7
-rw-r--r--  mm/filemap.c  99
-rw-r--r--  mm/folio-compat.c  4
-rw-r--r--  mm/memory.c  4
-rw-r--r--  mm/migrate.c  2
-rw-r--r--  mm/page-writeback.c  10
-rw-r--r--  mm/page_io.c  2
-rw-r--r--  mm/readahead.c  37
-rw-r--r--  mm/secretmem.c  8
-rw-r--r--  mm/shmem.c  4
-rw-r--r--  mm/swapfile.c  2
-rw-r--r--  mm/vmscan.c  12
161 files changed, 1233 insertions, 1221 deletions
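
Most of the per-filesystem churn below follows a single pattern: ->readpage becomes ->read_folio, block_read_full_page() becomes block_read_full_folio(), and write_begin loses its flags argument. A minimal sketch of the resulting aops, modelled on the adfs/bfs conversions in this diff (the examplefs_* names are hypothetical)::

	static int examplefs_read_folio(struct file *file, struct folio *folio)
	{
		/* was: block_read_full_page(page, examplefs_get_block) */
		return block_read_full_folio(folio, examplefs_get_block);
	}

	static const struct address_space_operations examplefs_aops = {
		.dirty_folio	  = block_dirty_folio,
		.invalidate_folio = block_invalidate_folio,
		.read_folio	  = examplefs_read_folio,  /* was .readpage */
		.writepage	  = examplefs_writepage,
		.write_begin	  = examplefs_write_begin, /* no flags argument */
		.write_end	  = generic_write_end,
	};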
diff --git a/Documentation/filesystems/caching/netfs-api.rst b/Documentation/filesystems/caching/netfs-api.rst
index 7308d76a29dc78..1d18e9def183ee 100644
--- a/Documentation/filesystems/caching/netfs-api.rst
+++ b/Documentation/filesystems/caching/netfs-api.rst
@@ -433,11 +433,11 @@ has done a write and then the page it wrote from has been released by the VM,
after which it *has* to look in the cache.
To inform fscache that a page might now be in the cache, the following function
-should be called from the ``releasepage`` address space op::
+should be called from the ``release_folio`` address space op::
void fscache_note_page_release(struct fscache_cookie *cookie);
-if the page has been released (ie. releasepage returned true).
+if the page has been released (ie. release_folio returned true).
Page release and page invalidation should also wait for any mark left on the
page to say that a DIO write is underway from that page::
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 6ccd5efb25b779..2e9aaa295125a2 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -1256,7 +1256,7 @@ inline encryption hardware will encrypt/decrypt the file contents.
When inline encryption isn't used, filesystems must encrypt/decrypt
the file contents themselves, as described below:
-For the read path (->readpage()) of regular files, filesystems can
+For the read path (->read_folio()) of regular files, filesystems can
read the ciphertext into the page cache and decrypt it in-place. The
page lock must be held until decryption has finished, to prevent the
page from becoming visible to userspace prematurely.
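
As a sketch of the decrypt-in-place pattern described above (examplefs_end_read() is a hypothetical completion handler; fscrypt_needs_contents_encryption() and fscrypt_decrypt_pagecache_blocks() are existing fscrypt helpers)::

	/* Read completion: decrypt in place while the folio is still
	 * locked, so userspace never sees ciphertext. */
	static void examplefs_end_read(struct folio *folio, int err)
	{
		struct inode *inode = folio->mapping->host;

		if (!err && fscrypt_needs_contents_encryption(inode))
			err = fscrypt_decrypt_pagecache_blocks(&folio->page,
						folio_size(folio), 0);
		if (!err)
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}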
diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst
index b7d42fd65e9d00..756f2c215ba135 100644
--- a/Documentation/filesystems/fsverity.rst
+++ b/Documentation/filesystems/fsverity.rst
@@ -559,7 +559,7 @@ already verified). Below, we describe how filesystems implement this.
Pagecache
~~~~~~~~~
-For filesystems using Linux's pagecache, the ``->readpage()`` and
+For filesystems using Linux's pagecache, the ``->read_folio()`` and
``->readahead()`` methods must be modified to verify pages before they
are marked Uptodate. Merely hooking ``->read_iter()`` would be
insufficient, since ``->read_iter()`` is not used for memory maps.
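
The ordering requirement, as a sketch (examplefs_read_done() is hypothetical; fsverity_active() and fsverity_verify_page() are the existing fs/verity helpers): verify before marking uptodate, because mmap'd reads never pass through ->read_iter()::

	static void examplefs_read_done(struct folio *folio, int err)
	{
		struct inode *inode = folio->mapping->host;

		/* Verify *before* folio_mark_uptodate(); once uptodate,
		 * the data is visible via mmap with no further hook. */
		if (!err && fsverity_active(inode) &&
		    !fsverity_verify_page(&folio->page))
			err = -EIO;
		if (!err)
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}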
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index c26d854275a0ec..515bc48ab58b45 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -237,20 +237,20 @@ address_space_operations
prototypes::
int (*writepage)(struct page *page, struct writeback_control *wbc);
- int (*readpage)(struct file *, struct page *);
+ int (*read_folio)(struct file *, struct folio *);
int (*writepages)(struct address_space *, struct writeback_control *);
bool (*dirty_folio)(struct address_space *, struct folio *folio);
void (*readahead)(struct readahead_control *);
int (*write_begin)(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t start, size_t len);
- int (*releasepage) (struct page *, int);
- void (*freepage)(struct page *);
+ bool (*release_folio)(struct folio *, gfp_t);
+ void (*free_folio)(struct folio *);
int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
bool (*isolate_page) (struct page *, isolate_mode_t);
int (*migratepage)(struct address_space *, struct page *, struct page *);
@@ -262,22 +262,22 @@ prototypes::
int (*swap_deactivate)(struct file *);
locking rules:
- All except dirty_folio and freepage may block
+ All except dirty_folio and free_folio may block
====================== ======================== ========= ===============
-ops PageLocked(page) i_rwsem invalidate_lock
+ops folio locked i_rwsem invalidate_lock
====================== ======================== ========= ===============
writepage: yes, unlocks (see below)
-readpage: yes, unlocks shared
+read_folio: yes, unlocks shared
writepages:
-dirty_folio maybe
+dirty_folio: maybe
readahead: yes, unlocks shared
write_begin: locks the page exclusive
write_end: yes, unlocks exclusive
bmap:
invalidate_folio: yes exclusive
-releasepage: yes
-freepage: yes
+release_folio: yes
+free_folio: yes
direct_IO:
isolate_page: yes
migratepage: yes (both)
@@ -289,13 +289,13 @@ swap_activate: no
swap_deactivate: no
====================== ======================== ========= ===============
-->write_begin(), ->write_end() and ->readpage() may be called from
+->write_begin(), ->write_end() and ->read_folio() may be called from
the request handler (/dev/loop).
-->readpage() unlocks the page, either synchronously or via I/O
+->read_folio() unlocks the folio, either synchronously or via I/O
completion.
-->readahead() unlocks the pages that I/O is attempted on like ->readpage().
+->readahead() unlocks the folios that I/O is attempted on like ->read_folio().
->writepage() is used for two purposes: for "memory cleansing" and for
"sync". These are quite different operations and the behaviour may differ
@@ -372,12 +372,12 @@ invalidate_lock before invalidating page cache in truncate / hole punch
path (and thus calling into ->invalidate_folio) to block races between page
cache invalidation and page cache filling functions (fault, read, ...).
-->releasepage() is called when the kernel is about to try to drop the
-buffers from the page in preparation for freeing it. It returns zero to
-indicate that the buffers are (or may be) freeable. If ->releasepage is zero,
-the kernel assumes that the fs has no private interest in the buffers.
+->release_folio() is called when the kernel is about to try to drop the
+buffers from the folio in preparation for freeing it. It returns false to
+indicate that the buffers are (or may be) freeable. If ->release_folio is
+NULL, the kernel assumes that the fs has no private interest in the buffers.
-->freepage() is called when the kernel is done dropping the page
+->free_folio() is called when the kernel has dropped the folio
from the page cache.
->launder_folio() may be called prior to releasing a folio if
diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst
index 69f00179fdfeb6..a80a59941d2fe1 100644
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -96,7 +96,7 @@ attached to an inode (or NULL if fscache is disabled)::
Buffered Read Helpers
=====================
-The library provides a set of read helpers that handle the ->readpage(),
+The library provides a set of read helpers that handle the ->read_folio(),
->readahead() and much of the ->write_begin() VM operations and translate them
into a common call framework.
@@ -136,20 +136,19 @@ Read Helper Functions
Three read helpers are provided::
void netfs_readahead(struct readahead_control *ractl);
- int netfs_readpage(struct file *file,
- struct page *page);
+ int netfs_read_folio(struct file *file,
+ struct folio *folio);
int netfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos,
unsigned int len,
- unsigned int flags,
struct folio **_folio,
void **_fsdata);
Each corresponds to a VM address space operation. These operations use the
state in the per-inode context.
-For ->readahead() and ->readpage(), the network filesystem just point directly
+For ->readahead() and ->read_folio(), the network filesystem just point directly
at the corresponding read helper; whereas for ->write_begin(), it may be a
little more complicated as the network filesystem might want to flush
conflicting writes or track dirty data and needs to put the acquired folio if
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 7c1583dbeb59be..2e0e4f0e0c6fb7 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -624,7 +624,7 @@ any symlink that might use page_follow_link_light/page_put_link() must
have inode_nohighmem(inode) called before anything might start playing with
its pagecache. No highmem pages should end up in the pagecache of such
symlinks. That includes any preseeding that might be done during symlink
-creation. __page_symlink() will honour the mapping gfp flags, so once
+creation. page_symlink() will honour the mapping gfp flags, so once
you've done inode_nohighmem() it's safe to use, but if you allocate and
insert the page manually, make sure to use the right gfp flags.
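
The usual shape of a symlink creation that relies on this, as a sketch (the examplefs_* names are hypothetical; d_instantiate() and error unwinding are omitted for brevity)::

	static int examplefs_symlink(struct user_namespace *mnt_userns,
				     struct inode *dir, struct dentry *dentry,
				     const char *symname)
	{
		struct inode *inode = examplefs_new_inode(dir, S_IFLNK | 0777);

		if (IS_ERR(inode))
			return PTR_ERR(inode);

		inode->i_op = &page_symlink_inode_operations;
		inode_nohighmem(inode);	/* before any pagecache preseeding */
		inode->i_mapping->a_ops = &examplefs_aops;

		/* page_symlink() honours the mapping gfp flags */
		return page_symlink(inode, symname, strlen(symname) + 1);
	}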
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 794bd1a66bfbdf..12a011d2cbc6b3 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -620,9 +620,9 @@ Writeback.
The first can be used independently to the others. The VM can try to
either write dirty pages in order to clean them, or release clean pages
in order to reuse them. To do this it can call the ->writepage method
-on dirty pages, and ->releasepage on clean pages with PagePrivate set.
-Clean pages without PagePrivate and with no external references will be
-released without notice being given to the address_space.
+on dirty pages, and ->release_folio on clean folios with the private
+flag set. Clean pages without PagePrivate and with no external references
+will be released without notice being given to the address_space.
To achieve this functionality, pages need to be placed on an LRU with
lru_cache_add and mark_page_active needs to be called whenever the page
@@ -656,7 +656,7 @@ by memory-mapping the page. Data is written into the address space by
the application, and then written-back to storage typically in whole
pages, however the address_space has finer control of write sizes.
-The read process essentially only requires 'readpage'. The write
+The read process essentially only requires 'read_folio'. The write
process is more complicated and uses write_begin/write_end or
dirty_folio to write data into the address_space, and writepage and
writepages to writeback data to storage.
@@ -722,20 +722,20 @@ cache in your filesystem. The following members are defined:
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
- int (*readpage)(struct file *, struct page *);
+ int (*read_folio)(struct file *, struct folio *);
int (*writepages)(struct address_space *, struct writeback_control *);
bool (*dirty_folio)(struct address_space *, struct folio *);
void (*readahead)(struct readahead_control *);
int (*write_begin)(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t start, size_t len);
- int (*releasepage) (struct page *, int);
- void (*freepage)(struct page *);
+ bool (*release_folio)(struct folio *, gfp_t);
+ void (*free_folio)(struct folio *);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/* isolate a page for migration */
bool (*isolate_page) (struct page *, isolate_mode_t);
@@ -747,7 +747,7 @@ cache in your filesystem. The following members are defined:
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
- void (*is_dirty_writeback) (struct page *, bool *, bool *);
+ void (*is_dirty_writeback)(struct folio *, bool *, bool *);
int (*error_remove_page) (struct mapping *mapping, struct page *page);
int (*swap_activate)(struct file *);
int (*swap_deactivate)(struct file *);
@@ -772,14 +772,14 @@ cache in your filesystem. The following members are defined:
See the file "Locking" for more details.
-``readpage``
- called by the VM to read a page from backing store. The page
- will be Locked when readpage is called, and should be unlocked
- and marked uptodate once the read completes. If ->readpage
- discovers that it needs to unlock the page for some reason, it
- can do so, and then return AOP_TRUNCATED_PAGE. In this case,
- the page will be relocated, relocked and if that all succeeds,
- ->readpage will be called again.
+``read_folio``
+ called by the VM to read a folio from backing store. The folio
+ will be locked when read_folio is called, and should be unlocked
+ and marked uptodate once the read completes. If ->read_folio
+ discovers that it cannot perform the I/O at this time, it can
+ unlock the folio and return AOP_TRUNCATED_PAGE. In this case,
+ the folio will be looked up again, relocked and if that all succeeds,
+ ->read_folio will be called again.
``writepages``
called by the VM to write out pages associated with the
@@ -832,9 +832,6 @@ cache in your filesystem. The following members are defined:
passed to write_begin is greater than the number of bytes copied
into the page).
- flags is a field for AOP_FLAG_xxx flags, described in
- include/linux/fs.h.
-
A void * may be returned in fsdata, which then gets passed into
write_end.
@@ -867,36 +864,35 @@ cache in your filesystem. The following members are defined:
address space. This generally corresponds to either a
truncation, punch hole or a complete invalidation of the address
space (in the latter case 'offset' will always be 0 and 'length'
- will be folio_size()). Any private data associated with the page
+ will be folio_size()). Any private data associated with the folio
should be updated to reflect this truncation. If offset is 0
and length is folio_size(), then the private data should be
- released, because the page must be able to be completely
- discarded. This may be done by calling the ->releasepage
+ released, because the folio must be able to be completely
+ discarded. This may be done by calling the ->release_folio
function, but in this case the release MUST succeed.
-``releasepage``
- releasepage is called on PagePrivate pages to indicate that the
- page should be freed if possible. ->releasepage should remove
- any private data from the page and clear the PagePrivate flag.
- If releasepage() fails for some reason, it must indicate failure
- with a 0 return value. releasepage() is used in two distinct
- though related cases. The first is when the VM finds a clean
- page with no active users and wants to make it a free page. If
- ->releasepage succeeds, the page will be removed from the
- address_space and become free.
+``release_folio``
+ release_folio is called on folios with private data to tell the
+ filesystem that the folio is about to be freed. ->release_folio
+ should remove any private data from the folio and clear the
+ private flag. If release_folio() fails, it should return false.
+ release_folio() is used in two distinct though related cases.
+ The first is when the VM wants to free a clean folio with no
+ active users. If ->release_folio succeeds, the folio will be
+ removed from the address_space and be freed.
The second case is when a request has been made to invalidate
- some or all pages in an address_space. This can happen through
- the fadvise(POSIX_FADV_DONTNEED) system call or by the
- filesystem explicitly requesting it as nfs and 9fs do (when they
+ some or all folios in an address_space. This can happen
+ through the fadvise(POSIX_FADV_DONTNEED) system call or by the
+ filesystem explicitly requesting it as nfs and 9p do (when they
believe the cache may be out of date with storage) by calling
invalidate_inode_pages2(). If the filesystem makes such a call,
- and needs to be certain that all pages are invalidated, then its
- releasepage will need to ensure this. Possibly it can clear the
- PageUptodate bit if it cannot free private data yet.
+ and needs to be certain that all folios are invalidated, then
+ its release_folio will need to ensure this. Possibly it can
+ clear the uptodate flag if it cannot free private data yet.
-``freepage``
- freepage is called once the page is no longer visible in the
+``free_folio``
+ free_folio is called once the folio is no longer visible in the
page cache in order to allow the cleanup of any private data.
Since it may be called by the memory reclaimer, it should not
assume that the original address_space mapping still exists, and
@@ -935,14 +931,14 @@ cache in your filesystem. The following members are defined:
without needing I/O to bring the whole page up to date.
``is_dirty_writeback``
- Called by the VM when attempting to reclaim a page. The VM uses
+ Called by the VM when attempting to reclaim a folio. The VM uses
dirty and writeback information to determine if it needs to
stall to allow flushers a chance to complete some IO.
- Ordinarily it can use PageDirty and PageWriteback but some
- filesystems have more complex state (unstable pages in NFS
+ Ordinarily it can use folio_test_dirty and folio_test_writeback but
+ some filesystems have more complex state (unstable folios in NFS
prevent reclaim) or do not set those flags due to locking
problems. This callback allows a filesystem to indicate to the
- VM if a page should be treated as dirty or writeback for the
+ VM if a folio should be treated as dirty or writeback for the
purposes of stalling.
``error_remove_page``
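
For a buffer_head-based filesystem, the release_folio described above usually reduces to the generic buffer-stripping helper; a sketch (examplefs is hypothetical, and note this series also converts try_to_free_buffers() to take a folio and return bool)::

	static bool examplefs_release_folio(struct folio *folio, gfp_t gfp)
	{
		/* Refuse while dirty or under writeback; otherwise try to
		 * drop the buffers and clear the private flag. */
		if (folio_test_dirty(folio) || folio_test_writeback(folio))
			return false;
		return try_to_free_buffers(folio);
	}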
diff --git a/MAINTAINERS b/MAINTAINERS
index c89e5f126c4f55..6618e9b91b6c39 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14878,6 +14878,19 @@ F: Documentation/core-api/padata.rst
F: include/linux/padata.h
F: kernel/padata.c
+PAGE CACHE
+M: Matthew Wilcox (Oracle) <willy@infradead.org>
+L: linux-fsdevel@vger.kernel.org
+S: Supported
+T: git git://git.infradead.org/users/willy/pagecache.git
+F: Documentation/filesystems/locking.rst
+F: Documentation/filesystems/vfs.rst
+F: include/linux/pagemap.h
+F: mm/filemap.c
+F: mm/page-writeback.c
+F: mm/readahead.c
+F: mm/truncate.c
+
PAGE POOL
M: Jesper Dangaard Brouer <hawk@kernel.org>
M: Ilias Apalodimas <ilias.apalodimas@linaro.org>
diff --git a/block/fops.c b/block/fops.c
index b9b83030e0dfa4..d6b3276a6c6808 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -372,9 +372,9 @@ static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page, blkdev_get_block, wbc);
}
-static int blkdev_readpage(struct file * file, struct page * page)
+static int blkdev_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, blkdev_get_block);
+ return block_read_full_folio(folio, blkdev_get_block);
}
static void blkdev_readahead(struct readahead_control *rac)
@@ -383,11 +383,9 @@ static void blkdev_readahead(struct readahead_control *rac)
}
static int blkdev_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags, struct page **pagep,
- void **fsdata)
+ loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
- return block_write_begin(mapping, pos, len, flags, pagep,
- blkdev_get_block);
+ return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}
static int blkdev_write_end(struct file *file, struct address_space *mapping,
@@ -412,7 +410,7 @@ static int blkdev_writepages(struct address_space *mapping,
const struct address_space_operations def_blk_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = blkdev_readpage,
+ .read_folio = blkdev_read_folio,
.readahead = blkdev_readahead,
.writepage = blkdev_writepage,
.write_begin = blkdev_write_begin,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 3a1c782ed791af..e92cc9d7257c53 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -408,6 +408,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg)
{
struct address_space *mapping = obj->base.filp->f_mapping;
+ const struct address_space_operations *aops = mapping->a_ops;
char __user *user_data = u64_to_user_ptr(arg->data_ptr);
u64 remain, offset;
unsigned int pg;
@@ -465,9 +466,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
if (err)
return err;
- err = pagecache_write_begin(obj->base.filp, mapping,
- offset, len, 0,
- &page, &data);
+ err = aops->write_begin(obj->base.filp, mapping, offset, len,
+ &page, &data);
if (err < 0)
return err;
@@ -477,9 +477,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
len);
kunmap_atomic(vaddr);
- err = pagecache_write_end(obj->base.filp, mapping,
- offset, len, len - unwritten,
- page, data);
+ err = aops->write_end(obj->base.filp, mapping, offset, len,
+ len - unwritten, page, data);
if (err < 0)
return err;
@@ -622,6 +621,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
{
struct drm_i915_gem_object *obj;
struct file *file;
+ const struct address_space_operations *aops;
resource_size_t offset;
int err;
@@ -633,15 +633,15 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
file = obj->base.filp;
+ aops = file->f_mapping->a_ops;
offset = 0;
do {
unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
struct page *page;
void *pgdata, *vaddr;
- err = pagecache_write_begin(file, file->f_mapping,
- offset, len, 0,
- &page, &pgdata);
+ err = aops->write_begin(file, file->f_mapping, offset, len,
+ &page, &pgdata);
if (err < 0)
goto fail;
@@ -649,9 +649,8 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
memcpy(vaddr, data, len);
kunmap(page);
- err = pagecache_write_end(file, file->f_mapping,
- offset, len, len,
- page, pgdata);
+ err = aops->write_end(file, file->f_mapping, offset, len, len,
+ page, pgdata);
if (err < 0)
goto fail;
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index acdc0aceca5ef9..e2c7d8ef205fcc 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -34,15 +34,14 @@ unsigned char *scsi_bios_ptable(struct block_device *dev)
{
struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping;
unsigned char *res = NULL;
- struct page *page;
+ struct folio *folio;
- page = read_mapping_page(mapping, 0, NULL);
- if (IS_ERR(page))
+ folio = read_mapping_folio(mapping, 0, NULL);
+ if (IS_ERR(folio))
return NULL;
- if (!PageError(page))
- res = kmemdup(page_address(page) + 0x1be, 66, GFP_KERNEL);
- put_page(page);
+ res = kmemdup(folio_address(folio) + 0x1be, 66, GFP_KERNEL);
+ folio_put(folio);
return res;
}
EXPORT_SYMBOL(scsi_bios_ptable);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 5011281883432d..8ce82ff1e40afd 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -100,29 +100,28 @@ const struct netfs_request_ops v9fs_req_ops = {
};
/**
- * v9fs_release_page - release the private state associated with a page
- * @page: The page to be released
+ * v9fs_release_folio - release the private state associated with a folio
+ * @folio: The folio to be released
* @gfp: The caller's allocation restrictions
*
- * Returns 1 if the page can be released, false otherwise.
+ * Returns true if the page can be released, false otherwise.
*/
-static int v9fs_release_page(struct page *page, gfp_t gfp)
+static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
{
- struct folio *folio = page_folio(page);
struct inode *inode = folio_inode(folio);
if (folio_test_private(folio))
- return 0;
+ return false;
#ifdef CONFIG_9P_FSCACHE
if (folio_test_fscache(folio)) {
if (current_is_kswapd() || !(gfp & __GFP_FS))
- return 0;
+ return false;
folio_wait_fscache(folio);
}
#endif
fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
- return 1;
+ return true;
}
static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
@@ -260,7 +259,7 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int flags,
+ loff_t pos, unsigned int len,
struct page **subpagep, void **fsdata)
{
int retval;
@@ -275,7 +274,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
* file. We need to do this before we get a lock on the page in case
* there's more than one writer competing for the same cache block.
*/
- retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata);
+ retval = netfs_write_begin(filp, mapping, pos, len, &folio, fsdata);
if (retval < 0)
return retval;
@@ -336,13 +335,13 @@ static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
#endif
const struct address_space_operations v9fs_addr_operations = {
- .readpage = netfs_readpage,
+ .read_folio = netfs_read_folio,
.readahead = netfs_readahead,
.dirty_folio = v9fs_dirty_folio,
.writepage = v9fs_vfs_writepage,
.write_begin = v9fs_write_begin,
.write_end = v9fs_write_end,
- .releasepage = v9fs_release_page,
+ .release_folio = v9fs_release_folio,
.invalidate_folio = v9fs_invalidate_folio,
.launder_folio = v9fs_launder_folio,
.direct_IO = v9fs_direct_IO,
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 561bc748c04a0f..ee22278b0cfc02 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -38,9 +38,9 @@ static int adfs_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page, adfs_get_block, wbc);
}
-static int adfs_readpage(struct file *file, struct page *page)
+static int adfs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, adfs_get_block);
+ return block_read_full_folio(folio, adfs_get_block);
}
static void adfs_write_failed(struct address_space *mapping, loff_t to)
@@ -52,13 +52,13 @@ static void adfs_write_failed(struct address_space *mapping, loff_t to)
}
static int adfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
*pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
adfs_get_block,
&ADFS_I(mapping->host)->mmu_private);
if (unlikely(ret))
@@ -75,7 +75,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations adfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = adfs_readpage,
+ .read_folio = adfs_read_folio,
.writepage = adfs_writepage,
.write_begin = adfs_write_begin,
.write_end = generic_write_end,
diff --git a/fs/affs/file.c b/fs/affs/file.c
index b3f81d84ff4cf7..cd00a4c68a121f 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -375,9 +375,9 @@ static int affs_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page, affs_get_block, wbc);
}
-static int affs_readpage(struct file *file, struct page *page)
+static int affs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, affs_get_block);
+ return block_read_full_folio(folio, affs_get_block);
}
static void affs_write_failed(struct address_space *mapping, loff_t to)
@@ -414,13 +414,13 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
static int affs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
*pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
affs_get_block,
&AFFS_I(mapping->host)->mmu_private);
if (unlikely(ret))
@@ -455,7 +455,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations affs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = affs_readpage,
+ .read_folio = affs_read_folio,
.writepage = affs_writepage,
.write_begin = affs_write_begin,
.write_end = affs_write_end,
@@ -629,8 +629,9 @@ out:
}
static int
-affs_readpage_ofs(struct file *file, struct page *page)
+affs_read_folio_ofs(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
u32 to;
int err;
@@ -650,7 +651,7 @@ affs_readpage_ofs(struct file *file, struct page *page)
}
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
@@ -670,7 +671,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
}
index = pos >> PAGE_SHIFT;
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
*pagep = page;
@@ -837,7 +838,7 @@ err_bh:
const struct address_space_operations affs_aops_ofs = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = affs_readpage_ofs,
+ .read_folio = affs_read_folio_ofs,
//.writepage = affs_writepage_ofs,
.write_begin = affs_write_begin_ofs,
.write_end = affs_write_end_ofs
@@ -887,7 +888,7 @@ affs_truncate(struct inode *inode)
loff_t isize = inode->i_size;
int res;
- res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, 0, &page, &fsdata);
+ res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &page, &fsdata);
if (!res)
res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
else
diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
index a7531b26e8f029..31d6446dc16689 100644
--- a/fs/affs/symlink.c
+++ b/fs/affs/symlink.c
@@ -11,8 +11,9 @@
#include "affs.h"
-static int affs_symlink_readpage(struct file *file, struct page *page)
+static int affs_symlink_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct buffer_head *bh;
struct inode *inode = page->mapping->host;
char *link = page_address(page);
@@ -67,7 +68,7 @@ fail:
}
const struct address_space_operations affs_symlink_aops = {
- .readpage = affs_symlink_readpage,
+ .read_folio = affs_symlink_read_folio,
};
const struct inode_operations affs_symlink_inode_operations = {
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 932e61e28e5d9f..94aa7356248e2e 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -41,7 +41,7 @@ static int afs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags);
-static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags);
+static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags);
static void afs_dir_invalidate_folio(struct folio *folio, size_t offset,
size_t length);
@@ -75,7 +75,7 @@ const struct inode_operations afs_dir_inode_operations = {
const struct address_space_operations afs_dir_aops = {
.dirty_folio = afs_dir_dirty_folio,
- .releasepage = afs_dir_releasepage,
+ .release_folio = afs_dir_release_folio,
.invalidate_folio = afs_dir_invalidate_folio,
};
@@ -2002,9 +2002,8 @@ error:
* Release a directory folio and clean up its private state if it's not busy
* - return true if the folio can now be released, false if not
*/
-static int afs_dir_releasepage(struct page *subpage, gfp_t gfp_flags)
+static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags)
{
- struct folio *folio = page_folio(subpage);
struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
_enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio));
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 26292a110a8f9a..a8e8832179e4e5 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -19,10 +19,10 @@
#include "internal.h"
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
-static int afs_symlink_readpage(struct file *file, struct page *page);
+static int afs_symlink_read_folio(struct file *file, struct folio *folio);
static void afs_invalidate_folio(struct folio *folio, size_t offset,
size_t length);
-static int afs_releasepage(struct page *page, gfp_t gfp_flags);
+static bool afs_release_folio(struct folio *folio, gfp_t gfp_flags);
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
static void afs_vm_open(struct vm_area_struct *area);
@@ -50,11 +50,11 @@ const struct inode_operations afs_file_inode_operations = {
};
const struct address_space_operations afs_file_aops = {
- .readpage = netfs_readpage,
+ .read_folio = netfs_read_folio,
.readahead = netfs_readahead,
.dirty_folio = afs_dirty_folio,
.launder_folio = afs_launder_folio,
- .releasepage = afs_releasepage,
+ .release_folio = afs_release_folio,
.invalidate_folio = afs_invalidate_folio,
.write_begin = afs_write_begin,
.write_end = afs_write_end,
@@ -63,8 +63,8 @@ const struct address_space_operations afs_file_aops = {
};
const struct address_space_operations afs_symlink_aops = {
- .readpage = afs_symlink_readpage,
- .releasepage = afs_releasepage,
+ .read_folio = afs_symlink_read_folio,
+ .release_folio = afs_release_folio,
.invalidate_folio = afs_invalidate_folio,
};
@@ -332,11 +332,10 @@ static void afs_issue_read(struct netfs_io_subrequest *subreq)
afs_put_read(fsreq);
}
-static int afs_symlink_readpage(struct file *file, struct page *page)
+static int afs_symlink_read_folio(struct file *file, struct folio *folio)
{
- struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+ struct afs_vnode *vnode = AFS_FS_I(folio->mapping->host);
struct afs_read *fsreq;
- struct folio *folio = page_folio(page);
int ret;
fsreq = afs_alloc_read(GFP_NOFS);
@@ -347,13 +346,13 @@ static int afs_symlink_readpage(struct file *file, struct page *page)
fsreq->len = folio_size(folio);
fsreq->vnode = vnode;
fsreq->iter = &fsreq->def_iter;
- iov_iter_xarray(&fsreq->def_iter, READ, &page->mapping->i_pages,
+ iov_iter_xarray(&fsreq->def_iter, READ, &folio->mapping->i_pages,
fsreq->pos, fsreq->len);
ret = afs_fetch_data(fsreq->vnode, fsreq);
if (ret == 0)
- SetPageUptodate(page);
- unlock_page(page);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return ret;
}
@@ -482,16 +481,15 @@ static void afs_invalidate_folio(struct folio *folio, size_t offset,
* release a page and clean up its private state if it's not busy
* - return true if the page can now be released, false if not
*/
-static int afs_releasepage(struct page *page, gfp_t gfp)
+static bool afs_release_folio(struct folio *folio, gfp_t gfp)
{
- struct folio *folio = page_folio(page);
struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
_enter("{{%llx:%llu}[%lu],%lx},%x",
vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
gfp);
- /* deny if page is being written to the cache and the caller hasn't
+ /* deny if folio is being written to the cache and the caller hasn't
* elected to wait */
#ifdef CONFIG_AFS_FSCACHE
if (folio_test_fscache(folio)) {
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 7b7ef945dc78e4..a30995901266fd 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -311,7 +311,7 @@ struct afs_net {
atomic_t n_lookup; /* Number of lookups done */
atomic_t n_reval; /* Number of dentries needing revalidation */
atomic_t n_inval; /* Number of invalidations by the server */
- atomic_t n_relpg; /* Number of invalidations by releasepage */
+ atomic_t n_relpg; /* Number of invalidations by release_folio */
atomic_t n_read_dir; /* Number of directory pages read */
atomic_t n_dir_cr; /* Number of directory entry creation edits */
atomic_t n_dir_rm; /* Number of directory entry removal edits */
@@ -1535,7 +1535,7 @@ bool afs_dirty_folio(struct address_space *, struct folio *);
#define afs_dirty_folio filemap_dirty_folio
#endif
extern int afs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
extern int afs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 4763132ca57e7f..5224e346fbad53 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -42,7 +42,7 @@ static void afs_folio_start_fscache(bool caching, struct folio *folio)
* prepare to perform part of a write to a page
*/
int afs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **_page, void **fsdata)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
@@ -60,7 +60,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
* file. We need to do this before we get a lock on the page in case
* there's more than one writer competing for the same cache block.
*/
- ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata);
+ ret = netfs_write_begin(file, mapping, pos, len, &folio, fsdata);
if (ret < 0)
return ret;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index b4b3567ac65539..be383fa46b12a8 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -40,7 +40,7 @@ MODULE_LICENSE("GPL");
static int befs_readdir(struct file *, struct dir_context *);
static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
-static int befs_readpage(struct file *file, struct page *page);
+static int befs_read_folio(struct file *file, struct folio *folio);
static sector_t befs_bmap(struct address_space *mapping, sector_t block);
static struct dentry *befs_lookup(struct inode *, struct dentry *,
unsigned int);
@@ -48,7 +48,7 @@ static struct inode *befs_iget(struct super_block *, unsigned long);
static struct inode *befs_alloc_inode(struct super_block *sb);
static void befs_free_inode(struct inode *inode);
static void befs_destroy_inodecache(void);
-static int befs_symlink_readpage(struct file *, struct page *);
+static int befs_symlink_read_folio(struct file *, struct folio *);
static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
char **out, int *out_len);
static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
@@ -87,12 +87,12 @@ static const struct inode_operations befs_dir_inode_operations = {
};
static const struct address_space_operations befs_aops = {
- .readpage = befs_readpage,
+ .read_folio = befs_read_folio,
.bmap = befs_bmap,
};
static const struct address_space_operations befs_symlink_aops = {
- .readpage = befs_symlink_readpage,
+ .read_folio = befs_symlink_read_folio,
};
static const struct export_operations befs_export_operations = {
@@ -102,16 +102,16 @@ static const struct export_operations befs_export_operations = {
};
/*
- * Called by generic_file_read() to read a page of data
+ * Called by generic_file_read() to read a folio of data
*
* In turn, simply calls a generic block read function and
* passes it the address of befs_get_block, for mapping file
* positions to disk blocks.
*/
static int
-befs_readpage(struct file *file, struct page *page)
+befs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, befs_get_block);
+ return block_read_full_folio(folio, befs_get_block);
}
static sector_t
@@ -468,8 +468,9 @@ befs_destroy_inodecache(void)
* The data stream become link name. Unless the LONG_SYMLINK
* flag is set.
*/
-static int befs_symlink_readpage(struct file *unused, struct page *page)
+static int befs_symlink_read_folio(struct file *unused, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct super_block *sb = inode->i_sb;
struct befs_inode_info *befs_ino = BEFS_I(inode);
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index 03139344568f57..57ae5ee6deec12 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -155,9 +155,9 @@ static int bfs_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page, bfs_get_block, wbc);
}
-static int bfs_readpage(struct file *file, struct page *page)
+static int bfs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, bfs_get_block);
+ return block_read_full_folio(folio, bfs_get_block);
}
static void bfs_write_failed(struct address_space *mapping, loff_t to)
@@ -169,13 +169,12 @@ static void bfs_write_failed(struct address_space *mapping, loff_t to)
}
static int bfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, flags, pagep,
- bfs_get_block);
+ ret = block_write_begin(mapping, pos, len, pagep, bfs_get_block);
if (unlikely(ret))
bfs_write_failed(mapping, pos + len);
@@ -190,7 +189,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations bfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = bfs_readpage,
+ .read_folio = bfs_read_folio,
.writepage = bfs_writepage,
.write_begin = bfs_write_begin,
.write_end = generic_write_end,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 14f8a90df3217b..89e94ea2fef5b4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -996,12 +996,12 @@ static int btree_writepages(struct address_space *mapping,
return btree_write_cache_pages(mapping, wbc);
}
-static int btree_releasepage(struct page *page, gfp_t gfp_flags)
+static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
{
- if (PageWriteback(page) || PageDirty(page))
- return 0;
+ if (folio_test_writeback(folio) || folio_test_dirty(folio))
+ return false;
- return try_release_extent_buffer(page);
+ return try_release_extent_buffer(&folio->page);
}
static void btree_invalidate_folio(struct folio *folio, size_t offset,
@@ -1010,7 +1010,7 @@ static void btree_invalidate_folio(struct folio *folio, size_t offset,
struct extent_io_tree *tree;
tree = &BTRFS_I(folio->mapping->host)->io_tree;
extent_invalidate_folio(tree, folio, offset);
- btree_releasepage(&folio->page, GFP_NOFS);
+ btree_release_folio(folio, GFP_NOFS);
if (folio_get_private(folio)) {
btrfs_warn(BTRFS_I(folio->mapping->host)->root->fs_info,
"folio private not zero on folio %llu",
@@ -1071,7 +1071,7 @@ static bool btree_dirty_folio(struct address_space *mapping,
static const struct address_space_operations btree_aops = {
.writepages = btree_writepages,
- .releasepage = btree_releasepage,
+ .release_folio = btree_release_folio,
.invalidate_folio = btree_invalidate_folio,
#ifdef CONFIG_MIGRATION
.migratepage = btree_migratepage,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 588c7c606a2c6a..8f6b544ae61674 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3799,8 +3799,9 @@ out:
return ret;
}
-int btrfs_readpage(struct file *file, struct page *page)
+int btrfs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
u64 start = page_offset(page);
u64 end = start + PAGE_SIZE - 1;
@@ -5306,7 +5307,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
}
/*
- * a helper for releasepage, this tests for areas of the page that
+ * a helper for release_folio, this tests for areas of the page that
* are locked or under IO and drops the related state bits if it is safe
* to drop the page.
*/
@@ -5342,7 +5343,7 @@ static int try_release_extent_state(struct extent_io_tree *tree,
}
/*
- * a helper for releasepage. As long as there are no locked extents
+ * a helper for release_folio. As long as there are no locked extents
* in the range corresponding to the page, both state records and extent
* map records are removed
*/
@@ -6042,10 +6043,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
*
* It is only cleared in two cases: freeing the last non-tree
* reference to the extent_buffer when its STALE bit is set or
- * calling releasepage when the tree reference is the only reference.
+ * calling release_folio when the tree reference is the only reference.
*
* In both cases, care is taken to ensure that the extent_buffer's
- * pages are not under io. However, releasepage can be concurrently
+ * pages are not under io. However, release_folio can be concurrently
* called with creating new references, which is prone to race
* conditions between the calls to check_buffer_tree_ref in those
* codepaths and clearing TREE_REF in try_release_extent_buffer.
@@ -6310,7 +6311,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
/*
* We can't unlock the pages just yet since the extent buffer
* hasn't been properly inserted in the radix tree, this
- * opens a race with btree_releasepage which can free a page
+ * opens a race with btree_release_folio which can free a page
* while we are still filling in all pages for the buffer and
* we could crash.
*/
@@ -6339,7 +6340,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
/*
* Now it's safe to unlock the pages because any calls to
- * btree_releasepage will correctly detect that a page belongs to a
+ * btree_release_folio will correctly detect that a page belongs to a
* live buffer and won't free them prematurely.
*/
for (i = 0; i < num_pages; i++)
@@ -6721,7 +6722,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
eb->read_mirror = 0;
atomic_set(&eb->io_pages, num_reads);
/*
- * It is possible for releasepage to clear the TREE_REF bit before we
+ * It is possible for release_folio to clear the TREE_REF bit before we
* set io_pages. See check_buffer_tree_ref for a more detailed comment.
*/
check_buffer_tree_ref(eb);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 956fa434df4358..23d4103c883160 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -149,7 +149,7 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
-int btrfs_readpage(struct file *file, struct page *page);
+int btrfs_read_folio(struct file *file, struct folio *folio);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end);
int extent_writepages(struct address_space *mapping,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 46c2baa8fdf540..1fd827b99c1ba2 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1307,11 +1307,12 @@ static int prepare_uptodate_page(struct inode *inode,
struct page *page, u64 pos,
bool force_uptodate)
{
+ struct folio *folio = page_folio(page);
int ret = 0;
if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
!PageUptodate(page)) {
- ret = btrfs_readpage(NULL, page);
+ ret = btrfs_read_folio(NULL, folio);
if (ret)
return ret;
lock_page(page);
@@ -1321,8 +1322,8 @@ static int prepare_uptodate_page(struct inode *inode,
}
/*
- * Since btrfs_readpage() will unlock the page before it
- * returns, there is a window where btrfs_releasepage() can be
+ * Since btrfs_read_folio() will unlock the folio before it
+ * returns, there is a window where btrfs_release_folio() can be
* called to release the page. Here we check both inode
* mapping and PagePrivate() to make sure the page was not
* released.
@@ -2364,7 +2365,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct address_space *mapping = filp->f_mapping;
- if (!mapping->a_ops->readpage)
+ if (!mapping->a_ops->read_folio)
return -ENOEXEC;
file_accessed(filp);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index f7adee6fa05ece..b1ae3ba2ca2c37 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -465,7 +465,7 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
io_ctl->pages[i] = page;
if (uptodate && !PageUptodate(page)) {
- btrfs_readpage(NULL, page);
+ btrfs_read_folio(NULL, page_folio(page));
lock_page(page);
if (page->mapping != inode->i_mapping) {
btrfs_err(BTRFS_I(inode)->root->fs_info,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index da13bd0d10f126..81737eff92f3d8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4809,7 +4809,7 @@ again:
goto out_unlock;
if (!PageUptodate(page)) {
- ret = btrfs_readpage(NULL, page);
+ ret = btrfs_read_folio(NULL, page_folio(page));
lock_page(page);
if (page->mapping != mapping) {
unlock_page(page);
@@ -8204,7 +8204,7 @@ static void btrfs_readahead(struct readahead_control *rac)
}
/*
- * For releasepage() and invalidate_folio() we have a race window where
+ * For release_folio() and invalidate_folio() we have a race window where
* folio_end_writeback() is called but the subpage spinlock is not yet released.
* If we continue to release/invalidate the page, we could cause use-after-free
* for subpage spinlock. So this function is to spin and wait for subpage
@@ -8236,22 +8236,22 @@ static void wait_subpage_spinlock(struct page *page)
spin_unlock_irq(&subpage->lock);
}
-static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
- int ret = try_release_extent_mapping(page, gfp_flags);
+ int ret = try_release_extent_mapping(&folio->page, gfp_flags);
if (ret == 1) {
- wait_subpage_spinlock(page);
- clear_page_extent_mapped(page);
+ wait_subpage_spinlock(&folio->page);
+ clear_page_extent_mapped(&folio->page);
}
return ret;
}
-static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
- if (PageWriteback(page) || PageDirty(page))
- return 0;
- return __btrfs_releasepage(page, gfp_flags);
+ if (folio_test_writeback(folio) || folio_test_dirty(folio))
+ return false;
+ return __btrfs_release_folio(folio, gfp_flags);
}
#ifdef CONFIG_MIGRATION
@@ -8322,7 +8322,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
* still safe to wait for ordered extent to finish.
*/
if (!(offset == 0 && length == folio_size(folio))) {
- btrfs_releasepage(&folio->page, GFP_NOFS);
+ btrfs_release_folio(folio, GFP_NOFS);
return;
}
@@ -8446,7 +8446,7 @@ next:
ASSERT(!folio_test_ordered(folio));
btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
if (!inode_evicting)
- __btrfs_releasepage(&folio->page, GFP_NOFS);
+ __btrfs_release_folio(folio, GFP_NOFS);
clear_page_extent_mapped(&folio->page);
}
@@ -11415,13 +11415,13 @@ static const struct file_operations btrfs_dir_file_operations = {
* For now we're avoiding this by dropping bmap.
*/
static const struct address_space_operations btrfs_aops = {
- .readpage = btrfs_readpage,
+ .read_folio = btrfs_read_folio,
.writepage = btrfs_writepage,
.writepages = btrfs_writepages,
.readahead = btrfs_readahead,
.direct_IO = noop_direct_IO,
.invalidate_folio = btrfs_invalidate_folio,
- .releasepage = btrfs_releasepage,
+ .release_folio = btrfs_release_folio,
#ifdef CONFIG_MIGRATION
.migratepage = btrfs_migratepage,
#endif
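The wait_subpage_spinlock() comment above relies on a classic idiom: acquiring and immediately releasing a spinlock cannot complete until the current holder's critical section has finished, so an empty lock/unlock pair acts as a barrier. In sketch form (the real helper additionally checks that the page actually carries subpage private data first):

static void wait_for_spinlock_holders(spinlock_t *lock)
{
        /*
         * spin_lock_irq() cannot return until any folio_end_writeback()
         * still inside its critical section has dropped the lock.
         */
        spin_lock_irq(lock);
        spin_unlock_irq(lock);
}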
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 43b6f23bbd8926..0f79af919bc4ea 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1358,7 +1358,7 @@ again:
* make it uptodate.
*/
if (!PageUptodate(page)) {
- btrfs_readpage(NULL, page);
+ btrfs_read_folio(NULL, page_folio(page));
lock_page(page);
if (page->mapping != mapping || !PagePrivate(page)) {
unlock_page(page);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index edddd93d2118e4..a6dc827e75af06 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1101,7 +1101,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
continue;
/*
- * if we are modifying block in fs tree, wait for readpage
+ * if we are modifying a block in the fs tree, wait for read_folio
* to complete and drop the extent cache
*/
if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
@@ -1563,7 +1563,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
end = (u64)-1;
}
- /* the lock_extent waits for readpage to complete */
+ /* the lock_extent waits for read_folio to complete */
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
@@ -2818,7 +2818,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
* Subpage can't handle page with DIRTY but without UPTODATE
* bit as it can lead to the following deadlock:
*
- * btrfs_readpage()
+ * btrfs_read_folio()
* | Page already *locked*
* |- btrfs_lock_and_flush_ordered_range()
* |- btrfs_start_ordered_extent()
@@ -2967,11 +2967,12 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
goto release_page;
if (PageReadahead(page))
- page_cache_async_readahead(inode->i_mapping, ra, NULL, page,
- page_index, last_index + 1 - page_index);
+ page_cache_async_readahead(inode->i_mapping, ra, NULL,
+ page_folio(page), page_index,
+ last_index + 1 - page_index);
if (!PageUptodate(page)) {
- btrfs_readpage(NULL, page);
+ btrfs_read_folio(NULL, page_folio(page));
lock_page(page);
if (!PageUptodate(page)) {
ret = -EIO;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 5a05beabf0c34c..fa56890ff81fc0 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4907,11 +4907,11 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
if (PageReadahead(page))
page_cache_async_readahead(sctx->cur_inode->i_mapping,
- &sctx->ra, NULL, page, index,
- last_index + 1 - index);
+ &sctx->ra, NULL, page_folio(page),
+ index, last_index + 1 - index);
if (!PageUptodate(page)) {
- btrfs_readpage(NULL, page);
+ btrfs_read_folio(NULL, page_folio(page));
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
diff --git a/fs/buffer.c b/fs/buffer.c
index 2b5561ae5d0b32..898c7f301b1b92 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -79,26 +79,26 @@ void unlock_buffer(struct buffer_head *bh)
EXPORT_SYMBOL(unlock_buffer);
/*
- * Returns if the page has dirty or writeback buffers. If all the buffers
- * are unlocked and clean then the PageDirty information is stale. If
- * any of the pages are locked, it is assumed they are locked for IO.
+ * Returns whether the folio has dirty or writeback buffers. If all the buffers
+ * are unlocked and clean then the folio_test_dirty information is stale. If
+ * any of the buffers are locked, it is assumed they are locked for IO.
*/
-void buffer_check_dirty_writeback(struct page *page,
+void buffer_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback)
{
struct buffer_head *head, *bh;
*dirty = false;
*writeback = false;
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
- if (!page_has_buffers(page))
+ head = folio_buffers(folio);
+ if (!head)
return;
- if (PageWriteback(page))
+ if (folio_test_writeback(folio))
*writeback = true;
- head = page_buffers(page);
bh = head;
do {
if (buffer_locked(bh))
@@ -314,7 +314,7 @@ static void decrypt_bh(struct work_struct *work)
}
/*
- * I/O completion handler for block_read_full_page() - pages
+ * I/O completion handler for block_read_full_folio() - pages
* which come unlocked at the end of I/O.
*/
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
@@ -955,7 +955,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
size);
goto done;
}
- if (!try_to_free_buffers(page))
+ if (!try_to_free_buffers(page_folio(page)))
goto failed;
}
@@ -1060,8 +1060,8 @@ __getblk_slow(struct block_device *bdev, sector_t block,
* Also. When blockdev buffers are explicitly read with bread(), they
* individually become uptodate. But their backing page remains not
* uptodate - even if all of its buffers are uptodate. A subsequent
- * block_read_full_page() against that page will discover all the uptodate
- * buffers, will set the page uptodate and will perform no I/O.
+ * block_read_full_folio() against that folio will discover all the uptodate
+ * buffers, will set the folio uptodate and will perform no I/O.
*/
/**
@@ -2088,7 +2088,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
/*
* If this is a partial write which happened to make all buffers
- * uptodate then we can optimize away a bogus readpage() for
+ * uptodate then we can optimize away a bogus read_folio() for
* the next read(). Here we 'discover' whether the page went
* uptodate as a result of this (potentially partial) write.
*/
@@ -2104,13 +2104,13 @@ static int __block_commit_write(struct inode *inode, struct page *page,
* The filesystem needs to handle block truncation upon failure.
*/
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- unsigned flags, struct page **pagep, get_block_t *get_block)
+ struct page **pagep, get_block_t *get_block)
{
pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
int status;
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
@@ -2137,12 +2137,12 @@ int block_write_end(struct file *file, struct address_space *mapping,
if (unlikely(copied < len)) {
/*
- * The buffers that were written will now be uptodate, so we
- * don't have to worry about a readpage reading them and
- * overwriting a partial write. However if we have encountered
- * a short write and only partially written into a buffer, it
- * will not be marked uptodate, so a readpage might come in and
- * destroy our partial write.
+ * The buffers that were written will now be uptodate, so
+ * we don't have to worry about a read_folio reading them
+ * and overwriting a partial write. However if we have
+ * encountered a short write and only partially written
+ * into a buffer, it will not be marked uptodate, so a
+ * read_folio might come in and destroy our partial write.
*
* Do the simplest thing, and just treat any short write to a
* non uptodate page as a zero-length write, and force the
@@ -2245,26 +2245,28 @@ bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
EXPORT_SYMBOL(block_is_partially_uptodate);
/*
- * Generic "read page" function for block devices that have the normal
+ * Generic "read_folio" function for block devices that have the normal
* get_block functionality. This is most of the block device filesystems.
- * Reads the page asynchronously --- the unlock_buffer() and
+ * Reads the folio asynchronously --- the unlock_buffer() and
* set/clear_buffer_uptodate() functions propagate buffer state into the
- * page struct once IO has completed.
+ * folio once IO has completed.
*/
-int block_read_full_page(struct page *page, get_block_t *get_block)
+int block_read_full_folio(struct folio *folio, get_block_t *get_block)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
sector_t iblock, lblock;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
unsigned int blocksize, bbits;
int nr, i;
int fully_mapped = 1;
- head = create_page_buffers(page, inode, 0);
+ VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+
+ head = create_page_buffers(&folio->page, inode, 0);
blocksize = head->b_size;
bbits = block_size_bits(blocksize);
- iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
+ iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
lblock = (i_size_read(inode)+blocksize-1) >> bbits;
bh = head;
nr = 0;
@@ -2282,10 +2284,11 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
- SetPageError(page);
+ folio_set_error(folio);
}
if (!buffer_mapped(bh)) {
- zero_user(page, i * blocksize, blocksize);
+ folio_zero_range(folio, i * blocksize,
+ blocksize);
if (!err)
set_buffer_uptodate(bh);
continue;
@@ -2301,16 +2304,16 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
} while (i++, iblock++, (bh = bh->b_this_page) != head);
if (fully_mapped)
- SetPageMappedToDisk(page);
+ folio_set_mappedtodisk(folio);
if (!nr) {
/*
- * All buffers are uptodate - we can set the page uptodate
+ * All buffers are uptodate - we can set the folio uptodate
* as well. But not if get_block() returned an error.
*/
- if (!PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
+ if (!folio_test_error(folio))
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return 0;
}
@@ -2335,7 +2338,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
}
return 0;
}
-EXPORT_SYMBOL(block_read_full_page);
+EXPORT_SYMBOL(block_read_full_folio);
/* utility function for filesystems that need to do work on expanding
* truncates. Uses filesystem pagecache writes to allow the filesystem to
@@ -2344,6 +2347,7 @@ EXPORT_SYMBOL(block_read_full_page);
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *aops = mapping->a_ops;
struct page *page;
void *fsdata;
int err;
@@ -2352,11 +2356,11 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
if (err)
goto out;
- err = pagecache_write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
+ err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
if (err)
goto out;
- err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
+ err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
BUG_ON(err > 0);
out:
@@ -2368,6 +2372,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
loff_t pos, loff_t *bytes)
{
struct inode *inode = mapping->host;
+ const struct address_space_operations *aops = mapping->a_ops;
unsigned int blocksize = i_blocksize(inode);
struct page *page;
void *fsdata;
@@ -2387,12 +2392,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
}
len = PAGE_SIZE - zerofrom;
- err = pagecache_write_begin(file, mapping, curpos, len, 0,
+ err = aops->write_begin(file, mapping, curpos, len,
&page, &fsdata);
if (err)
goto out;
zero_user(page, zerofrom, len);
- err = pagecache_write_end(file, mapping, curpos, len, len,
+ err = aops->write_end(file, mapping, curpos, len, len,
page, fsdata);
if (err < 0)
goto out;
@@ -2420,12 +2425,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
}
len = offset - zerofrom;
- err = pagecache_write_begin(file, mapping, curpos, len, 0,
+ err = aops->write_begin(file, mapping, curpos, len,
&page, &fsdata);
if (err)
goto out;
zero_user(page, zerofrom, len);
- err = pagecache_write_end(file, mapping, curpos, len, len,
+ err = aops->write_end(file, mapping, curpos, len, len,
page, fsdata);
if (err < 0)
goto out;
@@ -2441,7 +2446,7 @@ out:
* We may have to extend the file.
*/
int cont_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata,
get_block_t *get_block, loff_t *bytes)
{
@@ -2460,7 +2465,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
(*bytes)++;
}
- return block_write_begin(mapping, pos, len, flags, pagep, get_block);
+ return block_write_begin(mapping, pos, len, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);
@@ -2568,8 +2573,7 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
* On exit the page is fully uptodate in the areas outside (from,to)
* The filesystem needs to handle block truncation upon failure.
*/
-int nobh_write_begin(struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+int nobh_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
struct page **pagep, void **fsdata,
get_block_t *get_block)
{
@@ -2591,7 +2595,7 @@ int nobh_write_begin(struct address_space *mapping,
from = pos & (PAGE_SIZE - 1);
to = from + len;
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
*pagep = page;
@@ -2790,44 +2794,28 @@ int nobh_truncate_page(struct address_space *mapping,
loff_t from, get_block_t *get_block)
{
pgoff_t index = from >> PAGE_SHIFT;
- unsigned offset = from & (PAGE_SIZE-1);
- unsigned blocksize;
- sector_t iblock;
- unsigned length, pos;
struct inode *inode = mapping->host;
- struct page *page;
+ unsigned blocksize = i_blocksize(inode);
+ struct folio *folio;
struct buffer_head map_bh;
+ size_t offset;
+ sector_t iblock;
int err;
- blocksize = i_blocksize(inode);
- length = offset & (blocksize - 1);
-
/* Block boundary? Nothing to do */
- if (!length)
+ if (!(from & (blocksize - 1)))
return 0;
- length = blocksize - length;
- iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
-
- page = grab_cache_page(mapping, index);
+ folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
+ mapping_gfp_mask(mapping));
err = -ENOMEM;
- if (!page)
+ if (!folio)
goto out;
- if (page_has_buffers(page)) {
-has_buffers:
- unlock_page(page);
- put_page(page);
- return block_truncate_page(mapping, from, get_block);
- }
-
- /* Find the buffer that contains "offset" */
- pos = blocksize;
- while (offset >= pos) {
- iblock++;
- pos += blocksize;
- }
+ if (folio_buffers(folio))
+ goto has_buffers;
+ iblock = from >> inode->i_blkbits;
map_bh.b_size = blocksize;
map_bh.b_state = 0;
err = get_block(inode, iblock, &map_bh, 0);
@@ -2838,29 +2826,35 @@ has_buffers:
goto unlock;
/* Ok, it's mapped. Make sure it's up-to-date */
- if (!PageUptodate(page)) {
- err = mapping->a_ops->readpage(NULL, page);
+ if (!folio_test_uptodate(folio)) {
+ err = mapping->a_ops->read_folio(NULL, folio);
if (err) {
- put_page(page);
+ folio_put(folio);
goto out;
}
- lock_page(page);
- if (!PageUptodate(page)) {
+ folio_lock(folio);
+ if (!folio_test_uptodate(folio)) {
err = -EIO;
goto unlock;
}
- if (page_has_buffers(page))
+ if (folio_buffers(folio))
goto has_buffers;
}
- zero_user(page, offset, length);
- set_page_dirty(page);
+ offset = offset_in_folio(folio, from);
+ folio_zero_segment(folio, offset, round_up(offset, blocksize));
+ folio_mark_dirty(folio);
err = 0;
unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
out:
return err;
+
+has_buffers:
+ folio_unlock(folio);
+ folio_put(folio);
+ return block_truncate_page(mapping, from, get_block);
}
EXPORT_SYMBOL(nobh_truncate_page);
@@ -3161,20 +3155,20 @@ int sync_dirty_buffer(struct buffer_head *bh)
EXPORT_SYMBOL(sync_dirty_buffer);
/*
- * try_to_free_buffers() checks if all the buffers on this particular page
+ * try_to_free_buffers() checks if all the buffers on this particular folio
* are unused, and releases them if so.
*
* Exclusion against try_to_free_buffers may be obtained by either
- * locking the page or by holding its mapping's private_lock.
+ * locking the folio or by holding its mapping's private_lock.
*
- * If the page is dirty but all the buffers are clean then we need to
- * be sure to mark the page clean as well. This is because the page
+ * If the folio is dirty but all the buffers are clean then we need to
+ * be sure to mark the folio clean as well. This is because the folio
* may be against a block device, and a later reattachment of buffers
- * to a dirty page will set *all* buffers dirty. Which would corrupt
+ * to a dirty folio will set *all* buffers dirty. Which would corrupt
* filesystem data on the same device.
*
- * The same applies to regular filesystem pages: if all the buffers are
- * clean then we set the page clean and proceed. To do that, we require
+ * The same applies to regular filesystem folios: if all the buffers are
+ * clean then we set the folio clean and proceed. To do that, we require
* total exclusion from block_dirty_folio(). That is obtained with
* private_lock.
*
@@ -3186,10 +3180,10 @@ static inline int buffer_busy(struct buffer_head *bh)
(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}
-static int
-drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
+static bool
+drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
{
- struct buffer_head *head = page_buffers(page);
+ struct buffer_head *head = folio_buffers(folio);
struct buffer_head *bh;
bh = head;
@@ -3207,46 +3201,46 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
bh = next;
} while (bh != head);
*buffers_to_free = head;
- detach_page_private(page);
- return 1;
+ folio_detach_private(folio);
+ return true;
failed:
- return 0;
+ return false;
}
-int try_to_free_buffers(struct page *page)
+bool try_to_free_buffers(struct folio *folio)
{
- struct address_space * const mapping = page->mapping;
+ struct address_space * const mapping = folio->mapping;
struct buffer_head *buffers_to_free = NULL;
- int ret = 0;
+ bool ret = false;
- BUG_ON(!PageLocked(page));
- if (PageWriteback(page))
- return 0;
+ BUG_ON(!folio_test_locked(folio));
+ if (folio_test_writeback(folio))
+ return false;
if (mapping == NULL) { /* can this still happen? */
- ret = drop_buffers(page, &buffers_to_free);
+ ret = drop_buffers(folio, &buffers_to_free);
goto out;
}
spin_lock(&mapping->private_lock);
- ret = drop_buffers(page, &buffers_to_free);
+ ret = drop_buffers(folio, &buffers_to_free);
/*
* If the filesystem writes its buffers by hand (eg ext3)
- * then we can have clean buffers against a dirty page. We
- * clean the page here; otherwise the VM will never notice
+ * then we can have clean buffers against a dirty folio. We
+ * clean the folio here; otherwise the VM will never notice
* that the filesystem did any IO at all.
*
* Also, during truncate, discard_buffer will have marked all
- * the page's buffers clean. We discover that here and clean
- * the page also.
+ * the folio's buffers clean. We discover that here and clean
+ * the folio also.
*
* private_lock must be held over this entire operation in order
* to synchronise against block_dirty_folio and prevent the
* dirty bit from being lost.
*/
if (ret)
- cancel_dirty_page(page);
+ folio_cancel_dirty(folio);
spin_unlock(&mapping->private_lock);
out:
if (buffers_to_free) {
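With pagecache_write_begin() and pagecache_write_end() gone, generic_cont_expand_simple() and cont_expand_zero() above call the address_space operations directly. A minimal sketch of that calling convention, assuming (as cont_expand_zero() guarantees) that the range never crosses a page boundary:

static int zero_range_via_aops(struct address_space *mapping,
                               loff_t pos, unsigned len)
{
        const struct address_space_operations *aops = mapping->a_ops;
        struct page *page;
        void *fsdata;
        int err;

        err = aops->write_begin(NULL, mapping, pos, len, &page, &fsdata);
        if (err)
                return err;

        zero_user(page, offset_in_page(pos), len);

        /* ->write_end returns the number of bytes committed, or -errno */
        err = aops->write_end(NULL, mapping, pos, len, len, page, fsdata);
        return err < 0 ? err : 0;
}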
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index b6edcf89a429f6..7584aa6e50252e 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -162,24 +162,24 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
folio_wait_fscache(folio);
}
-static int ceph_releasepage(struct page *page, gfp_t gfp)
+static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
- dout("%llx:%llx releasepage %p idx %lu (%sdirty)\n",
- ceph_vinop(inode), page,
- page->index, PageDirty(page) ? "" : "not ");
+ dout("%llx:%llx release_folio idx %lu (%sdirty)\n",
+ ceph_vinop(inode),
+ folio->index, folio_test_dirty(folio) ? "" : "not ");
- if (PagePrivate(page))
- return 0;
+ if (folio_test_private(folio))
+ return false;
- if (PageFsCache(page)) {
+ if (folio_test_fscache(folio)) {
if (current_is_kswapd() || !(gfp & __GFP_FS))
- return 0;
- wait_on_page_fscache(page);
+ return false;
+ folio_wait_fscache(folio);
}
ceph_fscache_note_page_release(inode);
- return 1;
+ return true;
}
static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
@@ -1314,14 +1314,14 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned
* clean, or already dirty within the same snap context.
*/
static int ceph_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned aop_flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct inode *inode = file_inode(file);
struct folio *folio = NULL;
int r;
- r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL);
+ r = netfs_write_begin(file, inode->i_mapping, pos, len, &folio, NULL);
if (r == 0)
folio_wait_fscache(folio);
if (r < 0) {
@@ -1375,7 +1375,7 @@ out:
}
const struct address_space_operations ceph_aops = {
- .readpage = netfs_readpage,
+ .read_folio = netfs_read_folio,
.readahead = netfs_readahead,
.writepage = ceph_writepage,
.writepages = ceph_writepages_start,
@@ -1383,7 +1383,7 @@ const struct address_space_operations ceph_aops = {
.write_end = ceph_write_end,
.dirty_folio = ceph_dirty_folio,
.invalidate_folio = ceph_invalidate_folio,
- .releasepage = ceph_releasepage,
+ .release_folio = ceph_release_folio,
.direct_IO = noop_direct_IO,
};
@@ -1775,7 +1775,7 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
struct address_space *mapping = file->f_mapping;
- if (!mapping->a_ops->readpage)
+ if (!mapping->a_ops->read_folio)
return -ENOEXEC;
file_accessed(file);
vma->vm_ops = &ceph_vmops;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d511a78383c38e..06003bb9cbe971 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -4612,8 +4612,9 @@ read_complete:
return rc;
}
-static int cifs_readpage(struct file *file, struct page *page)
+static int cifs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
loff_t offset = page_file_offset(page);
int rc = -EACCES;
unsigned int xid;
@@ -4626,7 +4627,7 @@ static int cifs_readpage(struct file *file, struct page *page)
return rc;
}
- cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
+ cifs_dbg(FYI, "read_folio %p at offset %d 0x%x\n",
page, (int)offset, (int)offset);
rc = cifs_readpage_worker(file, page, &offset);
@@ -4681,7 +4682,7 @@ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
}
static int cifs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int oncethru = 0;
@@ -4695,7 +4696,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
start:
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page) {
rc = -ENOMEM;
goto out;
@@ -4757,16 +4758,16 @@ out:
return rc;
}
-static int cifs_release_page(struct page *page, gfp_t gfp)
+static bool cifs_release_folio(struct folio *folio, gfp_t gfp)
{
- if (PagePrivate(page))
- return 0;
+ if (folio_test_private(folio))
+ return false;
- if (PageFsCache(page)) {
+ if (folio_test_fscache(folio)) {
if (current_is_kswapd() || !(gfp & __GFP_FS))
return false;
- wait_on_page_fscache(page);
+ folio_wait_fscache(folio);
}
- fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
+ fscache_note_page_release(cifs_inode_cookie(folio->mapping->host));
return true;
}
@@ -4965,14 +4966,14 @@ static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio)
#endif
const struct address_space_operations cifs_addr_ops = {
- .readpage = cifs_readpage,
+ .read_folio = cifs_read_folio,
.readahead = cifs_readahead,
.writepage = cifs_writepage,
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
.dirty_folio = cifs_dirty_folio,
- .releasepage = cifs_release_page,
+ .release_folio = cifs_release_folio,
.direct_IO = cifs_direct_io,
.invalidate_folio = cifs_invalidate_folio,
.launder_folio = cifs_launder_folio,
@@ -4986,18 +4987,18 @@ const struct address_space_operations cifs_addr_ops = {
};
/*
- * cifs_readpages requires the server to support a buffer large enough to
+ * cifs_readahead requires the server to support a buffer large enough to
* contain the header plus one complete page of data. Otherwise, we need
- * to leave cifs_readpages out of the address space operations.
+ * to leave cifs_readahead out of the address space operations.
*/
const struct address_space_operations cifs_addr_ops_smallbuf = {
- .readpage = cifs_readpage,
+ .read_folio = cifs_read_folio,
.writepage = cifs_writepage,
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
.dirty_folio = cifs_dirty_folio,
- .releasepage = cifs_release_page,
+ .release_folio = cifs_release_folio,
.invalidate_folio = cifs_invalidate_folio,
.launder_folio = cifs_launder_folio,
};
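The ceph and cifs hunks above both follow the new ->release_folio contract: return true when the filesystem has dropped its private state and the folio may be freed, false when the folio is still busy. Reduced to its skeleton (myfs_* is hypothetical):

static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
        /* private data still attached: the folio cannot be freed yet */
        if (folio_test_private(folio))
                return false;

        return true;
}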
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
index 8907d050819884..8adf810424986b 100644
--- a/fs/coda/symlink.c
+++ b/fs/coda/symlink.c
@@ -20,9 +20,10 @@
#include "coda_psdev.h"
#include "coda_linux.h"
-static int coda_symlink_filler(struct file *file, struct page *page)
+static int coda_symlink_filler(struct file *file, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct page *page = &folio->page;
+ struct inode *inode = folio->mapping->host;
int error;
struct coda_inode_info *cii;
unsigned int len = PAGE_SIZE;
@@ -44,5 +45,5 @@ fail:
}
const struct address_space_operations coda_symlink_aops = {
- .readpage = coda_symlink_filler,
+ .read_folio = coda_symlink_filler,
};
diff --git a/fs/cramfs/README b/fs/cramfs/README
index d71b27e0ff15c9..778df5c4d70bb2 100644
--- a/fs/cramfs/README
+++ b/fs/cramfs/README
@@ -115,7 +115,7 @@ Block Size
(Block size in cramfs refers to the size of input data that is
compressed at a time. It's intended to be somewhere around
-PAGE_SIZE for cramfs_readpage's convenience.)
+PAGE_SIZE for cramfs_read_folio's convenience.)
The superblock ought to indicate the block size that the fs was
written for, since comments in <linux/pagemap.h> indicate that
@@ -161,7 +161,7 @@ size. The options are:
PAGE_SIZE.
It's easy enough to change the kernel to use a smaller value than
-PAGE_SIZE: just make cramfs_readpage read multiple blocks.
+PAGE_SIZE: just make cramfs_read_folio read multiple blocks.
The cost of option 1 is that kernels with a larger PAGE_SIZE
value don't get as good compression as they can.
@@ -173,9 +173,9 @@ they don't mind their cramfs being inaccessible to kernels with
smaller PAGE_SIZE values.
Option 3 is easy to implement if we don't mind being CPU-inefficient:
-e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
+e.g. get read_folio to decompress to a buffer of size MAX_BLKSIZE (which
must be no larger than 32KB) and discard what it doesn't need.
-Getting readpage to read into all the covered pages is harder.
+Getting read_folio to read into all the covered pages is harder.
The main advantage of option 3 over 1, 2, is better compression. The
cost is greater complexity. Probably not worth it, but I hope someone
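To make option 3 concrete, here is a hedged sketch of a read_folio that decompresses an oversized block and keeps only the requested page. decompress_block() is hypothetical (it would inflate the block covering the folio into buf and return the bytes produced, or -errno); only the page-cache plumbing is meant literally.

static int option3_read_folio(struct file *file, struct folio *folio)
{
        /* byte offset of this page within its (up to 32KB) block */
        size_t off = (folio->index % (MAX_BLKSIZE / PAGE_SIZE)) * PAGE_SIZE;
        void *buf;
        int ret;

        buf = kmalloc(MAX_BLKSIZE, GFP_KERNEL);
        if (!buf) {
                folio_unlock(folio);
                return -ENOMEM;
        }

        ret = decompress_block(folio->mapping->host, folio->index, buf);
        if (ret >= (int)(off + PAGE_SIZE)) {
                memcpy_to_page(&folio->page, 0, buf + off, PAGE_SIZE);
                folio_mark_uptodate(folio);
                ret = 0;
        } else if (ret >= 0) {
                ret = -EIO;     /* short block: treated as an error here */
        }

        kfree(buf);
        folio_unlock(folio);    /* ->read_folio must unlock in all cases */
        return ret;
}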
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 666aa380011e00..7ae59a6afc5c19 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -414,7 +414,7 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
/*
* Let's create a mixed map if we can't map it all.
* The normal paging machinery will take care of the
- * unpopulated ptes via cramfs_readpage().
+ * unpopulated ptes via cramfs_read_folio().
*/
int i;
vma->vm_flags |= VM_MIXEDMAP;
@@ -814,8 +814,9 @@ out:
return d_splice_alias(inode, dentry);
}
-static int cramfs_readpage(struct file *file, struct page *page)
+static int cramfs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
u32 maxblock;
int bytes_filled;
@@ -925,7 +926,7 @@ err:
}
static const struct address_space_operations cramfs_aops = {
- .readpage = cramfs_readpage
+ .read_folio = cramfs_read_folio
};
/*
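cramfs_read_folio() above uses the transitional shim that recurs throughout this series (cifs, coda, ecryptfs, efs, z_erofs, ext4 and f2fs all do the same): the new ->read_folio entry point peels the head page out of the folio and defers to the unconverted page-based code. In outline, with myfs_readpage_impl() standing in for the existing implementation:

static int myfs_read_folio(struct file *file, struct folio *folio)
{
        /*
         * Valid while the filesystem only sees order-0 folios, where
         * the folio and its single page are the same object.
         */
        struct page *page = &folio->page;

        return myfs_readpage_impl(file, page);
}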
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 9ad61b582f0771..19af229eb7caef 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -170,16 +170,17 @@ out:
}
/**
- * ecryptfs_readpage
+ * ecryptfs_read_folio
* @file: An eCryptfs file
- * @page: Page from eCryptfs inode mapping into which to stick the read data
+ * @folio: Folio from eCryptfs inode mapping into which to stick the read data
*
- * Read in a page, decrypting if necessary.
+ * Read in a folio, decrypting if necessary.
*
* Returns zero on success; non-zero on error.
*/
-static int ecryptfs_readpage(struct file *file, struct page *page)
+static int ecryptfs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
int rc = 0;
@@ -264,7 +265,7 @@ out:
*/
static int ecryptfs_write_begin(struct file *file,
struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
@@ -272,7 +273,7 @@ static int ecryptfs_write_begin(struct file *file,
loff_t prev_page_end_size;
int rc = 0;
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
*pagep = page;
@@ -549,7 +550,7 @@ const struct address_space_operations ecryptfs_aops = {
.invalidate_folio = block_invalidate_folio,
#endif
.writepage = ecryptfs_writepage,
- .readpage = ecryptfs_readpage,
+ .read_folio = ecryptfs_read_folio,
.write_begin = ecryptfs_write_begin,
.write_end = ecryptfs_write_end,
.bmap = ecryptfs_bmap,
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 89e73a6f0d361c..3ba94bb005a600 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -14,16 +14,18 @@
#include "efs.h"
#include <linux/efs_fs_sb.h>
-static int efs_readpage(struct file *file, struct page *page)
+static int efs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page,efs_get_block);
+ return block_read_full_folio(folio, efs_get_block);
}
+
static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,efs_get_block);
}
+
static const struct address_space_operations efs_aops = {
- .readpage = efs_readpage,
+ .read_folio = efs_read_folio,
.bmap = _efs_bmap
};
diff --git a/fs/efs/symlink.c b/fs/efs/symlink.c
index 923eb91654d5c9..3b03a573cb1a0f 100644
--- a/fs/efs/symlink.c
+++ b/fs/efs/symlink.c
@@ -12,8 +12,9 @@
#include <linux/buffer_head.h>
#include "efs.h"
-static int efs_symlink_readpage(struct file *file, struct page *page)
+static int efs_symlink_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
char *link = page_address(page);
struct buffer_head * bh;
struct inode * inode = page->mapping->host;
@@ -49,5 +50,5 @@ fail:
}
const struct address_space_operations efs_symlink_aops = {
- .readpage = efs_symlink_readpage
+ .read_folio = efs_symlink_read_folio
};
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 252f4ee977d569..fbb037ba326e5f 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -351,9 +351,9 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
* since we don't have write or truncate flows, no inode
* locking needs to be held at the moment.
*/
-static int erofs_readpage(struct file *file, struct page *page)
+static int erofs_read_folio(struct file *file, struct folio *folio)
{
- return iomap_readpage(page, &erofs_iomap_ops);
+ return iomap_read_folio(folio, &erofs_iomap_ops);
}
static void erofs_readahead(struct readahead_control *rac)
@@ -408,7 +408,7 @@ static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
- .readpage = erofs_readpage,
+ .read_folio = erofs_read_folio,
.readahead = erofs_readahead,
.bmap = erofs_bmap,
.direct_IO = noop_direct_IO,
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index 7e4417167d0b41..a5cc4ed2cd0d0d 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -205,10 +205,9 @@ out:
return ret;
}
-static int erofs_fscache_meta_readpage(struct file *data, struct page *page)
+static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
{
int ret;
- struct folio *folio = page_folio(page);
struct super_block *sb = folio_mapping(folio)->host->i_sb;
struct netfs_io_request *rreq;
struct erofs_map_dev mdev = {
@@ -232,7 +231,7 @@ out:
return ret;
}
-static int erofs_fscache_readpage_inline(struct folio *folio,
+static int erofs_fscache_read_folio_inline(struct folio *folio,
struct erofs_map_blocks *map)
{
struct super_block *sb = folio_mapping(folio)->host->i_sb;
@@ -259,9 +258,8 @@ static int erofs_fscache_readpage_inline(struct folio *folio,
return 0;
}
-static int erofs_fscache_readpage(struct file *file, struct page *page)
+static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
{
- struct folio *folio = page_folio(page);
struct inode *inode = folio_mapping(folio)->host;
struct super_block *sb = inode->i_sb;
struct erofs_map_blocks map;
@@ -286,7 +284,7 @@ static int erofs_fscache_readpage(struct file *file, struct page *page)
}
if (map.m_flags & EROFS_MAP_META) {
- ret = erofs_fscache_readpage_inline(folio, &map);
+ ret = erofs_fscache_read_folio_inline(folio, &map);
goto out_uptodate;
}
@@ -376,7 +374,7 @@ static void erofs_fscache_readahead(struct readahead_control *rac)
if (map.m_flags & EROFS_MAP_META) {
struct folio *folio = readahead_folio(rac);
- ret = erofs_fscache_readpage_inline(folio, &map);
+ ret = erofs_fscache_read_folio_inline(folio, &map);
if (!ret) {
folio_mark_uptodate(folio);
ret = folio_size(folio);
@@ -410,11 +408,11 @@ static void erofs_fscache_readahead(struct readahead_control *rac)
}
static const struct address_space_operations erofs_fscache_meta_aops = {
- .readpage = erofs_fscache_meta_readpage,
+ .read_folio = erofs_fscache_meta_read_folio,
};
const struct address_space_operations erofs_fscache_access_aops = {
- .readpage = erofs_fscache_readpage,
+ .read_folio = erofs_fscache_read_folio,
.readahead = erofs_fscache_readahead,
};
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index c6f5fa4ab24453..95addc5c9d34dd 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -578,16 +578,16 @@ static int erofs_fc_parse_param(struct fs_context *fc,
#ifdef CONFIG_EROFS_FS_ZIP
static const struct address_space_operations managed_cache_aops;
-static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
+static bool erofs_managed_cache_release_folio(struct folio *folio, gfp_t gfp)
{
- int ret = 1; /* 0 - busy */
- struct address_space *const mapping = page->mapping;
+ bool ret = true;
+ struct address_space *const mapping = folio->mapping;
- DBG_BUGON(!PageLocked(page));
+ DBG_BUGON(!folio_test_locked(folio));
DBG_BUGON(mapping->a_ops != &managed_cache_aops);
- if (PagePrivate(page))
- ret = erofs_try_to_free_cached_page(page);
+ if (folio_test_private(folio))
+ ret = erofs_try_to_free_cached_page(&folio->page);
return ret;
}
@@ -608,12 +608,12 @@ static void erofs_managed_cache_invalidate_folio(struct folio *folio,
DBG_BUGON(stop > folio_size(folio) || stop < length);
if (offset == 0 && stop == folio_size(folio))
- while (!erofs_managed_cache_releasepage(&folio->page, GFP_NOFS))
+ while (!erofs_managed_cache_release_folio(folio, GFP_NOFS))
cond_resched();
}
static const struct address_space_operations managed_cache_aops = {
- .releasepage = erofs_managed_cache_releasepage,
+ .release_folio = erofs_managed_cache_release_folio,
.invalidate_folio = erofs_managed_cache_invalidate_folio,
};
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index e6dea6dfca1613..95efc127b2baa0 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -791,7 +791,7 @@ err_out:
static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
unsigned int readahead_pages)
{
- /* auto: enable for readpage, disable for readahead */
+ /* auto: enable for read_folio, disable for readahead */
if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
!readahead_pages)
return true;
@@ -1488,8 +1488,9 @@ skip:
}
}
-static int z_erofs_readpage(struct file *file, struct page *page)
+static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *const inode = page->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
@@ -1563,6 +1564,6 @@ static void z_erofs_readahead(struct readahead_control *rac)
}
const struct address_space_operations z_erofs_aops = {
- .readpage = z_erofs_readpage,
+ .read_folio = z_erofs_read_folio,
.readahead = z_erofs_readahead,
};
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index fc0ea16848803d..0133d385d8e890 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -357,9 +357,9 @@ unlock_ret:
return err;
}
-static int exfat_readpage(struct file *file, struct page *page)
+static int exfat_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, exfat_get_block);
+ return mpage_read_folio(folio, exfat_get_block);
}
static void exfat_readahead(struct readahead_control *rac)
@@ -389,13 +389,13 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to)
}
static int exfat_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int flags,
+ loff_t pos, unsigned int len,
struct page **pagep, void **fsdata)
{
int ret;
*pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
exfat_get_block,
&EXFAT_I(mapping->host)->i_size_ondisk);
@@ -492,7 +492,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from)
static const struct address_space_operations exfat_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = exfat_readpage,
+ .read_folio = exfat_read_folio,
.readahead = exfat_readahead,
.writepage = exfat_writepage,
.writepages = exfat_writepages,
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 52377a0ee735f9..9e1ecd89f47fa6 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -875,9 +875,9 @@ static int ext2_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page, ext2_get_block, wbc);
}
-static int ext2_readpage(struct file *file, struct page *page)
+static int ext2_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, ext2_get_block);
+ return mpage_read_folio(folio, ext2_get_block);
}
static void ext2_readahead(struct readahead_control *rac)
@@ -887,13 +887,11 @@ static void ext2_readahead(struct readahead_control *rac)
static int
ext2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, flags, pagep,
- ext2_get_block);
+ ret = block_write_begin(mapping, pos, len, pagep, ext2_get_block);
if (ret < 0)
ext2_write_failed(mapping, pos + len);
return ret;
@@ -913,12 +911,11 @@ static int ext2_write_end(struct file *file, struct address_space *mapping,
static int
ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
int ret;
- ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
+ ret = nobh_write_begin(mapping, pos, len, pagep, fsdata,
ext2_get_block);
if (ret < 0)
ext2_write_failed(mapping, pos + len);
@@ -969,7 +966,7 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc
const struct address_space_operations ext2_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = ext2_readpage,
+ .read_folio = ext2_read_folio,
.readahead = ext2_readahead,
.writepage = ext2_writepage,
.write_begin = ext2_write_begin,
@@ -985,7 +982,7 @@ const struct address_space_operations ext2_aops = {
const struct address_space_operations ext2_nobh_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = ext2_readpage,
+ .read_folio = ext2_read_folio,
.readahead = ext2_readahead,
.writepage = ext2_nobh_writepage,
.write_begin = ext2_nobh_write_begin,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index d5cea9c2e2a2dd..75b8d81b24692c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -3539,7 +3539,6 @@ extern int ext4_readpage_inline(struct inode *inode, struct page *page);
extern int ext4_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
- unsigned flags,
struct page **pagep);
extern int ext4_write_inline_data_end(struct inode *inode,
loff_t pos, unsigned len,
@@ -3552,7 +3551,6 @@ ext4_journalled_write_inline_data(struct inode *inode,
extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
- unsigned flags,
struct page **pagep,
void **fsdata);
extern int ext4_try_add_inline_entry(handle_t *handle,
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 513762c087a995..cff52ff6549d2d 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -527,13 +527,13 @@ int ext4_readpage_inline(struct inode *inode, struct page *page)
}
static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
- struct inode *inode,
- unsigned flags)
+ struct inode *inode)
{
int ret, needed_blocks, no_expand;
handle_t *handle = NULL;
int retries = 0, sem_held = 0;
struct page *page = NULL;
+ unsigned int flags;
unsigned from, to;
struct ext4_iloc iloc;
@@ -562,9 +562,9 @@ retry:
/* We cannot recurse into the filesystem as the transaction is already
* started */
- flags |= AOP_FLAG_NOFS;
-
- page = grab_cache_page_write_begin(mapping, 0, flags);
+ flags = memalloc_nofs_save();
+ page = grab_cache_page_write_begin(mapping, 0);
+ memalloc_nofs_restore(flags);
if (!page) {
ret = -ENOMEM;
goto out;
@@ -649,11 +649,11 @@ out:
int ext4_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
- unsigned flags,
struct page **pagep)
{
int ret;
handle_t *handle;
+ unsigned int flags;
struct page *page;
struct ext4_iloc iloc;
@@ -691,9 +691,9 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
if (ret)
goto out;
- flags |= AOP_FLAG_NOFS;
-
- page = grab_cache_page_write_begin(mapping, 0, flags);
+ flags = memalloc_nofs_save();
+ page = grab_cache_page_write_begin(mapping, 0);
+ memalloc_nofs_restore(flags);
if (!page) {
ret = -ENOMEM;
goto out;
@@ -727,8 +727,7 @@ out:
brelse(iloc.bh);
return ret;
convert:
- return ext4_convert_inline_data_to_extent(mapping,
- inode, flags);
+ return ext4_convert_inline_data_to_extent(mapping, inode);
}
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
@@ -848,13 +847,12 @@ ext4_journalled_write_inline_data(struct inode *inode,
*/
static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
struct inode *inode,
- unsigned flags,
void **fsdata)
{
int ret = 0, inline_size;
struct page *page;
- page = grab_cache_page_write_begin(mapping, 0, flags);
+ page = grab_cache_page_write_begin(mapping, 0);
if (!page)
return -ENOMEM;
@@ -907,7 +905,6 @@ out:
int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
- unsigned flags,
struct page **pagep,
void **fsdata)
{
@@ -916,6 +913,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct page *page;
struct ext4_iloc iloc;
int retries = 0;
+ unsigned int flags;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
@@ -932,17 +930,10 @@ retry_journal:
if (ret && ret != -ENOSPC)
goto out_journal;
- /*
- * We cannot recurse into the filesystem as the transaction
- * is already started.
- */
- flags |= AOP_FLAG_NOFS;
-
if (ret == -ENOSPC) {
ext4_journal_stop(handle);
ret = ext4_da_convert_inline_data_to_extent(mapping,
inode,
- flags,
fsdata);
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -950,7 +941,13 @@ retry_journal:
goto out;
}
- page = grab_cache_page_write_begin(mapping, 0, flags);
+ /*
+ * We cannot recurse into the filesystem as the transaction
+ * is already started.
+ */
+ flags = memalloc_nofs_save();
+ page = grab_cache_page_write_begin(mapping, 0);
+ memalloc_nofs_restore(flags);
if (!page) {
ret = -ENOMEM;
goto out_journal;
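The ext4 inline-data hunks above all make the same substitution: the per-call AOP_FLAG_NOFS becomes the scoped memalloc_nofs_save()/memalloc_nofs_restore() API, so grab_cache_page_write_begin() no longer needs a flags argument. The idiom in isolation:

#include <linux/sched/mm.h>
#include <linux/pagemap.h>

/*
 * Every allocation between save and restore (including those made
 * internally by grab_cache_page_write_begin()) behaves as if GFP_NOFS
 * had been passed explicitly, so a running transaction cannot recurse
 * into the filesystem via reclaim.
 */
static struct page *grab_page_nofs(struct address_space *mapping,
                                   pgoff_t index)
{
        unsigned int flags = memalloc_nofs_save();
        struct page *page = grab_cache_page_write_begin(mapping, index);

        memalloc_nofs_restore(flags);
        return page;
}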
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7555cbe7714808..3dce7d058985b3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1142,7 +1142,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
#endif
static int ext4_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
@@ -1156,7 +1156,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;
- trace_ext4_write_begin(inode, pos, len, flags);
+ trace_ext4_write_begin(inode, pos, len);
/*
* Reserve one block more for addition to orphan list in case
* we allocate blocks but write fails for some reason
@@ -1168,7 +1168,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
- flags, pagep);
+ pagep);
if (ret < 0)
return ret;
if (ret == 1)
@@ -1183,7 +1183,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
* the page (if needed) without using GFP_NOFS.
*/
retry_grab:
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
unlock_page(page);
@@ -2943,7 +2943,7 @@ static int ext4_nonda_switch(struct super_block *sb)
}
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret, retries = 0;
@@ -2959,14 +2959,13 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
return ext4_write_begin(file, mapping, pos,
- len, flags, pagep, fsdata);
+ len, pagep, fsdata);
}
*fsdata = (void *)0;
- trace_ext4_da_write_begin(inode, pos, len, flags);
+ trace_ext4_da_write_begin(inode, pos, len);
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
- ret = ext4_da_write_inline_data_begin(mapping, inode,
- pos, len, flags,
+ ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
pagep, fsdata);
if (ret < 0)
return ret;
@@ -2975,7 +2974,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
}
retry:
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
@@ -3192,8 +3191,9 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
return iomap_bmap(mapping, block, &ext4_iomap_ops);
}
-static int ext4_readpage(struct file *file, struct page *page)
+static int ext4_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
int ret = -EAGAIN;
struct inode *inode = page->mapping->host;
@@ -3254,19 +3254,19 @@ static void ext4_journalled_invalidate_folio(struct folio *folio,
WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
}
-static int ext4_releasepage(struct page *page, gfp_t wait)
+static bool ext4_release_folio(struct folio *folio, gfp_t wait)
{
- journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+ journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
- trace_ext4_releasepage(page);
+ trace_ext4_releasepage(&folio->page);
/* Page has dirty journalled data -> cannot release */
- if (PageChecked(page))
- return 0;
+ if (folio_test_checked(folio))
+ return false;
if (journal)
- return jbd2_journal_try_to_free_buffers(journal, page);
+ return jbd2_journal_try_to_free_buffers(journal, folio);
else
- return try_to_free_buffers(page);
+ return try_to_free_buffers(folio);
}
static bool ext4_inode_datasync_dirty(struct inode *inode)
@@ -3620,7 +3620,7 @@ static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
}
static const struct address_space_operations ext4_aops = {
- .readpage = ext4_readpage,
+ .read_folio = ext4_read_folio,
.readahead = ext4_readahead,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
@@ -3629,7 +3629,7 @@ static const struct address_space_operations ext4_aops = {
.dirty_folio = ext4_dirty_folio,
.bmap = ext4_bmap,
.invalidate_folio = ext4_invalidate_folio,
- .releasepage = ext4_releasepage,
+ .release_folio = ext4_release_folio,
.direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
@@ -3638,7 +3638,7 @@ static const struct address_space_operations ext4_aops = {
};
static const struct address_space_operations ext4_journalled_aops = {
- .readpage = ext4_readpage,
+ .read_folio = ext4_read_folio,
.readahead = ext4_readahead,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
@@ -3647,7 +3647,7 @@ static const struct address_space_operations ext4_journalled_aops = {
.dirty_folio = ext4_journalled_dirty_folio,
.bmap = ext4_bmap,
.invalidate_folio = ext4_journalled_invalidate_folio,
- .releasepage = ext4_releasepage,
+ .release_folio = ext4_release_folio,
.direct_IO = noop_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
@@ -3655,7 +3655,7 @@ static const struct address_space_operations ext4_journalled_aops = {
};
static const struct address_space_operations ext4_da_aops = {
- .readpage = ext4_readpage,
+ .read_folio = ext4_read_folio,
.readahead = ext4_readahead,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
@@ -3664,7 +3664,7 @@ static const struct address_space_operations ext4_da_aops = {
.dirty_folio = ext4_dirty_folio,
.bmap = ext4_bmap,
.invalidate_folio = ext4_invalidate_folio,
- .releasepage = ext4_releasepage,
+ .release_folio = ext4_release_folio,
.direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 95aa212f08632d..701f1d6a217f79 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -8,6 +8,7 @@
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
+#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "ext4_extents.h"
@@ -127,7 +128,7 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
pgoff_t index1, pgoff_t index2, struct page *page[2])
{
struct address_space *mapping[2];
- unsigned fl = AOP_FLAG_NOFS;
+ unsigned int flags;
BUG_ON(!inode1 || !inode2);
if (inode1 < inode2) {
@@ -139,11 +140,15 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
mapping[1] = inode1->i_mapping;
}
- page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
- if (!page[0])
+ flags = memalloc_nofs_save();
+ page[0] = grab_cache_page_write_begin(mapping[0], index1);
+ if (!page[0]) {
+ memalloc_nofs_restore(flags);
return -ENOMEM;
+ }
- page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
+ page[1] = grab_cache_page_write_begin(mapping[1], index2);
+ memalloc_nofs_restore(flags);
if (!page[1]) {
unlock_page(page[0]);
put_page(page[0]);
@@ -664,8 +669,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
* Up semaphore to avoid following problems:
* a. transaction deadlock among ext4_journal_start,
* ->write_begin via pagefault, and jbd2_journal_commit
- * b. racing with ->readpage, ->write_begin, and ext4_get_block
- * in move_extent_per_page
+ * b. racing with ->read_folio, ->write_begin, and
+ * ext4_get_block in move_extent_per_page
*/
ext4_double_up_write_data_sem(orig_inode, donor_inode);
/* Swap original branches with new branches */
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index af491e170c4a90..e02a5f14e0211e 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -163,7 +163,7 @@ static bool bio_post_read_required(struct bio *bio)
*
* The mpage code never puts partial pages into a BIO (except for end-of-file).
* If a page does not map to a contiguous run of blocks then it simply falls
- * back to block_read_full_page().
+ * back to block_read_full_folio().
*
* Why is this? If a page's completion depends on a number of different BIOs
* which can complete in any order (or at the same time) then determining the
@@ -394,7 +394,7 @@ int ext4_mpage_readpages(struct inode *inode,
bio = NULL;
}
if (!PageUptodate(page))
- block_read_full_page(page, ext4_get_block);
+ block_read_full_folio(page_folio(page), ext4_get_block);
else
unlock_page(page);
next_page:
diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
index eacbd489e3bf17..b051d19b5c8a04 100644
--- a/fs/ext4/verity.c
+++ b/fs/ext4/verity.c
@@ -69,6 +69,9 @@ static int pagecache_read(struct inode *inode, void *buf, size_t count,
static int pagecache_write(struct inode *inode, const void *buf, size_t count,
loff_t pos)
{
+ struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *aops = mapping->a_ops;
+
if (pos + count > inode->i_sb->s_maxbytes)
return -EFBIG;
@@ -79,15 +82,13 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
void *fsdata;
int res;
- res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
- &page, &fsdata);
+ res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata);
if (res)
return res;
memcpy_to_page(page, offset_in_page(pos), buf, n);
- res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
- page, fsdata);
+ res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata);
if (res < 0)
return res;
if (res != n)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 909085a78f9c3f..456c1e89386aa3 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -468,7 +468,7 @@ const struct address_space_operations f2fs_meta_aops = {
.writepages = f2fs_write_meta_pages,
.dirty_folio = f2fs_dirty_meta_folio,
.invalidate_folio = f2fs_invalidate_folio,
- .releasepage = f2fs_release_page,
+ .release_folio = f2fs_release_folio,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
#endif
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 12a56f9e15722d..24824cd96f3675 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1746,7 +1746,7 @@ unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
}
const struct address_space_operations f2fs_compress_aops = {
- .releasepage = f2fs_release_page,
+ .release_folio = f2fs_release_folio,
.invalidate_folio = f2fs_invalidate_folio,
};
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9a1a526f20920b..8f38c26bb16ca5 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2372,8 +2372,9 @@ next_page:
return ret;
}
-static int f2fs_read_data_page(struct file *file, struct page *page)
+static int f2fs_read_data_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page_file_mapping(page)->host;
int ret = -EAGAIN;
@@ -3314,8 +3315,7 @@ unlock_out:
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -3325,7 +3325,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
block_t blkaddr = NULL_ADDR;
int err = 0;
- trace_f2fs_write_begin(inode, pos, len, flags);
+ trace_f2fs_write_begin(inode, pos, len);
if (!f2fs_is_checkpoint_ready(sbi)) {
err = -ENOSPC;
@@ -3528,28 +3528,30 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
folio_detach_private(folio);
}
-int f2fs_release_page(struct page *page, gfp_t wait)
+bool f2fs_release_folio(struct folio *folio, gfp_t wait)
{
- /* If this is dirty page, keep PagePrivate */
- if (PageDirty(page))
- return 0;
+ struct f2fs_sb_info *sbi;
+
+	/* If this is a dirty folio, keep private data */
+ if (folio_test_dirty(folio))
+ return false;
	/* This is an atomically written page, keep its private data */
- if (page_private_atomic(page))
- return 0;
+ if (page_private_atomic(&folio->page))
+ return false;
- if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
- struct inode *inode = page->mapping->host;
+ sbi = F2FS_M_SB(folio->mapping);
+ if (test_opt(sbi, COMPRESS_CACHE)) {
+ struct inode *inode = folio->mapping->host;
- if (inode->i_ino == F2FS_COMPRESS_INO(F2FS_I_SB(inode)))
- clear_page_private_data(page);
+ if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
+ clear_page_private_data(&folio->page);
}
- clear_page_private_gcing(page);
+ clear_page_private_gcing(&folio->page);
- detach_page_private(page);
- set_page_private(page, 0);
- return 1;
+ folio_detach_private(folio);
+ return true;
}
static bool f2fs_dirty_data_folio(struct address_space *mapping,
@@ -3936,7 +3938,7 @@ static void f2fs_swap_deactivate(struct file *file)
#endif
const struct address_space_operations f2fs_dblock_aops = {
- .readpage = f2fs_read_data_page,
+ .read_folio = f2fs_read_data_folio,
.readahead = f2fs_readahead,
.writepage = f2fs_write_data_page,
.writepages = f2fs_write_data_pages,
@@ -3944,7 +3946,7 @@ const struct address_space_operations f2fs_dblock_aops = {
.write_end = f2fs_write_end,
.dirty_folio = f2fs_dirty_data_folio,
.invalidate_folio = f2fs_invalidate_folio,
- .releasepage = f2fs_release_page,
+ .release_folio = f2fs_release_folio,
.direct_IO = noop_direct_IO,
.bmap = f2fs_bmap,
.swap_activate = f2fs_swap_activate,
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 2b2b3c87e45e05..10d1f138d14feb 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -18,6 +18,7 @@
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
+#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -2654,6 +2655,7 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
pgoff_t index, bool for_write)
{
struct page *page;
+ unsigned int flags;
if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
if (!for_write)
@@ -2673,7 +2675,12 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
if (!for_write)
return grab_cache_page(mapping, index);
- return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
+
+ flags = memalloc_nofs_save();
+ page = grab_cache_page_write_begin(mapping, index);
+ memalloc_nofs_restore(flags);
+
+ return page;
}
static inline struct page *f2fs_pagecache_get_page(
@@ -3761,7 +3768,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
int compr_blocks, bool allow_balance);
void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
-int f2fs_release_page(struct page *page, gfp_t wait);
+bool f2fs_release_folio(struct folio *folio, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page, enum migrate_mode mode);
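
The f2fs_grab_cache_page() hunk above is the series-wide replacement for AOP_FLAG_NOFS: rather than threading a flag down into the page cache, the caller opens a scoped NOFS section around the allocation. A hedged sketch of the idiom; grab_page_nofs() is a hypothetical helper:

	#include <linux/pagemap.h>
	#include <linux/sched/mm.h>

	static struct page *grab_page_nofs(struct address_space *mapping,
					   pgoff_t index)
	{
		unsigned int flags;
		struct page *page;

		/*
		 * Between save and restore, every allocation behaves as if
		 * it were GFP_NOFS, so reclaim cannot re-enter the
		 * filesystem and deadlock on locks the caller already holds.
		 */
		flags = memalloc_nofs_save();
		page = grab_cache_page_write_begin(mapping, index);
		memalloc_nofs_restore(flags);

		return page;	/* NULL on allocation failure */
	}
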
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index c45d341dcf6e53..8ccff18560ff1a 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2165,7 +2165,7 @@ const struct address_space_operations f2fs_node_aops = {
.writepages = f2fs_write_node_pages,
.dirty_folio = f2fs_dirty_node_folio,
.invalidate_folio = f2fs_invalidate_folio,
- .releasepage = f2fs_release_page,
+ .release_folio = f2fs_release_folio,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
#endif
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 4368f90571bd61..ed3e8b7a8260c2 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2483,7 +2483,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
tocopy = min_t(unsigned long, sb->s_blocksize - offset,
towrite);
retry:
- err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
+ err = a_ops->write_begin(NULL, mapping, off, tocopy,
&page, &fsdata);
if (unlikely(err)) {
if (err == -ENOMEM) {
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index 3d793202cc9fef..65395ae188aac1 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -74,6 +74,9 @@ static int pagecache_read(struct inode *inode, void *buf, size_t count,
static int pagecache_write(struct inode *inode, const void *buf, size_t count,
loff_t pos)
{
+ struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *aops = mapping->a_ops;
+
if (pos + count > inode->i_sb->s_maxbytes)
return -EFBIG;
@@ -85,8 +88,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
void *addr;
int res;
- res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
- &page, &fsdata);
+ res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata);
if (res)
return res;
@@ -94,8 +96,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
memcpy(addr + offset_in_page(pos), buf, n);
kunmap_atomic(addr);
- res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
- page, fsdata);
+ res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata);
if (res < 0)
return res;
if (res != n)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 3d1afb95a925a6..69b4d4ae64d79a 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -205,9 +205,9 @@ static int fat_writepages(struct address_space *mapping,
return mpage_writepages(mapping, wbc, fat_get_block);
}
-static int fat_readpage(struct file *file, struct page *page)
+static int fat_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, fat_get_block);
+ return mpage_read_folio(folio, fat_get_block);
}
static void fat_readahead(struct readahead_control *rac)
@@ -226,13 +226,13 @@ static void fat_write_failed(struct address_space *mapping, loff_t to)
}
static int fat_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int err;
*pagep = NULL;
- err = cont_write_begin(file, mapping, pos, len, flags,
+ err = cont_write_begin(file, mapping, pos, len,
pagep, fsdata, fat_get_block,
&MSDOS_I(mapping->host)->mmu_private);
if (err < 0)
@@ -344,7 +344,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from)
static const struct address_space_operations fat_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = fat_readpage,
+ .read_folio = fat_read_folio,
.readahead = fat_readahead,
.writepage = fat_writepage,
.writepages = fat_writepages,
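
fat's conversion is the common mechanical shape for block filesystems: ->readpage becomes ->read_folio and the helper is renamed from mpage_readpage() to mpage_read_folio(). Wiring it up under the new signatures looks roughly like this; my_get_block() stands in for the filesystem's real block-mapping routine:

	#include <linux/buffer_head.h>
	#include <linux/fs.h>
	#include <linux/mpage.h>

	/* Filesystem-specific get_block; the definition is elided here. */
	static int my_get_block(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);

	static int my_read_folio(struct file *file, struct folio *folio)
	{
		return mpage_read_folio(folio, my_get_block);
	}

	static const struct address_space_operations my_aops = {
		.read_folio	= my_read_folio,	/* was .readpage */
		/* .writepage, .write_begin, .write_end etc. as before */
	};
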
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index bfc780c682fb8b..a37431e443d3b6 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -38,33 +38,34 @@
#include "vxfs_inode.h"
-static int vxfs_immed_readpage(struct file *, struct page *);
+static int vxfs_immed_read_folio(struct file *, struct folio *);
/*
* Address space operations for immed files and directories.
*/
const struct address_space_operations vxfs_immed_aops = {
- .readpage = vxfs_immed_readpage,
+ .read_folio = vxfs_immed_read_folio,
};
/**
- * vxfs_immed_readpage - read part of an immed inode into pagecache
+ * vxfs_immed_read_folio - read part of an immed inode into pagecache
* @file: file context (unused)
- * @page: page frame to fill in.
+ * @folio: folio to fill in.
*
* Description:
- * vxfs_immed_readpage reads a part of the immed area of the
+ * vxfs_immed_read_folio reads a part of the immed area of the
 * file that hosts @folio into the pagecache.
*
* Returns:
* Zero on success, else a negative error code.
*
* Locking status:
- * @page is locked and will be unlocked.
+ * @folio is locked and will be unlocked.
*/
static int
-vxfs_immed_readpage(struct file *fp, struct page *pp)
+vxfs_immed_read_folio(struct file *fp, struct folio *folio)
{
+ struct page *pp = &folio->page;
struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host);
u_int64_t offset = (u_int64_t)pp->index << PAGE_SHIFT;
caddr_t kaddr;
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index e806694d4145e2..6143ebab940d63 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -38,11 +38,11 @@
#include "vxfs_extern.h"
-static int vxfs_readpage(struct file *, struct page *);
+static int vxfs_read_folio(struct file *, struct folio *);
static sector_t vxfs_bmap(struct address_space *, sector_t);
const struct address_space_operations vxfs_aops = {
- .readpage = vxfs_readpage,
+ .read_folio = vxfs_read_folio,
.bmap = vxfs_bmap,
};
@@ -141,24 +141,23 @@ vxfs_getblk(struct inode *ip, sector_t iblock,
}
/**
- * vxfs_readpage - read one page synchronously into the pagecache
+ * vxfs_read_folio - read one page synchronously into the pagecache
* @file: file context (unused)
- * @page: page frame to fill in.
+ * @folio: folio to fill in.
*
* Description:
- * The vxfs_readpage routine reads @page synchronously into the
+ * The vxfs_read_folio routine reads @folio synchronously into the
* pagecache.
*
* Returns:
* Zero on success, else a negative error code.
*
* Locking status:
- * @page is locked and will be unlocked.
+ * @folio is locked and will be unlocked.
*/
-static int
-vxfs_readpage(struct file *file, struct page *page)
+static int vxfs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, vxfs_getblk);
+ return block_read_full_folio(folio, vxfs_getblk);
}
/**
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 9ff27b8a9782c4..74303d6e987b32 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1957,20 +1957,20 @@ void fuse_init_dir(struct inode *inode)
fi->rdc.version = 0;
}
-static int fuse_symlink_readpage(struct file *null, struct page *page)
+static int fuse_symlink_read_folio(struct file *null, struct folio *folio)
{
- int err = fuse_readlink_page(page->mapping->host, page);
+ int err = fuse_readlink_page(folio->mapping->host, &folio->page);
if (!err)
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
- unlock_page(page);
+ folio_unlock(folio);
return err;
}
static const struct address_space_operations fuse_symlink_aops = {
- .readpage = fuse_symlink_readpage,
+ .read_folio = fuse_symlink_read_folio,
};
void fuse_init_symlink(struct inode *inode)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index f18d14d5fea199..05caa2b9272e80 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -857,8 +857,9 @@ static int fuse_do_readpage(struct file *file, struct page *page)
return 0;
}
-static int fuse_readpage(struct file *file, struct page *page)
+static int fuse_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
int err;
@@ -1174,7 +1175,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
break;
err = -ENOMEM;
- page = grab_cache_page_write_begin(mapping, index, 0);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
break;
@@ -2273,8 +2274,7 @@ out:
 * but how to implement it without killing performance needs more thinking.
*/
static int fuse_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
struct fuse_conn *fc = get_fuse_conn(file_inode(file));
@@ -2284,7 +2284,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
WARN_ON(!fc->writeback_cache);
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
goto error;
@@ -3175,7 +3175,7 @@ static const struct file_operations fuse_file_operations = {
};
static const struct address_space_operations fuse_file_aops = {
- .readpage = fuse_readpage,
+ .read_folio = fuse_read_folio,
.readahead = fuse_readahead,
.writepage = fuse_writepage,
.writepages = fuse_writepages,
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 72c9f31ce72446..106e90a365838e 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -464,22 +464,26 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
return 0;
}
-
-static int __gfs2_readpage(void *file, struct page *page)
+/**
+ * gfs2_read_folio - read a folio from a file
+ * @file: The file to read
+ * @folio: The folio in the file
+ */
+static int gfs2_read_folio(struct file *file, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
int error;
if (!gfs2_is_jdata(ip) ||
- (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
- error = iomap_readpage(page, &gfs2_iomap_ops);
+ (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
+ error = iomap_read_folio(folio, &gfs2_iomap_ops);
} else if (gfs2_is_stuffed(ip)) {
- error = stuffed_readpage(ip, page);
- unlock_page(page);
+ error = stuffed_readpage(ip, &folio->page);
+ folio_unlock(folio);
} else {
- error = mpage_readpage(page, gfs2_block_map);
+ error = mpage_read_folio(folio, gfs2_block_map);
}
if (unlikely(gfs2_withdrawn(sdp)))
@@ -489,17 +493,6 @@ static int __gfs2_readpage(void *file, struct page *page)
}
/**
- * gfs2_readpage - read a page of a file
- * @file: The file to read
- * @page: The page of the file
- */
-
-static int gfs2_readpage(struct file *file, struct page *page)
-{
- return __gfs2_readpage(file, page);
-}
-
-/**
* gfs2_internal_read - read an internal file
* @ip: The gfs2 inode
* @buf: The buffer to fill
@@ -523,7 +516,7 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
amt = size - copied;
if (offset + size > PAGE_SIZE)
amt = PAGE_SIZE - offset;
- page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
+ page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
p = kmap_atomic(page);
@@ -698,38 +691,40 @@ out:
}
/**
- * gfs2_releasepage - free the metadata associated with a page
- * @page: the page that's being released
+ * gfs2_release_folio - free the metadata associated with a folio
+ * @folio: the folio that's being released
* @gfp_mask: passed from Linux VFS, ignored by us
*
- * Calls try_to_free_buffers() to free the buffers and put the page if the
+ * Calls try_to_free_buffers() to free the buffers and put the folio if the
* buffers can be released.
*
- * Returns: 1 if the page was put or else 0
+ * Returns: true if the folio was put or else false
*/
-int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
+bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct buffer_head *bh, *head;
struct gfs2_bufdata *bd;
- if (!page_has_buffers(page))
- return 0;
+ head = folio_buffers(folio);
+ if (!head)
+ return false;
/*
- * From xfs_vm_releasepage: mm accommodates an old ext3 case where
- * clean pages might not have had the dirty bit cleared. Thus, it can
- * send actual dirty pages to ->releasepage() via shrink_active_list().
+ * mm accommodates an old ext3 case where clean folios might
+ * not have had the dirty bit cleared. Thus, it can send actual
+ * dirty folios to ->release_folio() via shrink_active_list().
*
- * As a workaround, we skip pages that contain dirty buffers below.
- * Once ->releasepage isn't called on dirty pages anymore, we can warn
- * on dirty buffers like we used to here again.
+ * As a workaround, we skip folios that contain dirty buffers
+ * below. Once ->release_folio isn't called on dirty folios
+ * anymore, we can warn on dirty buffers like we used to here
+ * again.
*/
gfs2_log_lock(sdp);
- head = bh = page_buffers(page);
+ bh = head;
do {
if (atomic_read(&bh->b_count))
goto cannot_release;
@@ -739,9 +734,9 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
goto cannot_release;
bh = bh->b_this_page;
- } while(bh != head);
+ } while (bh != head);
- head = bh = page_buffers(page);
+ bh = head;
do {
bd = bh->b_private;
if (bd) {
@@ -762,20 +757,20 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
} while (bh != head);
gfs2_log_unlock(sdp);
- return try_to_free_buffers(page);
+ return try_to_free_buffers(folio);
cannot_release:
gfs2_log_unlock(sdp);
- return 0;
+ return false;
}
static const struct address_space_operations gfs2_aops = {
.writepage = gfs2_writepage,
.writepages = gfs2_writepages,
- .readpage = gfs2_readpage,
+ .read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
.dirty_folio = filemap_dirty_folio,
- .releasepage = iomap_releasepage,
+ .release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
.bmap = gfs2_bmap,
.direct_IO = noop_direct_IO,
@@ -787,12 +782,12 @@ static const struct address_space_operations gfs2_aops = {
static const struct address_space_operations gfs2_jdata_aops = {
.writepage = gfs2_jdata_writepage,
.writepages = gfs2_jdata_writepages,
- .readpage = gfs2_readpage,
+ .read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
.dirty_folio = jdata_dirty_folio,
.bmap = gfs2_bmap,
.invalidate_folio = gfs2_invalidate_folio,
- .releasepage = gfs2_releasepage,
+ .release_folio = gfs2_release_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
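
gfs2_release_folio() also shows the new ->release_folio contract in full: it takes a folio, returns bool (true when private state was dropped and the folio may be freed), and must tolerate dirty folios arriving via shrink_active_list(). A skeletal buffer-head-backed implementation under those rules; my_release_folio() is illustrative:

	#include <linux/buffer_head.h>
	#include <linux/pagemap.h>

	static bool my_release_folio(struct folio *folio, gfp_t gfp)
	{
		/* Dirty or writeback folios must keep their private data. */
		if (folio_test_dirty(folio) || folio_test_writeback(folio))
			return false;

		/*
		 * try_to_free_buffers() takes a folio as of this series and
		 * returns true if every buffer head could be detached.
		 */
		return try_to_free_buffers(folio);
	}
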
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 7b2c1f390db741..0264d514dda787 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -12,7 +12,7 @@
#include <linux/mm.h>
#include "util.h"
-extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
+bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask);
extern int gfs2_internal_read(struct gfs2_inode *ip,
char *buf, loff_t *pos, unsigned size);
extern void gfs2_set_aops(struct inode *inode);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index d8bd1d48bd7897..868dcc71b58177 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -92,14 +92,14 @@ const struct address_space_operations gfs2_meta_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.writepage = gfs2_aspace_writepage,
- .releasepage = gfs2_releasepage,
+ .release_folio = gfs2_release_folio,
};
const struct address_space_operations gfs2_rgrp_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.writepage = gfs2_aspace_writepage,
- .releasepage = gfs2_releasepage,
+ .release_folio = gfs2_release_folio,
};
/**
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index 263d5028d9d18e..3f7e9bef987430 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -491,10 +491,10 @@ void hfs_file_truncate(struct inode *inode)
/* XXX: Can use generic_cont_expand? */
size = inode->i_size - 1;
- res = pagecache_write_begin(NULL, mapping, size+1, 0, 0,
- &page, &fsdata);
+ res = hfs_write_begin(NULL, mapping, size + 1, 0, &page,
+ &fsdata);
if (!res) {
- res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
+ res = generic_write_end(NULL, mapping, size + 1, 0, 0,
page, fsdata);
}
if (res)
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index b8eb0322a3e594..68d0305880f714 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -201,6 +201,8 @@ extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern const struct address_space_operations hfs_aops;
extern const struct address_space_operations hfs_btree_aops;
+int hfs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, struct page **pagep, void **fsdata);
extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t);
extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
extern int hfs_write_inode(struct inode *, struct writeback_control *);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 55f45e9b4930e3..c4526f16355d54 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -34,9 +34,9 @@ static int hfs_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page, hfs_get_block, wbc);
}
-static int hfs_readpage(struct file *file, struct page *page)
+static int hfs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, hfs_get_block);
+ return block_read_full_folio(folio, hfs_get_block);
}
static void hfs_write_failed(struct address_space *mapping, loff_t to)
@@ -49,14 +49,13 @@ static void hfs_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int hfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+int hfs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
int ret;
*pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
hfs_get_block,
&HFS_I(mapping->host)->phys_size);
if (unlikely(ret))
@@ -70,14 +69,15 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, hfs_get_block);
}
-static int hfs_releasepage(struct page *page, gfp_t mask)
+static bool hfs_release_folio(struct folio *folio, gfp_t mask)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct hfs_btree *tree;
struct hfs_bnode *node;
u32 nidx;
- int i, res = 1;
+ int i;
+ bool res = true;
switch (inode->i_ino) {
case HFS_EXT_CNID:
@@ -88,27 +88,27 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
break;
default:
BUG();
- return 0;
+ return false;
}
if (!tree)
- return 0;
+ return false;
if (tree->node_size >= PAGE_SIZE) {
- nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
+ nidx = folio->index >> (tree->node_size_shift - PAGE_SHIFT);
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, nidx);
if (!node)
;
else if (atomic_read(&node->refcnt))
- res = 0;
+ res = false;
if (res && node) {
hfs_bnode_unhash(node);
hfs_bnode_free(node);
}
spin_unlock(&tree->hash_lock);
} else {
- nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
+ nidx = folio->index << (PAGE_SHIFT - tree->node_size_shift);
i = 1 << (PAGE_SHIFT - tree->node_size_shift);
spin_lock(&tree->hash_lock);
do {
@@ -116,7 +116,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
if (!node)
continue;
if (atomic_read(&node->refcnt)) {
- res = 0;
+ res = false;
break;
}
hfs_bnode_unhash(node);
@@ -124,7 +124,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
} while (--i && nidx < tree->node_count);
spin_unlock(&tree->hash_lock);
}
- return res ? try_to_free_buffers(page) : 0;
+ return res ? try_to_free_buffers(folio) : false;
}
static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
@@ -161,18 +161,18 @@ static int hfs_writepages(struct address_space *mapping,
const struct address_space_operations hfs_btree_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = hfs_readpage,
+ .read_folio = hfs_read_folio,
.writepage = hfs_writepage,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
.bmap = hfs_bmap,
- .releasepage = hfs_releasepage,
+ .release_folio = hfs_release_folio,
};
const struct address_space_operations hfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = hfs_readpage,
+ .read_folio = hfs_read_folio,
.writepage = hfs_writepage,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 7054a542689f9c..721f779b4ec3e3 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -557,12 +557,12 @@ void hfsplus_file_truncate(struct inode *inode)
void *fsdata;
loff_t size = inode->i_size;
- res = pagecache_write_begin(NULL, mapping, size, 0, 0,
- &page, &fsdata);
+ res = hfsplus_write_begin(NULL, mapping, size, 0,
+ &page, &fsdata);
if (res)
return;
- res = pagecache_write_end(NULL, mapping, size,
- 0, 0, page, fsdata);
+ res = generic_write_end(NULL, mapping, size, 0, 0,
+ page, fsdata);
if (res < 0)
return;
mark_inode_dirty(inode);
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 1798949f269bb8..396e73aa096128 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -468,6 +468,8 @@ extern const struct address_space_operations hfsplus_aops;
extern const struct address_space_operations hfsplus_btree_aops;
extern const struct dentry_operations hfsplus_dentry_operations;
+int hfsplus_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, struct page **pagep, void **fsdata);
struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
umode_t mode);
void hfsplus_delete_inode(struct inode *inode);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 446a816aa8e1e2..aeab83ed1c9c6e 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -23,9 +23,9 @@
#include "hfsplus_raw.h"
#include "xattr.h"
-static int hfsplus_readpage(struct file *file, struct page *page)
+static int hfsplus_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, hfsplus_get_block);
+ return block_read_full_folio(folio, hfsplus_get_block);
}
static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
@@ -43,14 +43,13 @@ static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
}
}
-static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+int hfsplus_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
int ret;
*pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
hfsplus_get_block,
&HFSPLUS_I(mapping->host)->phys_size);
if (unlikely(ret))
@@ -64,14 +63,15 @@ static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, hfsplus_get_block);
}
-static int hfsplus_releasepage(struct page *page, gfp_t mask)
+static bool hfsplus_release_folio(struct folio *folio, gfp_t mask)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct hfs_btree *tree;
struct hfs_bnode *node;
u32 nidx;
- int i, res = 1;
+ int i;
+ bool res = true;
switch (inode->i_ino) {
case HFSPLUS_EXT_CNID:
@@ -85,26 +85,26 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
break;
default:
BUG();
- return 0;
+ return false;
}
if (!tree)
- return 0;
+ return false;
if (tree->node_size >= PAGE_SIZE) {
- nidx = page->index >>
+ nidx = folio->index >>
(tree->node_size_shift - PAGE_SHIFT);
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, nidx);
if (!node)
;
else if (atomic_read(&node->refcnt))
- res = 0;
+ res = false;
if (res && node) {
hfs_bnode_unhash(node);
hfs_bnode_free(node);
}
spin_unlock(&tree->hash_lock);
} else {
- nidx = page->index <<
+ nidx = folio->index <<
(PAGE_SHIFT - tree->node_size_shift);
i = 1 << (PAGE_SHIFT - tree->node_size_shift);
spin_lock(&tree->hash_lock);
@@ -113,7 +113,7 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
if (!node)
continue;
if (atomic_read(&node->refcnt)) {
- res = 0;
+ res = false;
break;
}
hfs_bnode_unhash(node);
@@ -121,7 +121,7 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
} while (--i && nidx < tree->node_count);
spin_unlock(&tree->hash_lock);
}
- return res ? try_to_free_buffers(page) : 0;
+ return res ? try_to_free_buffers(folio) : false;
}
static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
@@ -158,18 +158,18 @@ static int hfsplus_writepages(struct address_space *mapping,
const struct address_space_operations hfsplus_btree_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = hfsplus_readpage,
+ .read_folio = hfsplus_read_folio,
.writepage = hfsplus_writepage,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
.bmap = hfsplus_bmap,
- .releasepage = hfsplus_releasepage,
+ .release_folio = hfsplus_release_folio,
};
const struct address_space_operations hfsplus_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = hfsplus_readpage,
+ .read_folio = hfsplus_read_folio,
.writepage = hfsplus_writepage,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 14f9ac973a2ef2..cc1bc6f93a0101 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -434,8 +434,9 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
return err;
}
-static int hostfs_readpage(struct file *file, struct page *page)
+static int hostfs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
char *buffer;
loff_t start = page_offset(page);
int bytes_read, ret = 0;
@@ -463,12 +464,12 @@ static int hostfs_readpage(struct file *file, struct page *page)
}
static int hostfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
- *pagep = grab_cache_page_write_begin(mapping, index, flags);
+ *pagep = grab_cache_page_write_begin(mapping, index);
if (!*pagep)
return -ENOMEM;
return 0;
@@ -504,7 +505,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
static const struct address_space_operations hostfs_aops = {
.writepage = hostfs_writepage,
- .readpage = hostfs_readpage,
+ .read_folio = hostfs_read_folio,
.dirty_folio = filemap_dirty_folio,
.write_begin = hostfs_write_begin,
.write_end = hostfs_write_end,
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 99493a23c5d0c2..f7547a62c81f6a 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -158,9 +158,9 @@ static const struct iomap_ops hpfs_iomap_ops = {
.iomap_begin = hpfs_iomap_begin,
};
-static int hpfs_readpage(struct file *file, struct page *page)
+static int hpfs_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, hpfs_get_block);
+ return mpage_read_folio(folio, hpfs_get_block);
}
static int hpfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -194,13 +194,13 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to)
}
static int hpfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
*pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
hpfs_get_block,
&hpfs_i(mapping->host)->mmu_private);
if (unlikely(ret))
@@ -247,7 +247,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
const struct address_space_operations hpfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = hpfs_readpage,
+ .read_folio = hpfs_read_folio,
.writepage = hpfs_writepage,
.readahead = hpfs_readahead,
.writepages = hpfs_writepages,
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index d73f8a67168e98..15fc63276caae1 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -479,8 +479,9 @@ out:
return err;
}
-static int hpfs_symlink_readpage(struct file *file, struct page *page)
+static int hpfs_symlink_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
char *link = page_address(page);
struct inode *i = page->mapping->host;
struct fnode *fnode;
@@ -508,7 +509,7 @@ fail:
}
const struct address_space_operations hpfs_symlink_aops = {
- .readpage = hpfs_symlink_readpage
+ .read_folio = hpfs_symlink_read_folio
};
static int hpfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index dd3a088db11d1e..2de9ca5d260d54 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -383,7 +383,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
static int hugetlbfs_write_begin(struct file *file,
struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
return -EINVAL;
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 94b53cbdefadeb..d2a9f699e17ed3 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -297,7 +297,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
/*
* If the bio_alloc fails, try it again for a single page to
* avoid having to deal with partial page reads. This emulates
- * what do_mpage_readpage does.
+ * what do_mpage_read_folio does.
*/
if (!ctx->bio) {
ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
@@ -320,10 +320,8 @@ done:
return pos - orig_pos + plen;
}
-int
-iomap_readpage(struct page *page, const struct iomap_ops *ops)
+int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
- struct folio *folio = page_folio(page);
struct iomap_iter iter = {
.inode = folio->mapping->host,
.pos = folio_pos(folio),
@@ -351,13 +349,13 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
}
/*
- * Just like mpage_readahead and block_read_full_page, we always
- * return 0 and just mark the page as PageError on errors. This
+ * Just like mpage_readahead and block_read_full_folio, we always
+ * return 0 and just set the folio error flag on errors. This
* should be cleaned up throughout the stack eventually.
*/
return 0;
}
-EXPORT_SYMBOL_GPL(iomap_readpage);
+EXPORT_SYMBOL_GPL(iomap_read_folio);
static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
struct iomap_readpage_ctx *ctx)
@@ -454,25 +452,23 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
-int
-iomap_releasepage(struct page *page, gfp_t gfp_mask)
+bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
- struct folio *folio = page_folio(page);
-
- trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
+ trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
folio_size(folio));
/*
- * mm accommodates an old ext3 case where clean pages might not have had
- * the dirty bit cleared. Thus, it can send actual dirty pages to
- * ->releasepage() via shrink_active_list(); skip those here.
+ * mm accommodates an old ext3 case where clean folios might
+ * not have had the dirty bit cleared. Thus, it can send actual
+ * dirty folios to ->release_folio() via shrink_active_list();
+ * skip those here.
*/
if (folio_test_dirty(folio) || folio_test_writeback(folio))
- return 0;
+ return false;
iomap_page_release(folio);
- return 1;
+ return true;
}
-EXPORT_SYMBOL_GPL(iomap_releasepage);
+EXPORT_SYMBOL_GPL(iomap_release_folio);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
@@ -664,10 +660,10 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
/*
* The blocks that were entirely written will now be uptodate, so we
- * don't have to worry about a readpage reading them and overwriting a
+ * don't have to worry about a read_folio reading them and overwriting a
* partial write. However, if we've encountered a short write and only
* partially written into a block, it will not be marked uptodate, so a
- * readpage might come in and destroy our partial write.
+ * read_folio might come in and destroy our partial write.
*
* Do the simplest thing and just treat any short write to a
* non-uptodate page as a zero-length write, and force the caller to
@@ -1485,7 +1481,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
* Skip the page if it's fully outside i_size, e.g. due to a
* truncate operation that's in progress. We must redirty the
* page so that reclaim stops reclaiming it. Otherwise
- * iomap_vm_releasepage() is called on it and gets confused.
+ * iomap_release_folio() is called on it and gets confused.
*
* Note that the end_index is unsigned long. If the given
* offset is greater than 16TB on a 32-bit system then if we
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
index a6689a563c6e26..d48868fc40d78f 100644
--- a/fs/iomap/trace.h
+++ b/fs/iomap/trace.h
@@ -80,7 +80,7 @@ DEFINE_EVENT(iomap_range_class, name, \
TP_PROTO(struct inode *inode, loff_t off, u64 len),\
TP_ARGS(inode, off, len))
DEFINE_RANGE_EVENT(iomap_writepage);
-DEFINE_RANGE_EVENT(iomap_releasepage);
+DEFINE_RANGE_EVENT(iomap_release_folio);
DEFINE_RANGE_EVENT(iomap_invalidate_folio);
DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
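
For iomap-based filesystems the conversion is a rename at the call sites: iomap_readpage() and iomap_releasepage() become iomap_read_folio() and iomap_release_folio(), with gfs2 above as the in-tree example. A hedged sketch of the resulting wiring; my_iomap_ops is the filesystem's existing iomap_ops:

	#include <linux/iomap.h>

	extern const struct iomap_ops my_iomap_ops;	/* filesystem-specific */

	static int my_read_folio(struct file *file, struct folio *folio)
	{
		return iomap_read_folio(folio, &my_iomap_ops);
	}

	static const struct address_space_operations my_aops = {
		.read_folio		= my_read_folio,
		.release_folio		= iomap_release_folio,
		.invalidate_folio	= iomap_invalidate_folio,
		/* .readahead, .writepages etc. as before */
	};
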
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index bc12ac7e231272..95a19f25d61cc3 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -296,8 +296,9 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
* per reference. We inject the additional pages into the page
* cache as a form of readahead.
*/
-static int zisofs_readpage(struct file *file, struct page *page)
+static int zisofs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
int err;
@@ -369,7 +370,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
}
const struct address_space_operations zisofs_aops = {
- .readpage = zisofs_readpage,
+ .read_folio = zisofs_read_folio,
/* No bmap operation supported */
};
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index d7491692aea3f4..88bf2030346603 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1174,9 +1174,9 @@ struct buffer_head *isofs_bread(struct inode *inode, sector_t block)
return sb_bread(inode->i_sb, blknr);
}
-static int isofs_readpage(struct file *file, struct page *page)
+static int isofs_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, isofs_get_block);
+ return mpage_read_folio(folio, isofs_get_block);
}
static void isofs_readahead(struct readahead_control *rac)
@@ -1190,7 +1190,7 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations isofs_aops = {
- .readpage = isofs_readpage,
+ .read_folio = isofs_read_folio,
.readahead = isofs_readahead,
.bmap = _isofs_bmap
};
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 4880146babaf90..48f58c6c9e69b5 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -687,11 +687,12 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
}
/*
- * readpage() for symlinks: reads symlink contents into the page and either
+ * read_folio() for symlinks: reads symlink contents into the folio and either
* makes it uptodate and returns 0 or returns error (-EIO)
*/
-static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
+static int rock_ridge_symlink_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct iso_inode_info *ei = ISOFS_I(inode);
struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
@@ -804,5 +805,5 @@ error:
}
const struct address_space_operations isofs_symlink_aops = {
- .readpage = rock_ridge_symlink_readpage
+ .read_folio = rock_ridge_symlink_read_folio
};
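
The symlink conversions (fuse, hpfs, and Rock Ridge above) all reduce to the same pattern: fill the folio, mark it uptodate on success, and unlock it on every path. A minimal sketch, assuming a lowmem folio so folio_address() is safe, with a hypothetical my_fill_link() that writes the NUL-terminated target:

	#include <linux/pagemap.h>

	/* Filesystem-specific: copy the link target into buf; elided here. */
	static int my_fill_link(struct inode *inode, char *buf, size_t len);

	static int my_symlink_read_folio(struct file *file, struct folio *folio)
	{
		struct inode *inode = folio->mapping->host;
		char *link = folio_address(folio);
		int err = my_fill_link(inode, link, PAGE_SIZE);

		if (!err)
			folio_mark_uptodate(folio);
		/* The folio arrives locked; unlock on success and failure alike. */
		folio_unlock(folio);
		return err;
	}
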
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index ac7f067b7bddb7..eb315e81f1a6b9 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -62,6 +62,7 @@ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
*/
static void release_buffer_page(struct buffer_head *bh)
{
+ struct folio *folio;
struct page *page;
if (buffer_dirty(bh))
@@ -71,18 +72,19 @@ static void release_buffer_page(struct buffer_head *bh)
page = bh->b_page;
if (!page)
goto nope;
- if (page->mapping)
+ folio = page_folio(page);
+ if (folio->mapping)
goto nope;
/* OK, it's a truncated page */
- if (!trylock_page(page))
+ if (!folio_trylock(folio))
goto nope;
- get_page(page);
+ folio_get(folio);
__brelse(bh);
- try_to_free_buffers(page);
- unlock_page(page);
- put_page(page);
+ try_to_free_buffers(folio);
+ folio_unlock(folio);
+ folio_put(folio);
return;
nope:
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index fcb9175016a59e..e49bb0938376b7 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -2143,17 +2143,17 @@ out:
* cannot happen because we never reallocate freed data as metadata
* while the data is part of a transaction. Yes?
*
- * Return 0 on failure, 1 on success
+ * Return false on failure, true on success
*/
-int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
+bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio)
{
struct buffer_head *head;
struct buffer_head *bh;
- int ret = 0;
+ bool ret = false;
- J_ASSERT(PageLocked(page));
+ J_ASSERT(folio_test_locked(folio));
- head = page_buffers(page);
+ head = folio_buffers(folio);
bh = head;
do {
struct journal_head *jh;
@@ -2175,7 +2175,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
goto busy;
} while ((bh = bh->b_this_page) != head);
- ret = try_to_free_buffers(page);
+ ret = try_to_free_buffers(folio);
busy:
return ret;
}
@@ -2482,7 +2482,7 @@ int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio,
} while (bh != head);
if (!partial_page) {
- if (may_free && try_to_free_buffers(&folio->page))
+ if (may_free && try_to_free_buffers(folio))
J_ASSERT(!folio_buffers(folio));
}
return 0;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index bd7d58d27bfc63..ba86acbe12d3fb 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -25,9 +25,9 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *pg, void *fsdata);
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
-static int jffs2_readpage (struct file *filp, struct page *pg);
+static int jffs2_read_folio(struct file *filp, struct folio *folio);
int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
@@ -72,7 +72,7 @@ const struct inode_operations jffs2_file_inode_operations =
const struct address_space_operations jffs2_file_address_operations =
{
- .readpage = jffs2_readpage,
+ .read_folio = jffs2_read_folio,
.write_begin = jffs2_write_begin,
.write_end = jffs2_write_end,
};
@@ -110,27 +110,26 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
return ret;
}
-int jffs2_do_readpage_unlock(void *data, struct page *pg)
+int __jffs2_read_folio(struct file *file, struct folio *folio)
{
- int ret = jffs2_do_readpage_nolock(data, pg);
- unlock_page(pg);
+ int ret = jffs2_do_readpage_nolock(folio->mapping->host, &folio->page);
+ folio_unlock(folio);
return ret;
}
-
-static int jffs2_readpage (struct file *filp, struct page *pg)
+static int jffs2_read_folio(struct file *file, struct folio *folio)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(folio->mapping->host);
int ret;
mutex_lock(&f->sem);
- ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
+ ret = __jffs2_read_folio(file, folio);
mutex_unlock(&f->sem);
return ret;
}
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct page *pg;
@@ -213,7 +212,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
* page in read_cache_page(), which causes a deadlock.
*/
mutex_lock(&c->alloc_sem);
- pg = grab_cache_page_write_begin(mapping, index, flags);
+ pg = grab_cache_page_write_begin(mapping, index);
if (!pg) {
ret = -ENOMEM;
goto release_sem;
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 71f03a5d36ed25..00a110f40e10b1 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -178,7 +178,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
jffs2_complete_reservation(c);
/* We have to do the truncate_setsize() without f->sem held, since
- some pages may be locked and waiting for it in readpage().
+ some pages may be locked and waiting for it in read_folio().
We are protected from a simultaneous write() extending i_size
back past iattr->ia_size, because do_truncate() holds the
generic inode semaphore. */
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 373b3b7c9f4457..5c6602f3c18915 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -1327,7 +1327,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
* trying to write out, read_cache_page() will not deadlock. */
mutex_unlock(&f->sem);
page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
- jffs2_do_readpage_unlock, inode);
+ __jffs2_read_folio, NULL);
if (IS_ERR(page)) {
pr_warn("read_cache_page() returned error: %ld\n",
PTR_ERR(page));
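
The jffs2 hunk shows the knock-on effect of the filler_t change: read_cache_page() fillers now have the ->read_folio type, so the old void *data cookie is gone and the final argument is the struct file * passed through to the filler (NULL when there is none). A sketch with hypothetical names:

	#include <linux/pagemap.h>

	static int my_read_folio(struct file *file, struct folio *folio);

	static struct page *my_read_one_page(struct address_space *mapping,
					     pgoff_t index)
	{
		/*
		 * With no file pointer to pass, the filler must derive its
		 * state from folio->mapping->host instead of a data cookie.
		 */
		return read_cache_page(mapping, index, my_read_folio, NULL);
	}
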
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 173eccac691df1..921d782583d6fe 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -155,7 +155,7 @@ extern const struct file_operations jffs2_file_operations;
extern const struct inode_operations jffs2_file_inode_operations;
extern const struct address_space_operations jffs2_file_address_operations;
int jffs2_fsync(struct file *, loff_t, loff_t, int);
-int jffs2_do_readpage_unlock(void *data, struct page *pg);
+int __jffs2_read_folio(struct file *file, struct folio *folio);
/* ioctl.c */
long jffs2_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index d1943a7b4b0407..a5dd7e53754a87 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -293,9 +293,9 @@ static int jfs_writepages(struct address_space *mapping,
return mpage_writepages(mapping, wbc, jfs_get_block);
}
-static int jfs_readpage(struct file *file, struct page *page)
+static int jfs_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, jfs_get_block);
+ return mpage_read_folio(folio, jfs_get_block);
}
static void jfs_readahead(struct readahead_control *rac)
@@ -314,13 +314,12 @@ static void jfs_write_failed(struct address_space *mapping, loff_t to)
}
static int jfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
- ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
- jfs_get_block);
+ ret = nobh_write_begin(mapping, pos, len, pagep, fsdata, jfs_get_block);
if (unlikely(ret))
jfs_write_failed(mapping, pos + len);
@@ -360,7 +359,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
const struct address_space_operations jfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = jfs_readpage,
+ .read_folio = jfs_read_folio,
.readahead = jfs_readahead,
.writepage = jfs_writepage,
.writepages = jfs_writepages,
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index c4220ccdedef6c..387652ae14c216 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -467,8 +467,9 @@ err_out:
return -EIO;
}
-static int metapage_readpage(struct file *fp, struct page *page)
+static int metapage_read_folio(struct file *fp, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct bio *bio = NULL;
int block_offset;
@@ -523,29 +524,29 @@ add_failed:
return -EIO;
}
-static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
+static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
struct metapage *mp;
- int ret = 1;
+ bool ret = true;
int offset;
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
- mp = page_to_mp(page, offset);
+ mp = page_to_mp(&folio->page, offset);
if (!mp)
continue;
- jfs_info("metapage_releasepage: mp = 0x%p", mp);
+ jfs_info("metapage_release_folio: mp = 0x%p", mp);
if (mp->count || mp->nohomeok ||
test_bit(META_dirty, &mp->flag)) {
jfs_info("count = %ld, nohomeok = %d", mp->count,
mp->nohomeok);
- ret = 0;
+ ret = false;
continue;
}
if (mp->lsn)
remove_from_logsync(mp);
- remove_metapage(page, mp);
+ remove_metapage(&folio->page, mp);
INCREMENT(mpStat.pagefree);
free_metapage(mp);
}
@@ -559,13 +560,13 @@ static void metapage_invalidate_folio(struct folio *folio, size_t offset,
BUG_ON(folio_test_writeback(folio));
- metapage_releasepage(&folio->page, 0);
+ metapage_release_folio(folio, 0);
}
const struct address_space_operations jfs_metapage_aops = {
- .readpage = metapage_readpage,
+ .read_folio = metapage_read_folio,
.writepage = metapage_writepage,
- .releasepage = metapage_releasepage,
+ .release_folio = metapage_release_folio,
.invalidate_folio = metapage_invalidate_folio,
.dirty_folio = filemap_dirty_folio,
};
diff --git a/fs/libfs.c b/fs/libfs.c
index e64bdedef16835..31b0ddf01c31da 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -539,17 +539,17 @@ int simple_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
}
EXPORT_SYMBOL(simple_setattr);
-static int simple_readpage(struct file *file, struct page *page)
+static int simple_read_folio(struct file *file, struct folio *folio)
{
- clear_highpage(page);
- flush_dcache_page(page);
- SetPageUptodate(page);
- unlock_page(page);
+ folio_zero_range(folio, 0, folio_size(folio));
+ flush_dcache_folio(folio);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return 0;
}
int simple_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct page *page;
@@ -557,7 +557,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
index = pos >> PAGE_SHIFT;
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
@@ -592,7 +592,7 @@ EXPORT_SYMBOL(simple_write_begin);
* should extend on what's done here with a call to mark_inode_dirty() in the
* case that i_size has changed.
*
- * Use *ONLY* with simple_readpage()
+ * Use *ONLY* with simple_read_folio()
*/
static int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
@@ -628,7 +628,7 @@ static int simple_write_end(struct file *file, struct address_space *mapping,
* Provides ramfs-style behavior: data in the pagecache, but no writeback.
*/
const struct address_space_operations ram_aops = {
- .readpage = simple_readpage,
+ .read_folio = simple_read_folio,
.write_begin = simple_write_begin,
.write_end = simple_write_end,
.dirty_folio = noop_dirty_folio,
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index f1a6610e4ee697..da8bdd1712a70b 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -402,9 +402,9 @@ static int minix_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page, minix_get_block, wbc);
}
-static int minix_readpage(struct file *file, struct page *page)
+static int minix_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page,minix_get_block);
+ return block_read_full_folio(folio, minix_get_block);
}
int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len)
@@ -423,13 +423,12 @@ static void minix_write_failed(struct address_space *mapping, loff_t to)
}
static int minix_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, flags, pagep,
- minix_get_block);
+ ret = block_write_begin(mapping, pos, len, pagep, minix_get_block);
if (unlikely(ret))
minix_write_failed(mapping, pos + len);
@@ -444,7 +443,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations minix_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = minix_readpage,
+ .read_folio = minix_read_folio,
.writepage = minix_writepage,
.write_begin = minix_write_begin,
.write_end = generic_write_end,
diff --git a/fs/mpage.c b/fs/mpage.c
index 1fe56f8c495fc2..0d25f44f5707cb 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -36,7 +36,7 @@
*
* The mpage code never puts partial pages into a BIO (except for end-of-file).
* If a page does not map to a contiguous run of blocks then it simply falls
- * back to block_read_full_page().
+ * back to block_read_full_folio().
*
* Why is this? If a page's completion depends on a number of different BIOs
* which can complete in any order (or at the same time) then determining the
@@ -68,7 +68,7 @@ static struct bio *mpage_bio_submit(struct bio *bio)
/*
* support function for mpage_readahead. The fs supplied get_block might
* return an up to date buffer. This is used to map that buffer into
- * the page, which allows readpage to avoid triggering a duplicate call
+ * the page, which allows read_folio to avoid triggering a duplicate call
* to get_block.
*
* The idea is to avoid adding buffers to pages that don't already have
@@ -296,7 +296,7 @@ confused:
if (args->bio)
args->bio = mpage_bio_submit(args->bio);
if (!PageUptodate(page))
- block_read_full_page(page, args->get_block);
+ block_read_full_folio(page_folio(page), args->get_block);
else
unlock_page(page);
goto out;
@@ -364,20 +364,22 @@ EXPORT_SYMBOL(mpage_readahead);
/*
* This isn't called much at all
*/
-int mpage_readpage(struct page *page, get_block_t get_block)
+int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
struct mpage_readpage_args args = {
- .page = page,
+ .page = &folio->page,
.nr_pages = 1,
.get_block = get_block,
};
+ VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+
args.bio = do_mpage_readpage(&args);
if (args.bio)
mpage_bio_submit(args.bio);
return 0;
}
-EXPORT_SYMBOL(mpage_readpage);
+EXPORT_SYMBOL(mpage_read_folio);
/*
* Writing is not so simple.
@@ -425,11 +427,11 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
/*
* we cannot drop the bh if the page is not uptodate or a concurrent
- * readpage would fail to serialize with the bh and it would read from
+ * read_folio would fail to serialize with the bh and it would read from
* disk before we reach the platter.
*/
if (buffer_heads_over_limit && PageUptodate(page))
- try_to_free_buffers(page);
+ try_to_free_buffers(page_folio(page));
}
/*
@@ -510,7 +512,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
/*
* Page has buffers, but they are all unmapped. The page was
* created by pagein or read over a hole which was handled by
- * block_read_full_page(). If this address_space is also
+ * block_read_full_folio(). If this address_space is also
* using mpage_readahead then this can rarely happen.
*/
goto confused;
diff --git a/fs/namei.c b/fs/namei.c
index 509657fdf4f56d..896ade8b740045 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
+#include <linux/sched/mm.h>
#include <linux/fsnotify.h>
#include <linux/personality.h>
#include <linux/security.h>
@@ -5001,28 +5002,28 @@ int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
}
EXPORT_SYMBOL(page_readlink);
-/*
- * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
- */
-int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
+int page_symlink(struct inode *inode, const char *symname, int len)
{
struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *aops = mapping->a_ops;
+ bool nofs = !mapping_gfp_constraint(mapping, __GFP_FS);
struct page *page;
void *fsdata;
int err;
- unsigned int flags = 0;
- if (nofs)
- flags |= AOP_FLAG_NOFS;
+ unsigned int flags;
retry:
- err = pagecache_write_begin(NULL, mapping, 0, len-1,
- flags, &page, &fsdata);
+ if (nofs)
+ flags = memalloc_nofs_save();
+ err = aops->write_begin(NULL, mapping, 0, len-1, &page, &fsdata);
+ if (nofs)
+ memalloc_nofs_restore(flags);
if (err)
goto fail;
memcpy(page_address(page), symname, len-1);
- err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
+ err = aops->write_end(NULL, mapping, 0, len-1, len-1,
page, fsdata);
if (err < 0)
goto fail;
@@ -5034,13 +5035,6 @@ retry:
fail:
return err;
}
-EXPORT_SYMBOL(__page_symlink);
-
-int page_symlink(struct inode *inode, const char *symname, int len)
-{
- return __page_symlink(inode, symname, len,
- !mapping_gfp_constraint(inode->i_mapping, __GFP_FS));
-}
EXPORT_SYMBOL(page_symlink);
const struct inode_operations page_symlink_inode_operations = {
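
The idiom replacing AOP_FLAG_NOFS above is general: rather than threading a flag through every address_space operation, the caller scopes the allocation context around the call, and everything beneath it implicitly loses __GFP_FS. A condensed sketch of the pattern, assuming some nofs condition:

	unsigned int flags;

	if (nofs)	/* e.g. the mapping's gfp mask forbids __GFP_FS */
		flags = memalloc_nofs_save();
	err = mapping->a_ops->write_begin(NULL, mapping, 0, len - 1,
					  &page, &fsdata);
	if (nofs)
		memalloc_nofs_restore(flags);
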
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 281a88a5b8dcd4..8742d22dfd2b24 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -198,22 +198,21 @@ cleanup_free:
EXPORT_SYMBOL(netfs_readahead);
/**
- * netfs_readpage - Helper to manage a readpage request
+ * netfs_read_folio - Helper to manage a read_folio request
* @file: The file to read from
- * @subpage: A subpage of the folio to read
+ * @folio: The folio to read
*
- * Fulfil a readpage request by drawing data from the cache if possible, or the
- * netfs if not. Space beyond the EOF is zero-filled. Multiple I/O requests
- * from different sources will get munged together.
+ * Fulfil a read_folio request by drawing data from the cache if
+ * possible, or the netfs if not. Space beyond the EOF is zero-filled.
+ * Multiple I/O requests from different sources will get munged together.
*
* The calling netfs must initialise a netfs context contiguous to the vfs
* inode before calling this.
*
* This is usable whether or not caching is enabled.
*/
-int netfs_readpage(struct file *file, struct page *subpage)
+int netfs_read_folio(struct file *file, struct folio *folio)
{
- struct folio *folio = page_folio(subpage);
struct address_space *mapping = folio_file_mapping(folio);
struct netfs_io_request *rreq;
struct netfs_i_context *ctx = netfs_i_context(mapping->host);
@@ -245,7 +244,7 @@ alloc_error:
folio_unlock(folio);
return ret;
}
-EXPORT_SYMBOL(netfs_readpage);
+EXPORT_SYMBOL(netfs_read_folio);
/*
* Prepare a folio for writing without reading first
@@ -302,7 +301,6 @@ zero_out:
* @mapping: The mapping to read from
* @pos: File position at which the write will begin
* @len: The length of the write (may extend beyond the end of the folio chosen)
- * @aop_flags: AOP_* flags
* @_folio: Where to put the resultant folio
* @_fsdata: Place for the netfs to store a cookie
*
@@ -329,22 +327,19 @@ zero_out:
* This is usable whether or not caching is enabled.
*/
int netfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int aop_flags,
- struct folio **_folio, void **_fsdata)
+ loff_t pos, unsigned int len, struct folio **_folio,
+ void **_fsdata)
{
struct netfs_io_request *rreq;
struct netfs_i_context *ctx = netfs_i_context(file_inode(file ));
struct folio *folio;
- unsigned int fgp_flags;
+ unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
pgoff_t index = pos >> PAGE_SHIFT;
int ret;
DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
retry:
- fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
- if (aop_flags & AOP_FLAG_NOFS)
- fgp_flags |= FGP_NOFS;
folio = __filemap_get_folio(mapping, index, fgp_flags,
mapping_gfp_mask(mapping));
if (!folio)
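
Since netfs_read_folio() now has exactly the ->read_folio signature, a filesystem built on the netfs library can plug the helper straight into its operations table. A sketch with a hypothetical myfs (real users such as afs and 9p are converted elsewhere in this series):

	static const struct address_space_operations myfs_aops = {
		.read_folio	= netfs_read_folio,
		.readahead	= netfs_readahead,
		/* write paths, invalidation etc. remain fs-specific */
	};
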
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index c6b263b5faf1fc..a8ecdd527662cc 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -55,7 +55,7 @@ static int nfs_closedir(struct inode *, struct file *);
static int nfs_readdir(struct file *, struct dir_context *);
static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
static loff_t nfs_llseek_dir(struct file *, loff_t, int);
-static void nfs_readdir_clear_array(struct page*);
+static void nfs_readdir_free_folio(struct folio *);
const struct file_operations nfs_dir_operations = {
.llseek = nfs_llseek_dir,
@@ -67,7 +67,7 @@ const struct file_operations nfs_dir_operations = {
};
const struct address_space_operations nfs_dir_aops = {
- .freepage = nfs_readdir_clear_array,
+ .free_folio = nfs_readdir_free_folio,
};
#define NFS_INIT_DTSIZE PAGE_SIZE
@@ -228,6 +228,11 @@ static void nfs_readdir_clear_array(struct page *page)
kunmap_atomic(array);
}
+static void nfs_readdir_free_folio(struct folio *folio)
+{
+ nfs_readdir_clear_array(&folio->page);
+}
+
static void nfs_readdir_page_reinit_array(struct page *page, u64 last_cookie,
u64 change_attr)
{
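
The two-line wrapper above illustrates a transitional idiom used throughout this series: when the underlying helper still operates on a struct page, expose a thin folio entry point (or open-code &folio->page at the top of the converted function) and convert the helper later. Generic shape, names hypothetical:

	static void myfs_free_folio(struct folio *folio)
	{
		myfs_clear_page(&folio->page);	/* legacy page-based helper */
	}
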
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 150b7fa8f0a736..d764b3ce79050b 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -313,7 +313,7 @@ static bool nfs_want_read_modify_write(struct file *file, struct page *page,
* increment the page use counts until he is done with the page.
*/
static int nfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
@@ -325,7 +325,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
file, mapping->host->i_ino, len, (long long) pos);
start:
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
*pagep = page;
@@ -337,7 +337,7 @@ start:
} else if (!once_thru &&
nfs_want_read_modify_write(file, page, pos, len)) {
once_thru = 1;
- ret = nfs_readpage(file, page);
+ ret = nfs_read_folio(file, page_folio(page));
put_page(page);
if (!ret)
goto start;
@@ -415,34 +415,31 @@ static void nfs_invalidate_folio(struct folio *folio, size_t offset,
}
/*
- * Attempt to release the private state associated with a page
- * - Called if either PG_private or PG_fscache is set on the page
- * - Caller holds page lock
- * - Return true (may release page) or false (may not)
+ * Attempt to release the private state associated with a folio
+ * - Called if either private or fscache flags are set on the folio
+ * - Caller holds folio lock
+ * - Return true (may release folio) or false (may not)
*/
-static int nfs_release_page(struct page *page, gfp_t gfp)
+static bool nfs_release_folio(struct folio *folio, gfp_t gfp)
{
- dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
+ dfprintk(PAGECACHE, "NFS: release_folio(%p)\n", folio);
- /* If PagePrivate() is set, then the page is not freeable */
- if (PagePrivate(page))
- return 0;
- return nfs_fscache_release_page(page, gfp);
+ /* If the private flag is set, then the folio is not freeable */
+ if (folio_test_private(folio))
+ return false;
+ return nfs_fscache_release_folio(folio, gfp);
}
-static void nfs_check_dirty_writeback(struct page *page,
+static void nfs_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback)
{
struct nfs_inode *nfsi;
- struct address_space *mapping = page_file_mapping(page);
-
- if (!mapping || PageSwapCache(page))
- return;
+ struct address_space *mapping = folio->mapping;
/*
- * Check if an unstable page is currently being committed and
- * if so, have the VM treat it as if the page is under writeback
- * so it will not block due to pages that will shortly be freeable.
+ * Check if an unstable folio is currently being committed and
+ * if so, have the VM treat it as if the folio is under writeback
+ * so it will not block due to folios that will shortly be freeable.
*/
nfsi = NFS_I(mapping->host);
if (atomic_read(&nfsi->commit_info.rpcs_out)) {
@@ -451,11 +448,11 @@ static void nfs_check_dirty_writeback(struct page *page,
}
/*
- * If PagePrivate() is set, then the page is not freeable and as the
- * inode is not being committed, it's not going to be cleaned in the
- * near future so treat it as dirty
+ * If the private flag is set, then the folio is not freeable
+ * and as the inode is not being committed, it's not going to
+ * be cleaned in the near future so treat it as dirty
*/
- if (PagePrivate(page))
+ if (folio_test_private(folio))
*dirty = true;
}
@@ -517,7 +514,7 @@ static void nfs_swap_deactivate(struct file *file)
}
const struct address_space_operations nfs_file_aops = {
- .readpage = nfs_readpage,
+ .read_folio = nfs_read_folio,
.readahead = nfs_readahead,
.dirty_folio = filemap_dirty_folio,
.writepage = nfs_writepage,
@@ -525,7 +522,7 @@ const struct address_space_operations nfs_file_aops = {
.write_begin = nfs_write_begin,
.write_end = nfs_write_end,
.invalidate_folio = nfs_invalidate_folio,
- .releasepage = nfs_release_page,
+ .release_folio = nfs_release_folio,
.direct_IO = nfs_direct_IO,
#ifdef CONFIG_MIGRATION
.migratepage = nfs_migrate_page,
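
nfs_write_begin() above shows the new ->write_begin contract: the flags argument is gone, and grab_cache_page_write_begin() takes just the mapping and index. A trivial implementation now looks roughly like this sketch (hypothetical myfs, error handling elided):

	static int myfs_write_begin(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len,
				    struct page **pagep, void **fsdata)
	{
		struct page *page;

		/* callers needing NOFS scope it with memalloc_nofs_save() */
		page = grab_cache_page_write_begin(mapping, pos >> PAGE_SHIFT);
		if (!page)
			return -ENOMEM;
		*pagep = page;
		return 0;
	}
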
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 4e980cc04779bd..2a37af88097800 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -48,14 +48,14 @@ extern void nfs_fscache_release_file(struct inode *, struct file *);
extern int __nfs_fscache_read_page(struct inode *, struct page *);
extern void __nfs_fscache_write_page(struct inode *, struct page *);
-static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
+static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
- if (PageFsCache(page)) {
+ if (folio_test_fscache(folio)) {
if (current_is_kswapd() || !(gfp & __GFP_FS))
return false;
- wait_on_page_fscache(page);
- fscache_note_page_release(nfs_i_fscache(page->mapping->host));
- nfs_inc_fscache_stats(page->mapping->host,
+ folio_wait_fscache(folio);
+ fscache_note_page_release(nfs_i_fscache(folio->mapping->host));
+ nfs_inc_fscache_stats(folio->mapping->host,
NFSIOS_FSCACHE_PAGES_UNCACHED);
}
return true;
@@ -129,9 +129,9 @@ static inline void nfs_fscache_open_file(struct inode *inode,
struct file *filp) {}
static inline void nfs_fscache_release_file(struct inode *inode, struct file *file) {}
-static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
+static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
- return 1; /* True: may release page */
+ return true; /* may release folio */
}
static inline int nfs_fscache_read_page(struct inode *inode, struct page *page)
{
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 5e7657374bc3f8..5a9b043662e913 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -333,8 +333,9 @@ out:
* - The error flag is set for this page. This happens only when a
* previous async read operation failed.
*/
-int nfs_readpage(struct file *file, struct page *page)
+int nfs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct nfs_readdesc desc;
struct inode *inode = page_file_mapping(page)->host;
int ret;
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
index 25ba299fdac2e6..0e27a2e4e68b84 100644
--- a/fs/nfs/symlink.c
+++ b/fs/nfs/symlink.c
@@ -26,21 +26,21 @@
* and straight-forward than readdir caching.
*/
-static int nfs_symlink_filler(void *data, struct page *page)
+static int nfs_symlink_filler(struct file *file, struct folio *folio)
{
- struct inode *inode = data;
+ struct inode *inode = folio->mapping->host;
int error;
- error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
+ error = NFS_PROTO(inode)->readlink(inode, &folio->page, 0, PAGE_SIZE);
if (error < 0)
goto error;
- SetPageUptodate(page);
- unlock_page(page);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return 0;
error:
- SetPageError(page);
- unlock_page(page);
+ folio_set_error(folio);
+ folio_unlock(folio);
return -EIO;
}
@@ -67,7 +67,7 @@ static const char *nfs_get_link(struct dentry *dentry,
if (err)
return err;
page = read_cache_page(&inode->i_data, 0, nfs_symlink_filler,
- inode);
+ NULL);
if (IS_ERR(page))
return ERR_CAST(page);
}
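
The filler change above reflects the new filler_t contract: a filler now has the same signature as ->read_folio, read_cache_page() passes a struct file * (here NULL) in place of the old opaque data pointer, and any context must be recovered from folio->mapping. Condensed sketch, hypothetical names:

	static int myfs_symlink_filler(struct file *file, struct folio *folio)
	{
		struct inode *inode = folio->mapping->host; /* was the 'data' cookie */

		/* ...fill the folio from the inode... */
		folio_mark_uptodate(folio);
		folio_unlock(folio);
		return 0;
	}

	page = read_cache_page(inode->i_mapping, 0, myfs_symlink_filler, NULL);
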
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 6045cea21f5224..67f63cfeade5c4 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -63,10 +63,10 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n)
/**
* nilfs_get_block() - get a file block on the filesystem (callback function)
- * @inode - inode struct of the target file
- * @blkoff - file block number
- * @bh_result - buffer head to be mapped on
- * @create - indicate whether allocating the block or not when it has not
+ * @inode: inode struct of the target file
+ * @blkoff: file block number
+ * @bh_result: buffer head to be mapped on
+ * @create: indicate whether allocating the block or not when it has not
* been allocated yet.
*
* This function does not issue an actual read request of the specified data
@@ -140,14 +140,14 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
}
/**
- * nilfs_readpage() - implement readpage() method of nilfs_aops {}
+ * nilfs_read_folio() - implement read_folio() method of nilfs_aops {}
* address_space_operations.
- * @file - file struct of the file to be read
- * @page - the page to be read
+ * @file: file struct of the file to be read
+ * @folio: the folio to be read
*/
-static int nilfs_readpage(struct file *file, struct page *page)
+static int nilfs_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, nilfs_get_block);
+ return mpage_read_folio(folio, nilfs_get_block);
}
static void nilfs_readahead(struct readahead_control *rac)
@@ -248,7 +248,7 @@ void nilfs_write_failed(struct address_space *mapping, loff_t to)
}
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
@@ -258,8 +258,7 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping,
if (unlikely(err))
return err;
- err = block_write_begin(mapping, pos, len, flags, pagep,
- nilfs_get_block);
+ err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block);
if (unlikely(err)) {
nilfs_write_failed(mapping, pos + len);
nilfs_transaction_abort(inode->i_sb);
@@ -299,13 +298,12 @@ nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
const struct address_space_operations nilfs_aops = {
.writepage = nilfs_writepage,
- .readpage = nilfs_readpage,
+ .read_folio = nilfs_read_folio,
.writepages = nilfs_writepages,
.dirty_folio = nilfs_dirty_folio,
.readahead = nilfs_readahead,
.write_begin = nilfs_write_begin,
.write_end = nilfs_write_end,
- /* .releasepage = nilfs_releasepage, */
.invalidate_folio = block_invalidate_folio,
.direct_IO = nilfs_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
@@ -1088,6 +1086,7 @@ int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
/**
* nilfs_dirty_inode - reflect changes on given inode to an inode block.
* @inode: inode of the file to be registered.
+ * @flags: flags to determine the dirty state of the inode
*
* nilfs_dirty_inode() loads an inode block containing the specified
* @inode and copies data from a nilfs_inode to a corresponding inode
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 9e2ed76c0f25cb..0955b657938ff2 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -511,7 +511,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
pos = rb->blkoff << inode->i_blkbits;
err = block_write_begin(inode->i_mapping, pos, blocksize,
- 0, &page, nilfs_get_block);
+ &page, nilfs_get_block);
if (unlikely(err)) {
loff_t isize = inode->i_size;
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 90e3dad8ee454a..9e3964ea2ea030 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -159,7 +159,7 @@ still_busy:
*
* Return 0 on success and -errno on error.
*
- * Contains an adapted version of fs/buffer.c::block_read_full_page().
+ * Contains an adapted version of fs/buffer.c::block_read_full_folio().
*/
static int ntfs_read_block(struct page *page)
{
@@ -358,16 +358,16 @@ handle_zblock:
}
/**
- * ntfs_readpage - fill a @page of a @file with data from the device
- * @file: open file to which the page @page belongs or NULL
- * @page: page cache page to fill with data
+ * ntfs_read_folio - fill a @folio of a @file with data from the device
+ * @file: open file to which the folio @folio belongs or NULL
+ * @folio: page cache folio to fill with data
*
- * For non-resident attributes, ntfs_readpage() fills the @page of the open
- * file @file by calling the ntfs version of the generic block_read_full_page()
+ * For non-resident attributes, ntfs_read_folio() fills the @folio of the open
+ * file @file by calling the ntfs version of the generic block_read_full_folio()
* function, ntfs_read_block(), which in turn creates and reads in the buffers
- * associated with the page asynchronously.
+ * associated with the folio asynchronously.
*
- * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
+ * For resident attributes, OTOH, ntfs_read_folio() fills @folio by copying the
* data from the mft record (which at this stage is most likely in memory) and
* fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
* even if the mft record is not cached at this point in time, we need to wait
@@ -375,8 +375,9 @@ handle_zblock:
*
* Return 0 on success and -errno on error.
*/
-static int ntfs_readpage(struct file *file, struct page *page)
+static int ntfs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
loff_t i_size;
struct inode *vi;
ntfs_inode *ni, *base_ni;
@@ -458,7 +459,7 @@ retry_readpage:
}
/*
* If a parallel write made the attribute non-resident, drop the mft
- * record and retry the readpage.
+ * record and retry the read_folio.
*/
if (unlikely(NInoNonResident(ni))) {
unmap_mft_record(base_ni);
@@ -637,10 +638,11 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
if (unlikely((block >= iblock) &&
(initialized_size < i_size))) {
/*
- * If this page is fully outside initialized size, zero
- * out all pages between the current initialized size
- * and the current page. Just use ntfs_readpage() to do
- * the zeroing transparently.
+ * If this page is fully outside initialized
+ * size, zero out all pages between the current
+ * initialized size and the current page. Just
+ * use ntfs_read_folio() to do the zeroing
+ * transparently.
*/
if (block > iblock) {
// TODO:
@@ -798,7 +800,7 @@ lock_retry_remap:
/* For the error case, need to reset bh to the beginning. */
bh = head;
- /* Just an optimization, so ->readpage() is not called later. */
+ /* Just an optimization, so ->read_folio() is not called later. */
if (unlikely(!PageUptodate(page))) {
int uptodate = 1;
do {
@@ -1329,7 +1331,7 @@ done:
* vfs inode dirty code path for the inode the mft record belongs to or via the
* vm page dirty code path for the page the mft record is in.
*
- * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
+ * Based on ntfs_read_folio() and fs/buffer.c::block_write_full_page().
*
* Return 0 on success and -errno on error.
*/
@@ -1651,7 +1653,7 @@ hole:
* attributes.
*/
const struct address_space_operations ntfs_normal_aops = {
- .readpage = ntfs_readpage,
+ .read_folio = ntfs_read_folio,
#ifdef NTFS_RW
.writepage = ntfs_writepage,
.dirty_folio = block_dirty_folio,
@@ -1666,7 +1668,7 @@ const struct address_space_operations ntfs_normal_aops = {
* ntfs_compressed_aops - address space operations for compressed inodes
*/
const struct address_space_operations ntfs_compressed_aops = {
- .readpage = ntfs_readpage,
+ .read_folio = ntfs_read_folio,
#ifdef NTFS_RW
.writepage = ntfs_writepage,
.dirty_folio = block_dirty_folio,
@@ -1681,7 +1683,7 @@ const struct address_space_operations ntfs_compressed_aops = {
* and attributes
*/
const struct address_space_operations ntfs_mst_aops = {
- .readpage = ntfs_readpage, /* Fill page with data. */
+ .read_folio = ntfs_read_folio, /* Fill page with data. */
#ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */
.dirty_folio = filemap_dirty_folio,
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index f0962d46bd673b..934d5f79b9e7a1 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -37,9 +37,9 @@ static inline void ntfs_unmap_page(struct page *page)
* Read a page from the page cache of the address space @mapping at position
* @index, where @index is in units of PAGE_SIZE, and not in bytes.
*
- * If the page is not in memory it is loaded from disk first using the readpage
- * method defined in the address space operations of @mapping and the page is
- * added to the page cache of @mapping in the process.
+ * If the page is not in memory it is loaded from disk first using the
+ * read_folio method defined in the address space operations of @mapping
+ * and the page is added to the page cache of @mapping in the process.
*
* If the page belongs to an mst protected attribute and it is marked as such
* in its ntfs inode (NInoMstProtected()) the mst fixups are applied but no
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 2911c04a33e01c..4de597a83b88dd 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1719,7 +1719,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
vi->i_blocks = ni->allocated_size >> 9;
write_unlock_irqrestore(&ni->size_lock, flags);
/*
- * This needs to be last since the address space operations ->readpage
+ * This needs to be last since the address space operations ->read_folio
* and ->writepage can run concurrently with us as they are not
* serialized on i_mutex. Note, we are not allowed to fail once we flip
* this switch, which is another reason to do this last.
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index d2f9d6a0ee323a..a60f543e755777 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -780,12 +780,12 @@ lock_retry_remap:
/* Uncompressed cb, copy it to the destination pages. */
/*
* TODO: As a big optimization, we could detect this case
- * before we read all the pages and use block_read_full_page()
+ * before we read all the pages and use block_read_full_folio()
* on all full pages instead (we still have to treat partial
* pages especially but at least we are getting rid of the
* synchronous io for the majority of pages).
* Or if we choose not to do the read-ahead/-behind stuff, we
- * could just return block_read_full_page(pages[xpage]) as long
+ * could just return block_read_full_folio(pages[xpage]) as long
* as PAGE_SIZE <= cb_size.
*/
if (cb_max_ofs)
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 2ae25e48a41a61..e1392a9b8cebe8 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -251,14 +251,14 @@ do_non_resident_extend:
*
* TODO: For sparse pages could optimize this workload by using
* the FsMisc / MiscFs page bit as a "PageIsSparse" bit. This
- * would be set in readpage for sparse pages and here we would
+ * would be set in read_folio for sparse pages and here we would
* not need to mark dirty any pages which have this bit set.
* The only caveat is that we have to clear the bit everywhere
* where we allocate any clusters that lie in the page or that
* contain the page.
*
* TODO: An even greater optimization would be for us to only
- * call readpage() on pages which are not in sparse regions as
+ * call read_folio() on pages which are not in sparse regions as
* determined from the runlist. This would greatly reduce the
* number of pages we read and make dirty in the case of sparse
* files.
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index efe0602b4e5110..db0f1995aedd15 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -1832,7 +1832,7 @@ int ntfs_read_inode_mount(struct inode *vi)
/* Need this to sanity check attribute list references to $MFT. */
vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
- /* Provides readpage() for map_mft_record(). */
+ /* Provides read_folio() for map_mft_record(). */
vi->i_mapping->a_ops = &ntfs_mst_aops;
ctx = ntfs_attr_get_search_ctx(ni, m);
@@ -2503,7 +2503,7 @@ retry_truncate:
* between the old data_size, i.e. old_size, and the new_size
* has not been zeroed. Fortunately, we do not need to zero it
* either since on one hand it will either already be zero due
- * to both readpage and writepage clearing partial page data
+ * to both read_folio and writepage clearing partial page data
* beyond i_size in which case there is nothing to do or in the
* case of the file being mmap()ped at the same time, POSIX
* specifies that the behaviour is unspecified thus we do not
diff --git a/fs/ntfs/mft.h b/fs/ntfs/mft.h
index 17bfefc3027166..49c001af16edc5 100644
--- a/fs/ntfs/mft.h
+++ b/fs/ntfs/mft.h
@@ -79,7 +79,7 @@ extern int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync);
* paths and via the page cache write back code paths or between writing
* neighbouring mft records residing in the same page.
*
- * Locking the page also serializes us against ->readpage() if the page is not
+ * Locking the page also serializes us against ->read_folio() if the page is not
* uptodate.
*
* On success, clean the mft record and return 0. On error, leave the mft
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 15806eeae217a0..a4fcdc7927ca2c 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -115,7 +115,6 @@ static int ntfs_extend_initialized_size(struct file *file,
for (;;) {
u32 zerofrom, len;
struct page *page;
- void *fsdata;
u8 bits;
CLST vcn, lcn, clen;
@@ -157,16 +156,14 @@ static int ntfs_extend_initialized_size(struct file *file,
if (pos + len > new_valid)
len = new_valid - pos;
- err = pagecache_write_begin(file, mapping, pos, len, 0, &page,
- &fsdata);
+ err = ntfs_write_begin(file, mapping, pos, len, &page, NULL);
if (err)
goto out;
zero_user_segment(page, zerofrom, PAGE_SIZE);
/* This function in any case puts page. */
- err = pagecache_write_end(file, mapping, pos, len, len, page,
- fsdata);
+ err = ntfs_write_end(file, mapping, pos, len, len, page, NULL);
if (err < 0)
goto out;
pos += len;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index 9eab11e3b03415..74f60c457f287a 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -676,8 +676,9 @@ static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
}
-static int ntfs_readpage(struct file *file, struct page *page)
+static int ntfs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
int err;
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
@@ -701,7 +702,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
}
/* Normal + sparse files. */
- return mpage_readpage(page, ntfs_get_block);
+ return mpage_read_folio(folio, ntfs_get_block);
}
static void ntfs_readahead(struct readahead_control *rac)
@@ -861,9 +862,8 @@ static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
bh_result, create, GET_BLOCK_WRITE_BEGIN);
}
-static int ntfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, u32 len, u32 flags, struct page **pagep,
- void **fsdata)
+int ntfs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, u32 len, struct page **pagep, void **fsdata)
{
int err;
struct inode *inode = mapping->host;
@@ -872,7 +872,7 @@ static int ntfs_write_begin(struct file *file, struct address_space *mapping,
*pagep = NULL;
if (is_resident(ni)) {
struct page *page = grab_cache_page_write_begin(
- mapping, pos >> PAGE_SHIFT, flags);
+ mapping, pos >> PAGE_SHIFT);
if (!page) {
err = -ENOMEM;
@@ -894,7 +894,7 @@ static int ntfs_write_begin(struct file *file, struct address_space *mapping,
goto out;
}
- err = block_write_begin(mapping, pos, len, flags, pagep,
+ err = block_write_begin(mapping, pos, len, pagep,
ntfs_get_block_write_begin);
out:
@@ -904,10 +904,9 @@ out:
/*
* ntfs_write_end - Address_space_operations::write_end.
*/
-static int ntfs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, u32 len, u32 copied, struct page *page,
- void *fsdata)
-
+int ntfs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, u32 len, u32 copied, struct page *page,
+ void *fsdata)
{
struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
@@ -975,7 +974,7 @@ int reset_log_file(struct inode *inode)
len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
- err = block_write_begin(mapping, pos, len, 0, &page,
+ err = block_write_begin(mapping, pos, len, &page,
ntfs_get_block_write_begin);
if (err)
goto out;
@@ -1942,7 +1941,7 @@ const struct inode_operations ntfs_link_inode_operations = {
};
const struct address_space_operations ntfs_aops = {
- .readpage = ntfs_readpage,
+ .read_folio = ntfs_read_folio,
.readahead = ntfs_readahead,
.writepage = ntfs_writepage,
.writepages = ntfs_writepages,
@@ -1954,7 +1953,7 @@ const struct address_space_operations ntfs_aops = {
};
const struct address_space_operations ntfs_aops_cmpr = {
- .readpage = ntfs_readpage,
+ .read_folio = ntfs_read_folio,
.readahead = ntfs_readahead,
};
// clang-format on
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index fb825059d48867..8de129a6419b5b 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -689,6 +689,11 @@ int ntfs_set_size(struct inode *inode, u64 new_size);
int reset_log_file(struct inode *inode);
int ntfs_get_block(struct inode *inode, sector_t vbn,
struct buffer_head *bh_result, int create);
+int ntfs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, u32 len, struct page **pagep, void **fsdata);
+int ntfs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, u32 len, u32 copied, struct page *page,
+ void *fsdata);
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
int ntfs_sync_inode(struct inode *inode);
int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 49f41074baadd1..51c93929a1461d 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7427,7 +7427,7 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
/*
* No need to worry about the data page here - it's been
* truncated already and inline data doesn't need it for
- * pushing zero's to disk, so we'll let readpage pick it up
+ * pushing zero's to disk, so we'll let read_folio pick it up
* later.
*/
if (trunc) {
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 4b9af65cb61bb9..35d40a67204c48 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -275,8 +275,9 @@ out:
return ret;
}
-static int ocfs2_readpage(struct file *file, struct page *page)
+static int ocfs2_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
loff_t start = (loff_t)page->index << PAGE_SHIFT;
@@ -309,7 +310,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
/*
* i_size might have just been updated as we grabbed the meta lock. We
* might now be discovering a truncate that hit on another node.
- * block_read_full_page->get_block freaks out if it is asked to read
+ * block_read_full_folio->get_block freaks out if it is asked to read
* beyond the end of a file, so we check here. Callers
* (generic_file_read, vm_ops->fault) are clever enough to check i_size
* and notice that the page they just read isn't needed.
@@ -326,7 +327,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
ret = ocfs2_readpage_inline(inode, page);
else
- ret = block_read_full_page(page, ocfs2_get_block);
+ ret = block_read_full_folio(page_folio(page), ocfs2_get_block);
unlock = 0;
out_alloc:
@@ -497,11 +498,11 @@ bail:
return status;
}
-static int ocfs2_releasepage(struct page *page, gfp_t wait)
+static bool ocfs2_release_folio(struct folio *folio, gfp_t wait)
{
- if (!page_has_buffers(page))
- return 0;
- return try_to_free_buffers(page);
+ if (!folio_buffers(folio))
+ return false;
+ return try_to_free_buffers(folio);
}
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
@@ -1881,7 +1882,7 @@ out:
}
static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
@@ -1897,7 +1898,7 @@ static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
/*
* Take alloc sem here to prevent concurrent lookups. That way
* the mapping, zeroing and tree manipulation within
- * ocfs2_write() will be safe against ->readpage(). This
+ * ocfs2_write() will be safe against ->read_folio(). This
* should also serve to lock out allocation from a shared
* writeable region.
*/
@@ -2454,7 +2455,7 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
const struct address_space_operations ocfs2_aops = {
.dirty_folio = block_dirty_folio,
- .readpage = ocfs2_readpage,
+ .read_folio = ocfs2_read_folio,
.readahead = ocfs2_readahead,
.writepage = ocfs2_writepage,
.write_begin = ocfs2_write_begin,
@@ -2462,7 +2463,7 @@ const struct address_space_operations ocfs2_aops = {
.bmap = ocfs2_bmap,
.direct_IO = ocfs2_direct_IO,
.invalidate_folio = block_invalidate_folio,
- .releasepage = ocfs2_releasepage,
+ .release_folio = ocfs2_release_folio,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
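
ocfs2_release_folio() above also shows the new ->release_folio contract: the return type is bool (may release / may not), and try_to_free_buffers() takes the folio directly. For a buffer-backed filesystem with no additional private state, the whole method reduces to this shape (mirroring ocfs2):

	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
	{
		if (!folio_buffers(folio))
			return false;
		return try_to_free_buffers(folio);
	}
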
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 01b7407a8893f0..7497cd59225861 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2526,7 +2526,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
return -EOPNOTSUPP;
/*
- * buffered reads protect themselves in ->readpage(). O_DIRECT reads
+ * buffered reads protect themselves in ->read_folio(). O_DIRECT reads
* need locks to protect pending reads from racing with truncate.
*/
if (direct_io) {
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 7f6355cbb58759..e04358a46b6805 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2961,12 +2961,14 @@ retry:
}
if (!PageUptodate(page)) {
- ret = block_read_full_page(page, ocfs2_get_block);
+ struct folio *folio = page_folio(page);
+
+ ret = block_read_full_folio(folio, ocfs2_get_block);
if (ret) {
mlog_errno(ret);
goto unlock;
}
- lock_page(page);
+ folio_lock(folio);
}
if (page_has_buffers(page)) {
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index f755a498582160..d4c5fdcfa1e464 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -52,8 +52,9 @@
#include "buffer_head_io.h"
-static int ocfs2_fast_symlink_readpage(struct file *unused, struct page *page)
+static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
int status = ocfs2_read_inode_block(inode, &bh);
@@ -81,7 +82,7 @@ static int ocfs2_fast_symlink_readpage(struct file *unused, struct page *page)
}
const struct address_space_operations ocfs2_fast_symlink_aops = {
- .readpage = ocfs2_fast_symlink_readpage,
+ .read_folio = ocfs2_fast_symlink_read_folio,
};
const struct inode_operations ocfs2_symlink_inode_operations = {
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 3f297b54171328..fa7fe2393ff686 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -284,9 +284,9 @@ out:
return ret;
}
-static int omfs_readpage(struct file *file, struct page *page)
+static int omfs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page, omfs_get_block);
+ return block_read_full_folio(folio, omfs_get_block);
}
static void omfs_readahead(struct readahead_control *rac)
@@ -316,13 +316,12 @@ static void omfs_write_failed(struct address_space *mapping, loff_t to)
}
static int omfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, flags, pagep,
- omfs_get_block);
+ ret = block_write_begin(mapping, pos, len, pagep, omfs_get_block);
if (unlikely(ret))
omfs_write_failed(mapping, pos + len);
@@ -374,7 +373,7 @@ const struct inode_operations omfs_file_inops = {
const struct address_space_operations omfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = omfs_readpage,
+ .read_folio = omfs_read_folio,
.readahead = omfs_readahead,
.writepage = omfs_writepage,
.writepages = omfs_writepages,
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 79c1025d18ea94..5ce27dde3c799c 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -288,47 +288,45 @@ static void orangefs_readahead(struct readahead_control *rac)
}
}
-static int orangefs_readpage(struct file *file, struct page *page)
+static int orangefs_read_folio(struct file *file, struct folio *folio)
{
- struct folio *folio = page_folio(page);
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct iov_iter iter;
struct bio_vec bv;
ssize_t ret;
- loff_t off; /* offset into this page */
+ loff_t off; /* offset of this folio in the file */
if (folio_test_dirty(folio))
orangefs_launder_folio(folio);
- off = page_offset(page);
- bv.bv_page = page;
- bv.bv_len = PAGE_SIZE;
+ off = folio_pos(folio);
+ bv.bv_page = &folio->page;
+ bv.bv_len = folio_size(folio);
bv.bv_offset = 0;
- iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);
+ iov_iter_bvec(&iter, READ, &bv, 1, folio_size(folio));
ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
- PAGE_SIZE, inode->i_size, NULL, NULL, file);
+ folio_size(folio), inode->i_size, NULL, NULL, file);
/* this will only zero remaining unread portions of the page data */
iov_iter_zero(~0U, &iter);
/* takes care of potential aliasing */
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
if (ret < 0) {
- SetPageError(page);
+ folio_set_error(folio);
} else {
- SetPageUptodate(page);
- if (PageError(page))
- ClearPageError(page);
+ folio_mark_uptodate(folio);
+ if (folio_test_error(folio))
+ folio_clear_error(folio);
ret = 0;
}
- /* unlock the page after the ->readpage() routine completes */
- unlock_page(page);
+ /* unlock the folio after the ->read_folio() routine completes */
+ folio_unlock(folio);
return ret;
}
static int orangefs_write_begin(struct file *file,
- struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags, struct page **pagep,
- void **fsdata)
+ struct address_space *mapping, loff_t pos, unsigned len,
+ struct page **pagep, void **fsdata)
{
struct orangefs_write_range *wr;
struct folio *folio;
@@ -338,7 +336,7 @@ static int orangefs_write_begin(struct file *file,
index = pos >> PAGE_SHIFT;
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
@@ -487,14 +485,14 @@ static void orangefs_invalidate_folio(struct folio *folio,
orangefs_launder_folio(folio);
}
-static int orangefs_releasepage(struct page *page, gfp_t foo)
+static bool orangefs_release_folio(struct folio *folio, gfp_t foo)
{
- return !PagePrivate(page);
+ return !folio_test_private(folio);
}
-static void orangefs_freepage(struct page *page)
+static void orangefs_free_folio(struct folio *folio)
{
- kfree(detach_page_private(page));
+ kfree(folio_detach_private(folio));
}
static int orangefs_launder_folio(struct folio *folio)
@@ -632,14 +630,14 @@ out:
static const struct address_space_operations orangefs_address_operations = {
.writepage = orangefs_writepage,
.readahead = orangefs_readahead,
- .readpage = orangefs_readpage,
+ .read_folio = orangefs_read_folio,
.writepages = orangefs_writepages,
.dirty_folio = filemap_dirty_folio,
.write_begin = orangefs_write_begin,
.write_end = orangefs_write_end,
.invalidate_folio = orangefs_invalidate_folio,
- .releasepage = orangefs_releasepage,
- .freepage = orangefs_freepage,
+ .release_folio = orangefs_release_folio,
+ .free_folio = orangefs_free_folio,
.launder_folio = orangefs_launder_folio,
.direct_IO = orangefs_direct_IO,
};
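
A detail worth keeping from the orangefs conversion above: a folio is fed to an iterator-based I/O path by wrapping it in a single bio_vec, and folio_size() replaces the hard-coded PAGE_SIZE so the same code stays correct if larger folios ever reach it. Condensed sketch:

	struct bio_vec bv;
	struct iov_iter iter;

	bv.bv_page	= &folio->page;
	bv.bv_len	= folio_size(folio);
	bv.bv_offset	= 0;
	iov_iter_bvec(&iter, READ, &bv, 1, folio_size(folio));
	/* ...then hand &iter to the read engine... */
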
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index a635bb6615e9d4..391ea402920d9a 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -245,17 +245,18 @@ static void qnx4_kill_sb(struct super_block *sb)
}
}
-static int qnx4_readpage(struct file *file, struct page *page)
+static int qnx4_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page,qnx4_get_block);
+ return block_read_full_folio(folio, qnx4_get_block);
}
static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,qnx4_get_block);
}
+
static const struct address_space_operations qnx4_aops = {
- .readpage = qnx4_readpage,
+ .read_folio = qnx4_read_folio,
.bmap = qnx4_bmap
};
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 9d8e7e9788a1df..b9895afca9d110 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -94,9 +94,9 @@ static int qnx6_check_blockptr(__fs32 ptr)
return 1;
}
-static int qnx6_readpage(struct file *file, struct page *page)
+static int qnx6_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, qnx6_get_block);
+ return mpage_read_folio(folio, qnx6_get_block);
}
static void qnx6_readahead(struct readahead_control *rac)
@@ -496,7 +496,7 @@ static sector_t qnx6_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, qnx6_get_block);
}
static const struct address_space_operations qnx6_aops = {
- .readpage = qnx6_readpage,
+ .read_folio = qnx6_read_folio,
.readahead = qnx6_readahead,
.bmap = qnx6_bmap
};
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 203a47232707c5..6e228bfbe7efbf 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -227,7 +227,7 @@ drop_write_lock:
}
/*
* If this is a partial write which happened to make all buffers
- * uptodate then we can optimize away a bogus readpage() for
+ * uptodate then we can optimize away a bogus read_folio() for
* the next read(). Here we 'discover' whether the page went
* uptodate as a result of this (potentially partial) write.
*/
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 36c59b25486c87..0cffe054b78e16 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -167,10 +167,10 @@ inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
* cutting the code is fine, since it really isn't in use yet and is easy
* to add back in. But, Vladimir has a really good idea here. Think
* about what happens for reading a file. For each page,
- * The VFS layer calls reiserfs_readpage, who searches the tree to find
+ * The VFS layer calls reiserfs_read_folio, who searches the tree to find
* an indirect item. This indirect item has X number of pointers, where
* X is a big number if we've done the block allocation right. But,
- * we only use one or two of these pointers during each call to readpage,
+ * we only use one or two of these pointers during each call to read_folio,
* needlessly researching again later on.
*
* The size of the cache could be dynamic based on the size of the file.
@@ -966,7 +966,7 @@ research:
* it is important the set_buffer_uptodate is done
* after the direct2indirect. The buffer might
* contain valid data newer than the data on disk
- * (read by readpage, changed, and then sent here by
+ * (read by read_folio, changed, and then sent here by
* writepage). direct2indirect needs to know if unbh
* was already up to date, so it can decide if the
* data in unbh needs to be replaced with data from
@@ -2733,9 +2733,9 @@ fail:
goto done;
}
-static int reiserfs_readpage(struct file *f, struct page *page)
+static int reiserfs_read_folio(struct file *f, struct folio *folio)
{
- return block_read_full_page(page, reiserfs_get_block);
+ return block_read_full_folio(folio, reiserfs_get_block);
}
static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -2753,7 +2753,7 @@ static void reiserfs_truncate_failed_write(struct inode *inode)
static int reiserfs_write_begin(struct file *file,
struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct inode *inode;
@@ -2764,7 +2764,7 @@ static int reiserfs_write_begin(struct file *file,
inode = mapping->host;
index = pos >> PAGE_SHIFT;
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
*pagep = page;
@@ -3202,39 +3202,39 @@ static bool reiserfs_dirty_folio(struct address_space *mapping,
}
/*
- * Returns 1 if the page's buffers were dropped. The page is locked.
+ * Returns true if the folio's buffers were dropped. The folio is locked.
*
* Takes j_dirty_buffers_lock to protect the b_assoc_buffers list_heads
- * in the buffers at page_buffers(page).
+ * in the buffers at folio_buffers(folio).
*
* even in -o notail mode, we can't be sure an old mount without -o notail
* didn't create files with tails.
*/
-static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
+static bool reiserfs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
struct buffer_head *head;
struct buffer_head *bh;
- int ret = 1;
+ bool ret = true;
- WARN_ON(PageChecked(page));
+ WARN_ON(folio_test_checked(folio));
spin_lock(&j->j_dirty_buffers_lock);
- head = page_buffers(page);
+ head = folio_buffers(folio);
bh = head;
do {
if (bh->b_private) {
if (!buffer_dirty(bh) && !buffer_locked(bh)) {
reiserfs_free_jh(bh);
} else {
- ret = 0;
+ ret = false;
break;
}
}
bh = bh->b_this_page;
} while (bh != head);
if (ret)
- ret = try_to_free_buffers(page);
+ ret = try_to_free_buffers(folio);
spin_unlock(&j->j_dirty_buffers_lock);
return ret;
}
@@ -3421,9 +3421,9 @@ out:
const struct address_space_operations reiserfs_address_space_operations = {
.writepage = reiserfs_writepage,
- .readpage = reiserfs_readpage,
+ .read_folio = reiserfs_read_folio,
.readahead = reiserfs_readahead,
- .releasepage = reiserfs_releasepage,
+ .release_folio = reiserfs_release_folio,
.invalidate_folio = reiserfs_invalidate_folio,
.write_begin = reiserfs_write_begin,
.write_end = reiserfs_write_end,
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index b5b6f6201bed37..d8cc9a36612469 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -601,14 +601,14 @@ static int journal_list_still_alive(struct super_block *s,
*/
static void release_buffer_page(struct buffer_head *bh)
{
- struct page *page = bh->b_page;
- if (!page->mapping && trylock_page(page)) {
- get_page(page);
+ struct folio *folio = page_folio(bh->b_page);
+ if (!folio->mapping && folio_trylock(folio)) {
+ folio_get(folio);
put_bh(bh);
- if (!page->mapping)
- try_to_free_buffers(page);
- unlock_page(page);
- put_page(page);
+ if (!folio->mapping)
+ try_to_free_buffers(folio);
+ folio_unlock(folio);
+ folio_put(folio);
} else {
put_bh(bh);
}
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 9e6bbb4219de85..c59b230d55b435 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -18,7 +18,7 @@
* Changed for 2.1.19 modules
* Jan 1997 Initial release
* Jun 1997 2.1.43+ changes
- * Proper page locking in readpage
+ * Proper page locking in read_folio
* Changed to work with 2.1.45+ fs
* Jul 1997 Fixed follow_link
* 2.1.47
@@ -41,7 +41,7 @@
* dentries in lookup
* clean up page flags setting
* (error, uptodate, locking) in
- * in readpage
+ * in read_folio
* use init_special_inode for
* fifos/sockets (and streamline) in
* read_inode, fix _ops table order
@@ -99,8 +99,9 @@ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos);
/*
* read a page worth of data from the image
*/
-static int romfs_readpage(struct file *file, struct page *page)
+static int romfs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
loff_t offset, size;
unsigned long fillsize, pos;
@@ -142,7 +143,7 @@ static int romfs_readpage(struct file *file, struct page *page)
}
static const struct address_space_operations romfs_aops = {
- .readpage = romfs_readpage
+ .read_folio = romfs_read_folio
};
/*
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 89d492916deaf8..a8e495d8eb8600 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -444,8 +444,9 @@ static int squashfs_readpage_sparse(struct page *page, int expected)
return 0;
}
-static int squashfs_readpage(struct file *file, struct page *page)
+static int squashfs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
int index = page->index >> (msblk->block_log - PAGE_SHIFT);
@@ -496,5 +497,5 @@ out:
const struct address_space_operations squashfs_aops = {
- .readpage = squashfs_readpage
+ .read_folio = squashfs_read_folio
};
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 4f74abbc1a54a2..6d594ba2ed28ff 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -148,7 +148,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
/*
* squashfs provides 'backing_dev_info' in order to disable read-ahead. For
- * squashfs, I/O is not deferred, it is done immediately in readpage,
+ * squashfs, I/O is not deferred, it is done immediately in read_folio,
* which means the user would always have to wait for their own I/O. So the effect
* of readahead is very weak for squashfs. squashfs_bdi_init will set
* sb->s_bdi->ra_pages and sb->s_bdi->io_pages to 0 and close readahead for
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index 1430613183e6ed..2bf977a52c2c37 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -30,8 +30,9 @@
#include "squashfs.h"
#include "xattr.h"
-static int squashfs_symlink_readpage(struct file *file, struct page *page)
+static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct super_block *sb = inode->i_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
@@ -101,7 +102,7 @@ error_out:
const struct address_space_operations squashfs_symlink_aops = {
- .readpage = squashfs_symlink_readpage
+ .read_folio = squashfs_symlink_read_folio
};
const struct inode_operations squashfs_symlink_inode_ops = {
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 409ab5e178031c..d4ec9bb97de954 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -456,9 +456,9 @@ static int sysv_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page,get_block,wbc);
}
-static int sysv_readpage(struct file *file, struct page *page)
+static int sysv_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page,get_block);
+ return block_read_full_folio(folio, get_block);
}
int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len)
@@ -477,12 +477,12 @@ static void sysv_write_failed(struct address_space *mapping, loff_t to)
}
static int sysv_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, flags, pagep, get_block);
+ ret = block_write_begin(mapping, pos, len, pagep, get_block);
if (unlikely(ret))
sysv_write_failed(mapping, pos + len);
@@ -497,7 +497,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations sysv_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = sysv_readpage,
+ .read_folio = sysv_read_folio,
.writepage = sysv_writepage,
.write_begin = sysv_write_begin,
.write_end = generic_write_end,
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 0383fbdc95ff1d..04ced154960fa6 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -31,9 +31,9 @@
* in the "sys_write -> alloc_pages -> direct reclaim path". So, in
* 'ubifs_writepage()' we are only guaranteed that the page is locked.
*
- * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
+ * Similarly, @i_mutex is not always locked in 'ubifs_read_folio()', e.g., the
* read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
- * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not
+ * ondemand_readahead -> read_folio"). In case of readahead, @I_SYNC flag is not
* set as well. However, UBIFS disables readahead.
*/
@@ -215,8 +215,7 @@ static void release_existing_page_budget(struct ubifs_info *c)
}
static int write_begin_slow(struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep,
- unsigned flags)
+ loff_t pos, unsigned len, struct page **pagep)
{
struct inode *inode = mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
@@ -244,7 +243,7 @@ static int write_begin_slow(struct address_space *mapping,
if (unlikely(err))
return err;
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (unlikely(!page)) {
ubifs_release_budget(c, &req);
return -ENOMEM;
@@ -419,7 +418,7 @@ static int allocate_budget(struct ubifs_info *c, struct page *page,
* without forcing write-back. The slow path does not make this assumption.
*/
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
@@ -437,7 +436,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
return -EROFS;
/* Try out the fast-path part first */
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (unlikely(!page))
return -ENOMEM;
@@ -493,7 +492,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
unlock_page(page);
put_page(page);
- return write_begin_slow(mapping, pos, len, pagep, flags);
+ return write_begin_slow(mapping, pos, len, pagep);
}
/*
@@ -890,12 +889,14 @@ out_unlock:
return err;
}
-static int ubifs_readpage(struct file *file, struct page *page)
+static int ubifs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
+
if (ubifs_bulk_read(page))
return 0;
do_readpage(page);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
}
@@ -1483,22 +1484,22 @@ static int ubifs_migrate_page(struct address_space *mapping,
}
#endif
-static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
+static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
/*
* An attempt to release a dirty page without budgeting for it - should
* not happen.
*/
- if (PageWriteback(page))
- return 0;
- ubifs_assert(c, PagePrivate(page));
+ if (folio_test_writeback(folio))
+ return false;
+ ubifs_assert(c, folio_test_private(folio));
ubifs_assert(c, 0);
- detach_page_private(page);
- ClearPageChecked(page);
- return 1;
+ folio_detach_private(folio);
+ folio_clear_checked(folio);
+ return true;
}
/*
@@ -1642,7 +1643,7 @@ static int ubifs_symlink_getattr(struct user_namespace *mnt_userns,
}
const struct address_space_operations ubifs_file_address_operations = {
- .readpage = ubifs_readpage,
+ .read_folio = ubifs_read_folio,
.writepage = ubifs_writepage,
.write_begin = ubifs_write_begin,
.write_end = ubifs_write_end,
@@ -1651,7 +1652,7 @@ const struct address_space_operations ubifs_file_address_operations = {
#ifdef CONFIG_MIGRATION
.migratepage = ubifs_migrate_page,
#endif
- .releasepage = ubifs_releasepage,
+ .release_folio = ubifs_release_folio,
};
const struct inode_operations ubifs_file_inode_operations = {
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index bad67455215fac..0978d01b0ea4fa 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2191,7 +2191,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
/*
* UBIFS provides 'backing_dev_info' in order to disable read-ahead. For
- * UBIFS, I/O is not deferred, it is done immediately in readpage,
+ * UBIFS, I/O is not deferred, it is done immediately in read_folio,
* which means the user would have to wait not just for their own I/O
 * but the read-ahead I/O as well, i.e. it would be completely pointless.
*
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 0f6bf2504437ba..09aef77269fe46 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -57,11 +57,11 @@ static void __udf_adinicb_readpage(struct page *page)
kunmap_atomic(kaddr);
}
-static int udf_adinicb_readpage(struct file *file, struct page *page)
+static int udf_adinicb_read_folio(struct file *file, struct folio *folio)
{
- BUG_ON(!PageLocked(page));
- __udf_adinicb_readpage(page);
- unlock_page(page);
+ BUG_ON(!folio_test_locked(folio));
+ __udf_adinicb_readpage(&folio->page);
+ folio_unlock(folio);
return 0;
}
@@ -87,14 +87,14 @@ static int udf_adinicb_writepage(struct page *page,
static int udf_adinicb_write_begin(struct file *file,
struct address_space *mapping, loff_t pos,
- unsigned len, unsigned flags, struct page **pagep,
+ unsigned len, struct page **pagep,
void **fsdata)
{
struct page *page;
if (WARN_ON_ONCE(pos >= PAGE_SIZE))
return -EIO;
- page = grab_cache_page_write_begin(mapping, 0, flags);
+ page = grab_cache_page_write_begin(mapping, 0);
if (!page)
return -ENOMEM;
*pagep = page;
@@ -127,7 +127,7 @@ static int udf_adinicb_write_end(struct file *file, struct address_space *mappin
const struct address_space_operations udf_adinicb_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = udf_adinicb_readpage,
+ .read_folio = udf_adinicb_read_folio,
.writepage = udf_adinicb_writepage,
.write_begin = udf_adinicb_write_begin,
.write_end = udf_adinicb_write_end,
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index ca4fa710e562f2..edc88716751abd 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -193,9 +193,9 @@ static int udf_writepages(struct address_space *mapping,
return mpage_writepages(mapping, wbc, udf_get_block);
}
-static int udf_readpage(struct file *file, struct page *page)
+static int udf_read_folio(struct file *file, struct folio *folio)
{
- return mpage_readpage(page, udf_get_block);
+ return mpage_read_folio(folio, udf_get_block);
}
static void udf_readahead(struct readahead_control *rac)
@@ -204,12 +204,12 @@ static void udf_readahead(struct readahead_control *rac)
}
static int udf_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
+ ret = block_write_begin(mapping, pos, len, pagep, udf_get_block);
if (unlikely(ret))
udf_write_failed(mapping, pos + len);
return ret;
@@ -237,7 +237,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations udf_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = udf_readpage,
+ .read_folio = udf_read_folio,
.readahead = udf_readahead,
.writepage = udf_writepage,
.writepages = udf_writepages,
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 9b223421a3c551..f3642f9c23f86b 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -101,8 +101,9 @@ static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
return 0;
}
-static int udf_symlink_filler(struct file *file, struct page *page)
+static int udf_symlink_filler(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
unsigned char *symlink;
@@ -183,7 +184,7 @@ static int udf_symlink_getattr(struct user_namespace *mnt_userns,
* symlinks can't do much...
*/
const struct address_space_operations udf_symlink_aops = {
- .readpage = udf_symlink_filler,
+ .read_folio = udf_symlink_filler,
};
const struct inode_operations udf_symlink_inode_operations = {
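Because filler_t now takes a struct file pointer first, exactly like ->read_folio, a symlink filler such as udf_symlink_filler above can be registered as the read_folio op with no wrapper. A hedged sketch of that shape for a hypothetical filesystem (myfs_copy_link is assumed):

static int myfs_symlink_filler(struct file *file, struct folio *folio)
{
	/* assumption: the link text fits in one page and is NUL-terminated */
	myfs_copy_link(folio->mapping->host, folio_address(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

const struct address_space_operations myfs_symlink_aops = {
	.read_folio	= myfs_symlink_filler,
};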
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index d0dda01620f0d7..a873de7dec1c42 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -390,7 +390,7 @@ out:
/**
* ufs_getfrag_block() - `get_block_t' function, interface between UFS and
- * readpage, writepage and so on
+ * read_folio, writepage and so on
*/
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
@@ -472,9 +472,9 @@ static int ufs_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page,ufs_getfrag_block,wbc);
}
-static int ufs_readpage(struct file *file, struct page *page)
+static int ufs_read_folio(struct file *file, struct folio *folio)
{
- return block_read_full_page(page,ufs_getfrag_block);
+ return block_read_full_folio(folio, ufs_getfrag_block);
}
int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
@@ -495,13 +495,12 @@ static void ufs_write_failed(struct address_space *mapping, loff_t to)
}
static int ufs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, flags, pagep,
- ufs_getfrag_block);
+ ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block);
if (unlikely(ret))
ufs_write_failed(mapping, pos + len);
@@ -528,7 +527,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations ufs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .readpage = ufs_readpage,
+ .read_folio = ufs_read_folio,
.writepage = ufs_writepage,
.write_begin = ufs_write_begin,
.write_end = ufs_write_end,
diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c
index d74e0d336995d4..572aa1c43b37b9 100644
--- a/fs/vboxsf/file.c
+++ b/fs/vboxsf/file.c
@@ -225,8 +225,9 @@ const struct inode_operations vboxsf_reg_iops = {
.setattr = vboxsf_setattr
};
-static int vboxsf_readpage(struct file *file, struct page *page)
+static int vboxsf_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
struct vboxsf_handle *sf_handle = file->private_data;
loff_t off = page_offset(page);
u32 nread = PAGE_SIZE;
@@ -352,7 +353,7 @@ out:
* page and it does not call SetPageUptodate for partial writes.
*/
const struct address_space_operations vboxsf_reg_aops = {
- .readpage = vboxsf_readpage,
+ .read_folio = vboxsf_read_folio,
.writepage = vboxsf_writepage,
.dirty_folio = filemap_dirty_folio,
.write_begin = simple_write_begin,
diff --git a/fs/verity/enable.c b/fs/verity/enable.c
index d52872c808fffe..df6b499bf6a14a 100644
--- a/fs/verity/enable.c
+++ b/fs/verity/enable.c
@@ -18,27 +18,26 @@
* Read a file data page for Merkle tree construction. Do aggressive readahead,
* since we're sequentially reading the entire file.
*/
-static struct page *read_file_data_page(struct file *filp, pgoff_t index,
+static struct page *read_file_data_page(struct file *file, pgoff_t index,
struct file_ra_state *ra,
unsigned long remaining_pages)
{
- struct page *page;
+ DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, index);
+ struct folio *folio;
- page = find_get_page_flags(filp->f_mapping, index, FGP_ACCESSED);
- if (!page || !PageUptodate(page)) {
- if (page)
- put_page(page);
+ folio = __filemap_get_folio(ractl.mapping, index, FGP_ACCESSED, 0);
+ if (!folio || !folio_test_uptodate(folio)) {
+ if (folio)
+ folio_put(folio);
else
- page_cache_sync_readahead(filp->f_mapping, ra, filp,
- index, remaining_pages);
- page = read_mapping_page(filp->f_mapping, index, NULL);
- if (IS_ERR(page))
- return page;
+ page_cache_sync_ra(&ractl, remaining_pages);
+ folio = read_cache_folio(ractl.mapping, index, NULL, file);
+ if (IS_ERR(folio))
+ return &folio->page;
}
- if (PageReadahead(page))
- page_cache_async_readahead(filp->f_mapping, ra, filp, page,
- index, remaining_pages);
- return page;
+ if (folio_test_readahead(folio))
+ page_cache_async_ra(&ractl, folio, remaining_pages);
+ return folio_file_page(folio, index);
}
static int build_merkle_tree_level(struct file *filp, unsigned int level,
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f6216d0fb0c2c0..8ec38b25187bdb 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -536,11 +536,11 @@ xfs_vm_bmap(
}
STATIC int
-xfs_vm_readpage(
+xfs_vm_read_folio(
struct file *unused,
- struct page *page)
+ struct folio *folio)
{
- return iomap_readpage(page, &xfs_read_iomap_ops);
+ return iomap_read_folio(folio, &xfs_read_iomap_ops);
}
STATIC void
@@ -562,11 +562,11 @@ xfs_iomap_swapfile_activate(
}
const struct address_space_operations xfs_address_space_operations = {
- .readpage = xfs_vm_readpage,
+ .read_folio = xfs_vm_read_folio,
.readahead = xfs_vm_readahead,
.writepages = xfs_vm_writepages,
.dirty_folio = filemap_dirty_folio,
- .releasepage = iomap_releasepage,
+ .release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
.bmap = xfs_vm_bmap,
.direct_IO = noop_direct_IO,
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 8f306485c95385..bcb21aea990aeb 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -162,9 +162,9 @@ static const struct iomap_ops zonefs_iomap_ops = {
.iomap_begin = zonefs_iomap_begin,
};
-static int zonefs_readpage(struct file *unused, struct page *page)
+static int zonefs_read_folio(struct file *unused, struct folio *folio)
{
- return iomap_readpage(page, &zonefs_iomap_ops);
+ return iomap_read_folio(folio, &zonefs_iomap_ops);
}
static void zonefs_readahead(struct readahead_control *rac)
@@ -230,12 +230,12 @@ static int zonefs_swap_activate(struct swap_info_struct *sis,
}
static const struct address_space_operations zonefs_file_aops = {
- .readpage = zonefs_readpage,
+ .read_folio = zonefs_read_folio,
.readahead = zonefs_readahead,
.writepage = zonefs_writepage,
.writepages = zonefs_writepages,
.dirty_folio = filemap_dirty_folio,
- .releasepage = iomap_releasepage,
+ .release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
.migratepage = iomap_migrate_page,
.is_partially_uptodate = iomap_is_partially_uptodate,
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index bcb4fe9b8575c2..c9d1463bb20f31 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -146,7 +146,7 @@ BUFFER_FNS(Defer_Completion, defer_completion)
#define page_has_buffers(page) PagePrivate(page)
#define folio_buffers(folio) folio_get_private(folio)
-void buffer_check_dirty_writeback(struct page *page,
+void buffer_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback);
/*
@@ -158,7 +158,7 @@ void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset);
-int try_to_free_buffers(struct page *);
+bool try_to_free_buffers(struct folio *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
bool retry);
void create_empty_buffers(struct page *, unsigned long,
@@ -223,10 +223,10 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc,
bh_end_io_t *handler);
-int block_read_full_page(struct page*, get_block_t*);
+int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- unsigned flags, struct page **pagep, get_block_t *get_block);
+ struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
@@ -238,7 +238,7 @@ int generic_write_end(struct file *, struct address_space *,
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
- unsigned, unsigned, struct page **, void **,
+ unsigned, struct page **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
@@ -258,7 +258,7 @@ static inline vm_fault_t block_page_mkwrite_return(int err)
}
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
+int nobh_write_begin(struct address_space *, loff_t, unsigned len,
struct page **, void **, get_block_t*);
int nobh_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
@@ -402,7 +402,7 @@ bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
#else /* CONFIG_BLOCK */
static inline void buffer_init(void) {}
-static inline int try_to_free_buffers(struct page *page) { return 1; }
+static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 02e7f60638b847..f58ae40235ab91 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -262,7 +262,7 @@ struct iattr {
* trying again. The aop will be taking reasonable
* precautions not to livelock. If the caller held a page
* reference, it should drop it before retrying. Returned
- * by readpage().
+ * by read_folio().
*
* address_space_operation functions return these large constants to indicate
* special semantics to the caller. These are much larger than the bytes in a
@@ -275,10 +275,6 @@ enum positive_aop_returns {
AOP_TRUNCATED_PAGE = 0x80001,
};
-#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
- * helper code (eg buffer layer)
- * to clear GFP_FS from alloc */
-
/*
* oh the beauties of C type declarations.
*/
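With AOP_FLAG_NOFS gone, a caller that must not recurse into filesystem reclaim no longer threads a flag down through write_begin; it brackets the operation with the scoped memalloc API instead. A sketch of the replacement pattern (the helper name is hypothetical):

#include <linux/sched/mm.h>

static int myfs_write_begin_nofs(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		struct page **pagep, void **fsdata)
{
	unsigned int nofs = memalloc_nofs_save();	/* clears __GFP_FS for this task */
	int err = mapping->a_ops->write_begin(file, mapping, pos, len,
					      pagep, fsdata);

	memalloc_nofs_restore(nofs);
	return err;
}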
@@ -339,7 +335,7 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
- int (*readpage)(struct file *, struct page *);
+ int (*read_folio)(struct file *, struct folio *);
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
@@ -350,7 +346,7 @@ struct address_space_operations {
void (*readahead)(struct readahead_control *);
int (*write_begin)(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
@@ -359,8 +355,8 @@ struct address_space_operations {
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
- int (*releasepage) (struct page *, gfp_t);
- void (*freepage)(struct page *);
+ bool (*release_folio)(struct folio *, gfp_t);
+ void (*free_folio)(struct folio *folio);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
* migrate the contents of a page to the specified target. If
@@ -373,7 +369,7 @@ struct address_space_operations {
int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
- void (*is_dirty_writeback) (struct page *, bool *, bool *);
+ void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
int (*error_remove_page)(struct address_space *, struct page *);
/* swapfile support */
@@ -384,18 +380,6 @@ struct address_space_operations {
extern const struct address_space_operations empty_aops;
-/*
- * pagecache_write_begin/pagecache_write_end must be used by general code
- * to write into the pagecache.
- */
-int pagecache_write_begin(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
-
-int pagecache_write_end(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
-
/**
* struct address_space - Contents of a cacheable, mappable object.
* @host: Owner, either the inode or the block_device.
@@ -3116,8 +3100,6 @@ extern int page_readlink(struct dentry *, char __user *, int);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern void page_put_link(void *);
-extern int __page_symlink(struct inode *inode, const char *symname, int len,
- int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
@@ -3192,7 +3174,7 @@ extern int noop_fsync(struct file *, loff_t, loff_t, int);
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 5b6f64f4d771f3..e552097c67e0bb 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -226,10 +226,10 @@ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
-int iomap_readpage(struct page *page, const struct iomap_ops *ops);
+int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
-int iomap_releasepage(struct page *page, gfp_t gfp_mask);
+bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
#ifdef CONFIG_MIGRATION
int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index de9536680b2b5d..e79d6e0b14e8e8 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1529,7 +1529,7 @@ extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
int jbd2_journal_invalidate_folio(journal_t *, struct folio *,
size_t offset, size_t length);
-extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page);
+bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio);
extern int jbd2_journal_stop(handle_t *);
extern int jbd2_journal_flush(journal_t *journal, unsigned int flags);
extern void jbd2_journal_lock_updates (journal_t *);
diff --git a/include/linux/mpage.h b/include/linux/mpage.h
index f4f5e90a684415..43986f7ec4dd35 100644
--- a/include/linux/mpage.h
+++ b/include/linux/mpage.h
@@ -16,7 +16,7 @@ struct writeback_control;
struct readahead_control;
void mpage_readahead(struct readahead_control *, get_block_t get_block);
-int mpage_readpage(struct page *page, get_block_t get_block);
+int mpage_read_folio(struct folio *folio, get_block_t get_block);
int mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block);
int mpage_writepage(struct page *page, get_block_t *get_block,
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 80b7728277b1b1..77fa6a61706a55 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -275,9 +275,9 @@ struct netfs_cache_ops {
struct readahead_control;
extern void netfs_readahead(struct readahead_control *);
-extern int netfs_readpage(struct file *, struct page *);
+int netfs_read_folio(struct file *, struct folio *);
extern int netfs_write_begin(struct file *, struct address_space *,
- loff_t, unsigned int, unsigned int, struct folio **,
+ loff_t, unsigned int, struct folio **,
void **);
extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index b48b9259e02c74..1bba71757d6221 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -594,7 +594,7 @@ static inline bool nfs_have_writebacks(const struct inode *inode)
/*
* linux/fs/nfs/read.c
*/
-extern int nfs_readpage(struct file *, struct page *);
+int nfs_read_folio(struct file *, struct folio *);
void nfs_readahead(struct readahead_control *);
/*
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 9d8eeaa67d05ab..af10149a6c319c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -516,7 +516,7 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
/*
* Private page markings that may be used by the filesystem that owns the page
* for its own purposes.
- * - PG_private and PG_private_2 cause releasepage() and co to be invoked
+ * - PG_private and PG_private_2 cause release_folio() and co to be invoked
*/
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 6165283bdb6f6b..ce96866fbec405 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -492,7 +492,7 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
-typedef int filler_t(void *, struct page *);
+typedef int filler_t(struct file *, struct folio *);
pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
@@ -735,7 +735,7 @@ static inline unsigned find_get_pages_tag(struct address_space *mapping,
}
struct page *grab_cache_page_write_begin(struct address_space *mapping,
- pgoff_t index, unsigned flags);
+ pgoff_t index);
/*
* Returns locked page at given index in given cache, creating it if needed.
@@ -747,9 +747,9 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
}
struct folio *read_cache_folio(struct address_space *, pgoff_t index,
- filler_t *filler, void *data);
+ filler_t *filler, struct file *file);
struct page *read_cache_page(struct address_space *, pgoff_t index,
- filler_t *filler, void *data);
+ filler_t *filler, struct file *file);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
@@ -888,6 +888,18 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);
+/**
+ * folio_trylock() - Attempt to lock a folio.
+ * @folio: The folio to attempt to lock.
+ *
+ * Sometimes it is undesirable to wait for a folio to be unlocked (eg
+ * when the locks are being taken in the wrong order, or if making
+ * progress through a batch of folios is more important than processing
+ * them in order). Usually folio_lock() is the correct function to call.
+ *
+ * Context: Any context.
+ * Return: Whether the lock was successfully acquired.
+ */
static inline bool folio_trylock(struct folio *folio)
{
return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
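A two-line sketch of the batch pattern this comment describes, skipping contended folios rather than stalling the walk:

	if (!folio_trylock(folio))
		continue;	/* revisit later; forward progress beats strict order */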
@@ -901,6 +913,28 @@ static inline int trylock_page(struct page *page)
return folio_trylock(page_folio(page));
}
+/**
+ * folio_lock() - Lock this folio.
+ * @folio: The folio to lock.
+ *
+ * The folio lock protects against many things, probably more than it
+ * should. It is primarily held while a folio is being brought uptodate,
+ * either from its backing file or from swap. It is also held while a
+ * folio is being truncated from its address_space, so holding the lock
+ * is sufficient to keep folio->mapping stable.
+ *
+ * The folio lock is also held while write() is modifying the page to
+ * provide POSIX atomicity guarantees (as long as the write does not
+ * cross a page boundary). Other modifications to the data in the folio
+ * do not hold the folio lock and can race with writes, eg DMA and stores
+ * to mapped pages.
+ *
+ * Context: May sleep. If you need to acquire the locks of two or
+ * more folios, they must be in order of ascending index, if they are
+ * in the same address_space. If they are in different address_spaces,
+ * acquire the lock of the folio which belongs to the address_space which
+ * has the lowest address in memory first.
+ */
static inline void folio_lock(struct folio *folio)
{
might_sleep();
@@ -908,8 +942,16 @@ static inline void folio_lock(struct folio *folio)
__folio_lock(folio);
}
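The ordering rule in the Context section deserves an illustration. Assuming two folios a and b in the same address_space, the locks are taken in ascending index order:

	if (folio_index(a) > folio_index(b))
		swap(a, b);	/* ensure a has the lower index */
	folio_lock(a);
	folio_lock(b);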
-/*
- * lock_page may only be called if we have the page's inode pinned.
+/**
+ * lock_page() - Lock the folio containing this page.
+ * @page: The page to lock.
+ *
+ * See folio_lock() for a description of what the lock protects.
+ * This is a legacy function and new code should probably use folio_lock()
+ * instead.
+ *
+ * Context: May sleep. Pages in the same folio share a lock, so do not
+ * attempt to lock two pages which share a folio.
*/
static inline void lock_page(struct page *page)
{
@@ -921,6 +963,16 @@ static inline void lock_page(struct page *page)
__folio_lock(folio);
}
+/**
+ * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
+ * @folio: The folio to lock.
+ *
+ * Attempts to lock the folio, like folio_lock(), except that the sleep
+ * to acquire the lock is interruptible by a fatal signal.
+ *
+ * Context: May sleep; see folio_lock().
+ * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
+ */
static inline int folio_lock_killable(struct folio *folio)
{
might_sleep();
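Callers are expected to propagate the error rather than ignore it; a minimal sketch:

	err = folio_lock_killable(folio);
	if (err)
		return err;	/* -EINTR: a fatal signal arrived while waiting */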
@@ -967,8 +1019,8 @@ int folio_wait_bit_killable(struct folio *folio, int bit_nr);
* Wait for a folio to be unlocked.
*
* This must be called with the caller "holding" the folio,
- * ie with increased "page->count" so that the folio won't
- * go away during the wait..
+ * ie with increased folio reference count so that the folio won't
+ * go away during the wait.
*/
static inline void folio_wait_locked(struct folio *folio)
{
@@ -1015,10 +1067,6 @@ static inline void folio_cancel_dirty(struct folio *folio)
if (folio_test_dirty(folio))
__folio_cancel_dirty(folio);
}
-static inline void cancel_dirty_page(struct page *page)
-{
- folio_cancel_dirty(page_folio(page));
-}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
@@ -1191,7 +1239,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
* @mapping: address_space which holds the pagecache and I/O vectors
* @ra: file_ra_state which holds the readahead state
* @file: Used by the filesystem for authentication.
- * @page: The page at @index which triggered the readahead call.
+ * @folio: The folio at @index which triggered the readahead call.
* @index: Index of first page to be read.
* @req_count: Total number of pages being read by the caller.
*
@@ -1203,10 +1251,10 @@ void page_cache_sync_readahead(struct address_space *mapping,
static inline
void page_cache_async_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *file,
- struct page *page, pgoff_t index, unsigned long req_count)
+ struct folio *folio, pgoff_t index, unsigned long req_count)
{
DEFINE_READAHEAD(ractl, file, ra, mapping, index);
- page_cache_async_ra(&ractl, page_folio(page), req_count);
+ page_cache_async_ra(&ractl, folio, req_count);
}
static inline struct folio *__readahead_folio(struct readahead_control *ractl)
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d06ffffad43486..229e8fae66a345 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -335,17 +335,15 @@ TRACE_EVENT(ext4_begin_ordered_truncate,
DECLARE_EVENT_CLASS(ext4__write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags),
+ TP_ARGS(inode, pos, len),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( loff_t, pos )
__field( unsigned int, len )
- __field( unsigned int, flags )
),
TP_fast_assign(
@@ -353,29 +351,26 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
__entry->ino = inode->i_ino;
__entry->pos = pos;
__entry->len = len;
- __entry->flags = flags;
),
- TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
+ TP_printk("dev %d,%d ino %lu pos %lld len %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->pos, __entry->len, __entry->flags)
+ __entry->pos, __entry->len)
);
DEFINE_EVENT(ext4__write_begin, ext4_write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags)
+ TP_ARGS(inode, pos, len)
);
DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags)
+ TP_ARGS(inode, pos, len)
);
DECLARE_EVENT_CLASS(ext4__write_end,
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 1779e133cea0c1..bea654a85e6ba8 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1159,17 +1159,15 @@ DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_write_bio,
TRACE_EVENT(f2fs_write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len),
- TP_ARGS(inode, pos, len, flags),
+ TP_ARGS(inode, pos, len),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(loff_t, pos)
__field(unsigned int, len)
- __field(unsigned int, flags)
),
TP_fast_assign(
@@ -1177,14 +1175,12 @@ TRACE_EVENT(f2fs_write_begin,
__entry->ino = inode->i_ino;
__entry->pos = pos;
__entry->len = len;
- __entry->flags = flags;
),
- TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, flags = %u",
+ TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u",
show_dev_ino(__entry),
(unsigned long long)__entry->pos,
- __entry->len,
- __entry->flags)
+ __entry->len)
);
TRACE_EVENT(f2fs_write_end,
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 6418083901d4d3..a9bc3c98f76a1b 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -787,10 +787,10 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
struct page *page;
/*
* Ensure that the page that has the original instruction is populated
- * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
+ * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
* see uprobe_register().
*/
- if (mapping->a_ops->readpage)
+ if (mapping->a_ops->read_folio)
page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
else
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
@@ -1143,7 +1143,8 @@ static int __uprobe_register(struct inode *inode, loff_t offset,
return -EINVAL;
/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
- if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
+ if (!inode->i_mapping->a_ops->read_folio &&
+ !shmem_mapping(inode->i_mapping))
return -EIO;
/* Racy, just to catch the obvious mistakes */
if (offset > i_size_read(inode))
diff --git a/mm/filemap.c b/mm/filemap.c
index 9a1eef6c5d350e..fa0ca674450f85 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -225,12 +225,12 @@ void __filemap_remove_folio(struct folio *folio, void *shadow)
void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
- void (*freepage)(struct page *);
+ void (*free_folio)(struct folio *);
int refs = 1;
- freepage = mapping->a_ops->freepage;
- if (freepage)
- freepage(&folio->page);
+ free_folio = mapping->a_ops->free_folio;
+ if (free_folio)
+ free_folio(folio);
if (folio_test_large(folio) && !folio_test_hugetlb(folio))
refs = folio_nr_pages(folio);
@@ -807,7 +807,7 @@ void replace_page_cache_page(struct page *old, struct page *new)
struct folio *fold = page_folio(old);
struct folio *fnew = page_folio(new);
struct address_space *mapping = old->mapping;
- void (*freepage)(struct page *) = mapping->a_ops->freepage;
+ void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
pgoff_t offset = old->index;
XA_STATE(xas, &mapping->i_pages, offset);
@@ -835,9 +835,9 @@ void replace_page_cache_page(struct page *old, struct page *new)
if (PageSwapBacked(new))
__inc_lruvec_page_state(new, NR_SHMEM);
xas_unlock_irq(&xas);
- if (freepage)
- freepage(old);
- put_page(old);
+ if (free_folio)
+ free_folio(fold);
+ folio_put(fold);
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
@@ -2414,12 +2414,12 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping,
/*
* A previous I/O error may have been due to temporary failures,
- * eg. multipath errors. PG_error will be set again if readpage
+ * eg. multipath errors. PG_error will be set again if read_folio
* fails.
*/
folio_clear_error(folio);
/* Start the actual read. The read will unlock the page. */
- error = mapping->a_ops->readpage(file, &folio->page);
+ error = mapping->a_ops->read_folio(file, folio);
if (error)
return error;
@@ -2636,7 +2636,7 @@ err:
* @already_read: Number of bytes already read by the caller.
*
* Copies data from the page cache. If the data is not currently present,
- * uses the readahead and readpage address_space operations to fetch it.
+ * uses the readahead and read_folio address_space operations to fetch it.
*
* Return: Total number of bytes copied, including those already read by
* the caller. If an error happens before any bytes are copied, returns
@@ -3447,7 +3447,7 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct address_space *mapping = file->f_mapping;
- if (!mapping->a_ops->readpage)
+ if (!mapping->a_ops->read_folio)
return -ENOEXEC;
file_accessed(file);
vma->vm_ops = &generic_file_vm_ops;
@@ -3483,10 +3483,13 @@ EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
static struct folio *do_read_cache_folio(struct address_space *mapping,
- pgoff_t index, filler_t filler, void *data, gfp_t gfp)
+ pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
{
struct folio *folio;
int err;
+
+ if (!filler)
+ filler = mapping->a_ops->read_folio;
repeat:
folio = filemap_get_folio(mapping, index);
if (!folio) {
@@ -3503,11 +3506,7 @@ repeat:
}
filler:
- if (filler)
- err = filler(data, &folio->page);
- else
- err = mapping->a_ops->readpage(data, &folio->page);
-
+ err = filler(file, folio);
if (err < 0) {
folio_put(folio);
return ERR_PTR(err);
@@ -3557,44 +3556,44 @@ out:
}
/**
- * read_cache_folio - read into page cache, fill it if needed
- * @mapping: the page's address_space
- * @index: the page index
- * @filler: function to perform the read
- * @data: first arg to filler(data, page) function, often left as NULL
+ * read_cache_folio - Read into page cache, fill it if needed.
+ * @mapping: The address_space to read from.
+ * @index: The index to read.
+ * @filler: Function to perform the read, or NULL to use aops->read_folio().
+ * @file: Passed to filler function, may be NULL if not required.
*
- * Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page and wait for it to become unlocked.
+ * Read one page into the page cache. If it succeeds, the folio returned
+ * will contain @index, but it may not be the first page of the folio.
*
- * If the page does not get brought uptodate, return -EIO.
- *
- * The function expects mapping->invalidate_lock to be already held.
+ * If the filler function returns an error, it will be returned to the
+ * caller.
*
- * Return: up to date page on success, ERR_PTR() on failure.
+ * Context: May sleep. Expects mapping->invalidate_lock to be held.
+ * Return: An uptodate folio on success, ERR_PTR() on failure.
*/
struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
- filler_t filler, void *data)
+ filler_t filler, struct file *file)
{
- return do_read_cache_folio(mapping, index, filler, data,
+ return do_read_cache_folio(mapping, index, filler, file,
mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_folio);
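Under the new convention most callers pass a NULL filler and let the mapping's ->read_folio do the work, with the (possibly NULL) file pointer forwarded to it. A sketch of a typical call site:

	struct folio *folio;

	folio = read_cache_folio(mapping, index, NULL, file);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/* folio is uptodate and holds a reference; folio_put() when done */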
static struct page *do_read_cache_page(struct address_space *mapping,
- pgoff_t index, filler_t *filler, void *data, gfp_t gfp)
+ pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
{
struct folio *folio;
- folio = do_read_cache_folio(mapping, index, filler, data, gfp);
+ folio = do_read_cache_folio(mapping, index, filler, file, gfp);
if (IS_ERR(folio))
return &folio->page;
return folio_file_page(folio, index);
}
struct page *read_cache_page(struct address_space *mapping,
- pgoff_t index, filler_t *filler, void *data)
+ pgoff_t index, filler_t *filler, struct file *file)
{
- return do_read_cache_page(mapping, index, filler, data,
+ return do_read_cache_page(mapping, index, filler, file,
mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);
@@ -3622,27 +3621,6 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
}
EXPORT_SYMBOL(read_cache_page_gfp);
-int pagecache_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- const struct address_space_operations *aops = mapping->a_ops;
-
- return aops->write_begin(file, mapping, pos, len, flags,
- pagep, fsdata);
-}
-EXPORT_SYMBOL(pagecache_write_begin);
-
-int pagecache_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- const struct address_space_operations *aops = mapping->a_ops;
-
- return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
-}
-EXPORT_SYMBOL(pagecache_write_end);
-
/*
* Warn about a page cache invalidation failure during a direct I/O write.
*/
@@ -3754,7 +3732,6 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
const struct address_space_operations *a_ops = mapping->a_ops;
long status = 0;
ssize_t written = 0;
- unsigned int flags = 0;
do {
struct page *page;
@@ -3784,7 +3761,7 @@ again:
break;
}
- status = a_ops->write_begin(file, mapping, pos, bytes, flags,
+ status = a_ops->write_begin(file, mapping, pos, bytes,
&page, &fsdata);
if (unlikely(status < 0))
break;
@@ -3978,8 +3955,8 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp)
if (folio_test_writeback(folio))
return false;
- if (mapping && mapping->a_ops->releasepage)
- return mapping->a_ops->releasepage(&folio->page, gfp);
- return try_to_free_buffers(&folio->page);
+ if (mapping && mapping->a_ops->release_folio)
+ return mapping->a_ops->release_folio(folio, gfp);
+ return try_to_free_buffers(folio);
}
EXPORT_SYMBOL(filemap_release_folio);
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 46fa179e32fb26..20bc15b57d93e2 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -131,12 +131,10 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
EXPORT_SYMBOL(pagecache_get_page);
struct page *grab_cache_page_write_begin(struct address_space *mapping,
- pgoff_t index, unsigned flags)
+ pgoff_t index)
{
unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
- if (flags & AOP_FLAG_NOFS)
- fgp_flags |= FGP_NOFS;
return pagecache_get_page(mapping, index, fgp_flags,
mapping_gfp_mask(mapping));
}
diff --git a/mm/memory.c b/mm/memory.c
index 76e3af9639d93f..2a12028a374997 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -555,11 +555,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
dump_page(page, "bad pte");
pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
- pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
+ pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
vma->vm_file,
vma->vm_ops ? vma->vm_ops->fault : NULL,
vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
- mapping ? mapping->a_ops->readpage : NULL);
+ mapping ? mapping->a_ops->read_folio : NULL);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c31ee1e1c9b06..21d82636c291c2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1013,7 +1013,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
if (!page->mapping) {
VM_BUG_ON_PAGE(PageAnon(page), page);
if (page_has_private(page)) {
- try_to_free_buffers(page);
+ try_to_free_buffers(folio);
goto out_unlock_both;
}
} else if (page_mapped(page)) {
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7e2da284e42718..fa1117db4610f2 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2602,10 +2602,12 @@ EXPORT_SYMBOL(folio_redirty_for_writepage);
* folio_mark_dirty - Mark a folio as being modified.
* @folio: The folio.
*
- * For folios with a mapping this should be done with the folio lock held
- * for the benefit of asynchronous memory errors who prefer a consistent
- * dirty state. This rule can be broken in some special cases,
- * but should be better not to.
+ * The folio may not be truncated while this function is running.
+ * Holding the folio lock is sufficient to prevent truncation, but some
+ * callers cannot acquire a sleeping lock. These callers instead hold
+ * the page table lock for a page table which contains at least one page
+ * in this folio. Truncation will block on the page table lock as it
+ * unmaps pages before removing the folio from its mapping.
*
* Return: True if the folio was newly dirtied, false if it was already dirty.
*/
diff --git a/mm/page_io.c b/mm/page_io.c
index 3fbdab6a940e78..a9444e67ec2092 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -336,7 +336,7 @@ int swap_readpage(struct page *page, bool synchronous)
struct file *swap_file = sis->swap_file;
struct address_space *mapping = swap_file->f_mapping;
- ret = mapping->a_ops->readpage(swap_file, page);
+ ret = mapping->a_ops->read_folio(swap_file, page_folio(page));
if (!ret)
count_vm_event(PSWPIN);
goto out;
diff --git a/mm/readahead.c b/mm/readahead.c
index 26bf74a6b2fe6a..b78921b547547a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -15,7 +15,7 @@
* explicitly requested by the application. Readahead only ever
* attempts to read folios that are not yet in the page cache. If a
* folio is present but not up-to-date, readahead will not try to read
- * it. In that case a simple ->readpage() will be requested.
+ * it. In that case a simple ->read_folio() will be requested.
*
* Readahead is triggered when an application read request (whether a
* system call or a page fault) finds that the requested folio is not in
@@ -78,7 +78,7 @@
* address space operation, for which mpage_readahead() is a canonical
* implementation. ->readahead() should normally initiate reads on all
* folios, but may fail to read any or all folios without causing an I/O
- * error. The page cache reading code will issue a ->readpage() request
+ * error. The page cache reading code will issue a ->read_folio() request
* for any folio which ->readahead() did not read, and only an error
* from this will be final.
*
@@ -110,7 +110,7 @@
* were not fetched with readahead_folio(). This will allow a
* subsequent synchronous readahead request to try them again. If they
* are left in the page cache, then they will be read individually using
- * ->readpage() which may be less efficient.
+ * ->read_folio() which may be less efficient.
*/
#include <linux/blkdev.h>
@@ -146,7 +146,7 @@ EXPORT_SYMBOL_GPL(file_ra_state_init);
static void read_pages(struct readahead_control *rac)
{
const struct address_space_operations *aops = rac->mapping->a_ops;
- struct page *page;
+ struct folio *folio;
struct blk_plug plug;
if (!readahead_count(rac))
@@ -157,24 +157,23 @@ static void read_pages(struct readahead_control *rac)
if (aops->readahead) {
aops->readahead(rac);
/*
- * Clean up the remaining pages. The sizes in ->ra
+ * Clean up the remaining folios. The sizes in ->ra
* may be used to size the next readahead, so make sure
* they accurately reflect what happened.
*/
- while ((page = readahead_page(rac))) {
- rac->ra->size -= 1;
- if (rac->ra->async_size > 0) {
- rac->ra->async_size -= 1;
- delete_from_page_cache(page);
+ while ((folio = readahead_folio(rac)) != NULL) {
+ unsigned long nr = folio_nr_pages(folio);
+
+ rac->ra->size -= nr;
+ if (rac->ra->async_size >= nr) {
+ rac->ra->async_size -= nr;
+ filemap_remove_folio(folio);
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
}
} else {
- while ((page = readahead_page(rac))) {
- aops->readpage(rac->file, page);
- put_page(page);
- }
+ while ((folio = readahead_folio(rac)) != NULL)
+ aops->read_folio(rac->file, folio);
}
blk_finish_plug(&plug);
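As read_pages() shows, anything ->readahead leaves behind is retried one folio at a time through ->read_folio, so readahead failures are never fatal. For a block-based filesystem both hooks are typically thin wrappers around the mpage helpers; a sketch, with myfs_get_block standing in for the filesystem's get_block_t:

static void myfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, myfs_get_block);
}

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, myfs_get_block);
}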
@@ -255,8 +254,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
}
/*
- * Now start the IO. We ignore I/O errors - if the page is not
- * uptodate then the caller will launch readpage again, and
+ * Now start the IO. We ignore I/O errors - if the folio is not
+ * uptodate then the caller will launch read_folio again, and
* will then handle the error.
*/
read_pages(ractl);
@@ -304,7 +303,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
unsigned long max_pages, index;
- if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead))
+ if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
return;
/*
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 3b3cf2892b6ae8..206ed6b40c1d0f 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -145,15 +145,15 @@ static int secretmem_migratepage(struct address_space *mapping,
return -EBUSY;
}
-static void secretmem_freepage(struct page *page)
+static void secretmem_free_folio(struct folio *folio)
{
- set_direct_map_default_noflush(page);
- clear_highpage(page);
+ set_direct_map_default_noflush(&folio->page);
+ folio_zero_segment(folio, 0, folio_size(folio));
}
const struct address_space_operations secretmem_aops = {
.dirty_folio = noop_dirty_folio,
- .freepage = secretmem_freepage,
+ .free_folio = secretmem_free_folio,
.migratepage = secretmem_migratepage,
.isolate_page = secretmem_isolate_page,
};
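secretmem uses ->free_folio as a last-chance scrub once the folio has left the page cache; filemap_free_folio() and __remove_mapping() are the two call sites converted in this series. A sketch of the minimal shape for a driver with similar needs:

static void myfs_free_folio(struct folio *folio)
{
	/* e.g. scrub contents before the memory is reused */
	folio_zero_segment(folio, 0, folio_size(folio));
}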
diff --git a/mm/shmem.c b/mm/shmem.c
index 4b2fea33158e8a..f3e8de8ff75c0f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2426,7 +2426,7 @@ static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
@@ -4162,7 +4162,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
*
* This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
* with any new page allocations done using the specified allocation flags.
- * But read_cache_page_gfp() uses the ->readpage() method: which does not
+ * But read_cache_page_gfp() uses the ->read_folio() method: which does not
* suit tmpfs, since it may have pages in swapcache, and needs to find those
* for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 981a6e85c88e77..6aec1b24f440ce 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3028,7 +3028,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
/*
* Read the swap header.
*/
- if (!mapping->a_ops->readpage) {
+ if (!mapping->a_ops->read_folio) {
error = -EINVAL;
goto bad_swap_unlock_inode;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1678802e03e785..edc89f26b738cf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1181,7 +1181,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping)
* folio->mapping == NULL while being dirty with clean buffers.
*/
if (folio_test_private(folio)) {
- if (try_to_free_buffers(&folio->page)) {
+ if (try_to_free_buffers(folio)) {
folio_clear_dirty(folio);
pr_info("%s: orphaned folio\n", __func__);
return PAGE_CLEAN;
@@ -1282,9 +1282,9 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
xa_unlock_irq(&mapping->i_pages);
put_swap_page(&folio->page, swap);
} else {
- void (*freepage)(struct page *);
+ void (*free_folio)(struct folio *);
- freepage = mapping->a_ops->freepage;
+ free_folio = mapping->a_ops->free_folio;
/*
* Remember a shadow entry for reclaimed file cache in
* order to detect refaults, thus thrashing, later on.
@@ -1310,8 +1310,8 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
inode_add_lru(mapping->host);
spin_unlock(&mapping->host->i_lock);
- if (freepage != NULL)
- freepage(&folio->page);
+ if (free_folio)
+ free_folio(folio);
}
return 1;
@@ -1451,7 +1451,7 @@ static void folio_check_dirty_writeback(struct folio *folio,
mapping = folio_mapping(folio);
if (mapping && mapping->a_ops->is_dirty_writeback)
- mapping->a_ops->is_dirty_writeback(&folio->page, dirty, writeback);
+ mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
}
static struct page *alloc_demote_page(struct page *page, unsigned long node)