author    Gao Xiang <hsiangkao@linux.alibaba.com>    2024-03-05 17:14:48 +0800
committer Gao Xiang <hsiangkao@linux.alibaba.com>    2024-03-10 18:41:25 +0800
commit    706fd68fce3a5737286afd3e6422ab9258bd3e94
tree      53374c178c9812d199fcab34ea734ae2ed8b7e3e
parent    9266f2dc5e1158e7466e9db48b4e9a750ee4e3a5
erofs: refine managed cache operations to folios
Convert erofs_try_to_free_all_cached_pages() and
z_erofs_cache_release_folio().

Besides, erofs_page_is_managed() is moved to zdata.c and renamed
as erofs_folio_is_managed().

Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240305091448.1384242-6-hsiangkao@linux.alibaba.com
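---
For context, the page-to-folio call mapping this conversion relies on is
sketched below. drop_cached_folio() is a hypothetical helper, not code from
this patch; it only illustrates how the removed page-based calls correspond
to the folio APIs used in the hunks that follow:

	/*
	 * Sketch only -- not part of this patch.  drop_cached_folio() is a
	 * hypothetical helper showing the page -> folio call mapping
	 * applied throughout this conversion.
	 */
	static int drop_cached_folio(struct folio *folio)
	{
		if (!folio_trylock(folio))	/* was trylock_page(page) */
			return -EBUSY;
		folio_detach_private(folio);	/* was detach_page_private(page) */
		folio_unlock(folio);		/* was unlock_page(page) */
		return 0;
	}

Call sites that still hold a struct page * (e.g. the decompression backend
hunks near the end) obtain the containing folio with page_folio(page)
rather than comparing page->mapping directly.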
 fs/erofs/compress.h             |  7 -------
 fs/erofs/decompressor_deflate.c |  3 ---
 fs/erofs/decompressor_lzma.c    |  3 ---
 fs/erofs/internal.h             |  4 ++--
 fs/erofs/utils.c                |  2 +-
 fs/erofs/zdata.c                | 63 +++++++++++++++++++-------------------
 6 files changed, 34 insertions(+), 48 deletions(-)
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 7cc5841577b240..333587ba6183c1 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -81,13 +81,6 @@ static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
return true;
}
-#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
-static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
- struct page *page)
-{
- return page->mapping == MNGD_MAPPING(sbi);
-}
-
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
unsigned int padbufsize);
extern const struct z_erofs_decompressor erofs_decompressors[];
diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c
index b98872058abe82..81e65c453ef05a 100644
--- a/fs/erofs/decompressor_deflate.c
+++ b/fs/erofs/decompressor_deflate.c
@@ -212,9 +212,6 @@ again:
if (rq->out[no] != rq->in[j])
continue;
-
- DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb),
- rq->in[j]));
tmppage = erofs_allocpage(pgpl, rq->gfp);
if (!tmppage) {
err = -ENOMEM;
diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
index 6ca357d83cfa45..4b28dc130c9f11 100644
--- a/fs/erofs/decompressor_lzma.c
+++ b/fs/erofs/decompressor_lzma.c
@@ -258,9 +258,6 @@ again:
if (rq->out[no] != rq->in[j])
continue;
-
- DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb),
- rq->in[j]));
tmppage = erofs_allocpage(pgpl, rq->gfp);
if (!tmppage) {
err = -ENOMEM;
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index b0409badb01723..65db0250f1461c 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -467,8 +467,8 @@ int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
-int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
- struct erofs_workgroup *egrp);
+int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *egrp);
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
int flags);
void *erofs_get_pcpubuf(unsigned int requiredpages);
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index e146d09151af41..518bdd69c82325 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -129,7 +129,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
* the XArray. Otherwise some cached pages could be still attached to
* the orphan old workgroup when the new one is available in the tree.
*/
- if (erofs_try_to_free_all_cached_pages(sbi, grp))
+ if (erofs_try_to_free_all_cached_folios(sbi, grp))
goto out;
/*
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 63990c8192f2ca..c1bd4d8392ebce 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -119,6 +119,12 @@ static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
}
+#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
+static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
+{
+ return fo->mapping == MNGD_MAPPING(sbi);
+}
+
/*
* bit 30: I/O error occurred on this folio
* bit 0 - 29: remaining parts to complete this folio
@@ -611,9 +617,9 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}
-/* called by erofs_shrinker to get rid of all compressed_pages */
-int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp)
+/* called by erofs_shrinker to get rid of all cached compressed bvecs */
+int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *grp)
{
struct z_erofs_pcluster *const pcl =
container_of(grp, struct z_erofs_pcluster, obj);
@@ -621,27 +627,22 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
int i;
DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
- /*
- * refcount of workgroup is now freezed as 0,
- * therefore no need to worry about available decompression users.
- */
+ /* There is no active user since the pcluster is now frozen */
for (i = 0; i < pclusterpages; ++i) {
- struct page *page = pcl->compressed_bvecs[i].page;
+ struct folio *folio = pcl->compressed_bvecs[i].folio;
- if (!page)
+ if (!folio)
continue;
- /* block other users from reclaiming or migrating the page */
- if (!trylock_page(page))
+ /* Avoid reclaiming or migrating this folio */
+ if (!folio_trylock(folio))
return -EBUSY;
- if (!erofs_page_is_managed(sbi, page))
+ if (!erofs_folio_is_managed(sbi, folio))
continue;
-
- /* barrier is implied in the following 'unlock_page' */
- WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
- detach_page_private(page);
- unlock_page(page);
+ pcl->compressed_bvecs[i].folio = NULL;
+ folio_detach_private(folio);
+ folio_unlock(folio);
}
return 0;
}
@@ -658,20 +659,17 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
ret = false;
spin_lock(&pcl->obj.lockref.lock);
- if (pcl->obj.lockref.count > 0)
- goto out;
-
- DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
- for (i = 0; i < pclusterpages; ++i) {
- if (pcl->compressed_bvecs[i].page == &folio->page) {
- WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
- ret = true;
- break;
+ if (pcl->obj.lockref.count <= 0) {
+ DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
+ for (i = 0; i < pclusterpages; ++i) {
+ if (pcl->compressed_bvecs[i].folio == folio) {
+ pcl->compressed_bvecs[i].folio = NULL;
+ folio_detach_private(folio);
+ ret = true;
+ break;
+ }
}
}
- if (ret)
- folio_detach_private(folio);
-out:
spin_unlock(&pcl->obj.lockref.lock);
return ret;
}
@@ -1201,7 +1199,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
be->compressed_pages[i] = page;
if (z_erofs_is_inline_pcluster(pcl) ||
- erofs_page_is_managed(EROFS_SB(be->sb), page)) {
+ erofs_folio_is_managed(EROFS_SB(be->sb), page_folio(page))) {
if (!PageUptodate(page))
err = -EIO;
continue;
@@ -1286,7 +1284,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
/* consider shortlived pages added when decompressing */
page = be->compressed_pages[i];
- if (!page || erofs_page_is_managed(sbi, page))
+ if (!page ||
+ erofs_folio_is_managed(sbi, page_folio(page)))
continue;
(void)z_erofs_put_shortlivedpage(be->pagepool, page);
WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
@@ -1573,7 +1572,7 @@ static void z_erofs_submissionqueue_endio(struct bio *bio)
DBG_BUGON(folio_test_uptodate(folio));
DBG_BUGON(z_erofs_page_is_invalidated(&folio->page));
- if (!erofs_page_is_managed(EROFS_SB(q->sb), &folio->page))
+ if (!erofs_folio_is_managed(EROFS_SB(q->sb), folio))
continue;
if (!err)