author     Kemeng Shi <shikemeng@huaweicloud.com>    2024-02-28 17:19:54 +0800
committer  Christian Brauner <brauner@kernel.org>    2024-04-05 15:52:17 +0200
commit     d92109891f21cf367caa2cc6dff11a4411d917f4 (patch)
tree       9bcfd02860c286986d910718a9d09ceb3e244216
parent     ac0c18f2c693f0e7a44dbbb36b14d5141e5d20e5 (diff)
download   vfs-d92109891f21cf367caa2cc6dff11a4411d917f4.tar.gz
fs/writeback: bail out if there are no more inodes for IO and we queued once
If the inodes left on the io list by the last wb_writeback offer no more IO to do, we may bail out early even though there are inodes on the dirty list that should still be written back. Only bail out after the dirty list has been queued at least once, so a dirtied inode is not missed. This was found by code reading.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Link: https://lore.kernel.org/r/20240228091958.288260-3-shikemeng@huaweicloud.com
Reviewed-by: Jan Kara <jack@suse.cz>
[brauner@kernel.org: fold in memory corruption fix from Jan in [1]]
Link: https://lore.kernel.org/r/20240405132346.bid7gibby3lxxhez@quack3 [1]
Signed-off-by: Christian Brauner <brauner@kernel.org>
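The effect of the new "queued" flag is easiest to see with the list handling reduced to counters. Below is a minimal, self-contained C sketch of the loop-exit logic only, not kernel code: queue_io_sketch() and writeback_pass() are hypothetical stand-ins for queue_io() and writeback_sb_inodes(), and the two counters stand in for wb->b_io and wb->b_dirty.

#include <stdbool.h>
#include <stdio.h>

static int io_list = 1;    /* inodes left on the io list by the previous run */
static int dirty_list = 2; /* freshly dirtied inodes, not yet queued */

/* Stand-in for queue_io(): splice the dirty list onto the io list. */
static void queue_io_sketch(void)
{
	io_list += dirty_list;
	dirty_list = 0;
}

/* Stand-in for writeback_sb_inodes(): write one inode, report progress.
 * The leftover inode is treated as busy, so the first pass makes none. */
static bool first_pass = true;
static long writeback_pass(void)
{
	if (first_pass) {
		first_pass = false;
		return 0;	/* leftover inode skipped, no progress */
	}
	if (io_list == 0)
		return 0;
	io_list--;
	return 1;
}

int main(void)
{
	bool queued = false;

	for (;;) {
		if (io_list == 0 && dirty_list == 0)
			break;

		/* Only queue dirty inodes when the io list is empty,
		 * and remember that we did so at least once. */
		if (io_list == 0) {
			queue_io_sketch();
			queued = true;
		}

		long progress = writeback_pass();

		/* Mirrors the patched condition: stop only when a pass made
		 * no progress AND the dirty list has been queued at least
		 * once.  With the old check (progress alone) this sketch
		 * would stop after the fruitless first pass and leave
		 * dirty_list untouched. */
		if (!progress && queued)
			break;
	}

	printf("inodes still dirty: %d\n", dirty_list);
	return 0;
}

The real wb_writeback() does more on the no-progress path than this sketch models; the sketch only illustrates why the exit condition also has to know whether the dirty list was ever queued.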
-rw-r--r--  fs/fs-writeback.c  7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index fe634f00f4d9bc..f864c7d6ef9217 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2076,6 +2076,7 @@ static long wb_writeback(struct bdi_writeback *wb,
struct inode *inode;
long progress;
struct blk_plug plug;
+ bool queued = false;
blk_start_plug(&plug);
for (;;) {
@@ -2118,8 +2119,10 @@ static long wb_writeback(struct bdi_writeback *wb,
dirtied_before = jiffies;
trace_writeback_start(wb, work);
- if (list_empty(&wb->b_io))
+ if (list_empty(&wb->b_io)) {
queue_io(wb, work, dirtied_before);
+ queued = true;
+ }
if (work->sb)
progress = writeback_sb_inodes(work->sb, wb, work);
else
@@ -2134,7 +2137,7 @@ static long wb_writeback(struct bdi_writeback *wb,
* mean the overall work is done. So we keep looping as long
* as made some progress on cleaning pages or inodes.
*/
- if (progress) {
+ if (progress || !queued) {
spin_unlock(&wb->list_lock);
continue;
}