author    Coly Li <colyli@suse.de>  2022-05-24 22:16:37 +0800
committer Coly Li <colyli@suse.de>  2022-05-24 22:16:37 +0800
commit    5fa94acba8ddf4d2f8d4e1a32c32651c0dc466ec (patch)
tree      05bddf905a02fe633ed2be1338ea0158e9520dc8
parent    5b73169ecf044d0175ed367cf56559585c3a62e6 (diff)
download  bcache-patches-5fa94acba8ddf4d2f8d4e1a32c32651c0dc466ec.tar.gz
for-next: remove merged patches
-rw-r--r--  for-next/0001-bcache-improve-multithreaded-bch_btree_check.patch             155
-rw-r--r--  for-next/0002-bcache-improve-multithreaded-bch_sectors_dirty_init.patch      147
-rw-r--r--  for-next/0003-bcache-remove-incremental-dirty-sector-counting-for-.patch     138
-rw-r--r--  for-next/0004-bcache-avoid-journal-no-space-deadlock-by-reserving-.patch     148
4 files changed, 0 insertions, 588 deletions
diff --git a/for-next/0001-bcache-improve-multithreaded-bch_btree_check.patch b/for-next/0001-bcache-improve-multithreaded-bch_btree_check.patch
deleted file mode 100644
index 9a471ee..0000000
--- a/for-next/0001-bcache-improve-multithreaded-bch_btree_check.patch
+++ /dev/null
@@ -1,155 +0,0 @@
-From 2394e397d107fc195f72c0748dee0c0084da91aa Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 21 May 2022 13:08:58 +0800
-Subject: [PATCH 1/4] bcache: improve multithreaded bch_btree_check()
-
-Commit 8e7102273f59 ("bcache: make bch_btree_check() to be
-multithreaded") makes bch_btree_check() much faster when checking
-all btree nodes during cache device registration. But it isn't in
-ideal shape yet; it can still be improved.
-
-This patch does the following things to improve the current parallel
-btree node check by multiple threads in bch_btree_check():
-- Add a read lock on the root node while checking all the btree nodes
- with multiple threads. Although it is not currently mandatory, it is
- good to have a read lock in the code logic.
-- Remove the local variable 'char name[32]' and generate the kernel
- thread name string directly when calling kthread_run().
-- Allocate the local variable "struct btree_check_state check_state" on
- the stack, avoiding unnecessary dynamic memory allocation for it.
-- Reduce BCH_BTR_CHKTHREAD_MAX from 64 to 12, which is enough in
- practice.
-- Increment check_state->started to count a created kernel thread only
- after it is successfully created.
-- When waiting for all checking kernel threads to finish, use
- wait_event() instead of wait_event_interruptible().
-
-With this change the code is clearer, and some potential error
-conditions are avoided. (A condensed sketch of the resulting thread
-pool pattern follows this patch.)
-
-Fixes: 8e7102273f59 ("bcache: make bch_btree_check() to be multithreaded")
-Signed-off-by: Coly Li <colyli@suse.de>
-Cc: stable@vger.kernel.org
----
- drivers/md/bcache/btree.c | 58 ++++++++++++++++++---------------------
- drivers/md/bcache/btree.h | 2 +-
- 2 files changed, 27 insertions(+), 33 deletions(-)
-
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index ad9f16689419..2362bb8ef6d1 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -2006,8 +2006,7 @@ int bch_btree_check(struct cache_set *c)
- int i;
- struct bkey *k = NULL;
- struct btree_iter iter;
-- struct btree_check_state *check_state;
-- char name[32];
-+ struct btree_check_state check_state;
-
- /* check and mark root node keys */
- for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
-@@ -2018,63 +2017,58 @@ int bch_btree_check(struct cache_set *c)
- if (c->root->level == 0)
- return 0;
-
-- check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
-- if (!check_state)
-- return -ENOMEM;
--
-- check_state->c = c;
-- check_state->total_threads = bch_btree_chkthread_nr();
-- check_state->key_idx = 0;
-- spin_lock_init(&check_state->idx_lock);
-- atomic_set(&check_state->started, 0);
-- atomic_set(&check_state->enough, 0);
-- init_waitqueue_head(&check_state->wait);
-+ check_state.c = c;
-+ check_state.total_threads = bch_btree_chkthread_nr();
-+ check_state.key_idx = 0;
-+ spin_lock_init(&check_state.idx_lock);
-+ atomic_set(&check_state.started, 0);
-+ atomic_set(&check_state.enough, 0);
-+ init_waitqueue_head(&check_state.wait);
-
-+ rw_lock(0, c->root, c->root->level);
- /*
- * Run multiple threads to check btree nodes in parallel,
-- * if check_state->enough is non-zero, it means current
-+ * if check_state.enough is non-zero, it means current
- * running check threads are enough, unnecessary to create
- * more.
- */
-- for (i = 0; i < check_state->total_threads; i++) {
-- /* fetch latest check_state->enough earlier */
-+ for (i = 0; i < check_state.total_threads; i++) {
-+ /* fetch latest check_state.enough earlier */
- smp_mb__before_atomic();
-- if (atomic_read(&check_state->enough))
-+ if (atomic_read(&check_state.enough))
- break;
-
-- check_state->infos[i].result = 0;
-- check_state->infos[i].state = check_state;
-- snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
-- atomic_inc(&check_state->started);
-+ check_state.infos[i].result = 0;
-+ check_state.infos[i].state = &check_state;
-
-- check_state->infos[i].thread =
-+ check_state.infos[i].thread =
- kthread_run(bch_btree_check_thread,
-- &check_state->infos[i],
-- name);
-- if (IS_ERR(check_state->infos[i].thread)) {
-+ &check_state.infos[i],
-+ "bch_btrchk[%d]", i);
-+ if (IS_ERR(check_state.infos[i].thread)) {
- pr_err("fails to run thread bch_btrchk[%d]\n", i);
- for (--i; i >= 0; i--)
-- kthread_stop(check_state->infos[i].thread);
-+ kthread_stop(check_state.infos[i].thread);
- ret = -ENOMEM;
- goto out;
- }
-+ atomic_inc(&check_state.started);
- }
-
- /*
- * Must wait for all threads to stop.
- */
-- wait_event_interruptible(check_state->wait,
-- atomic_read(&check_state->started) == 0);
-+ wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
-
-- for (i = 0; i < check_state->total_threads; i++) {
-- if (check_state->infos[i].result) {
-- ret = check_state->infos[i].result;
-+ for (i = 0; i < check_state.total_threads; i++) {
-+ if (check_state.infos[i].result) {
-+ ret = check_state.infos[i].result;
- goto out;
- }
- }
-
- out:
-- kfree(check_state);
-+ rw_unlock(0, c->root);
- return ret;
- }
-
-diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
-index 50482107134f..1b5fdbc0d83e 100644
---- a/drivers/md/bcache/btree.h
-+++ b/drivers/md/bcache/btree.h
-@@ -226,7 +226,7 @@ struct btree_check_info {
- int result;
- };
-
--#define BCH_BTR_CHKTHREAD_MAX 64
-+#define BCH_BTR_CHKTHREAD_MAX 12
- struct btree_check_state {
- struct cache_set *c;
- int total_threads;
---
-2.35.3
-
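For reference, the thread pool pattern that patch 1 converges on can be
condensed into a short sketch. This is illustrative C under assumed
names (check_pool, check_worker, run_check_pool are made up); the real
code is in drivers/md/bcache/btree.c as shown in the diff above.

    #include <linux/atomic.h>
    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/wait.h>

    #define NR_WORKERS 12   /* mirrors the reduced BCH_BTR_CHKTHREAD_MAX */

    struct check_pool {                     /* hypothetical, for illustration */
            atomic_t started;
            wait_queue_head_t wait;
            struct task_struct *threads[NR_WORKERS];
    };

    static int check_worker(void *arg)
    {
            struct check_pool *p = arg;

            /* ... check one shard of the btree nodes ... */

            /* the last worker to finish wakes the parent */
            if (atomic_dec_and_test(&p->started))
                    wake_up(&p->wait);
            return 0;
    }

    static int run_check_pool(void)
    {
            struct check_pool p;    /* on stack: no kzalloc()/kfree() pair */
            int i;

            atomic_set(&p.started, 0);
            init_waitqueue_head(&p.wait);

            for (i = 0; i < NR_WORKERS; i++) {
                    /* kthread_run() accepts a printf-style name, so the old
                     * snprintf() into a local name[32] buffer is unneeded */
                    p.threads[i] = kthread_run(check_worker, &p, "chk[%d]", i);
                    if (IS_ERR(p.threads[i])) {
                            while (--i >= 0)
                                    kthread_stop(p.threads[i]);
                            return -ENOMEM;
                    }
                    /* count a thread only after it was successfully created */
                    atomic_inc(&p.started);
            }

            /* wait_event(), not wait_event_interruptible(): a signal must not
             * let us return while workers still reference the on-stack state */
            wait_event(p.wait, atomic_read(&p.started) == 0);
            return 0;
    }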
diff --git a/for-next/0002-bcache-improve-multithreaded-bch_sectors_dirty_init.patch b/for-next/0002-bcache-improve-multithreaded-bch_sectors_dirty_init.patch
deleted file mode 100644
index 640355a..0000000
--- a/for-next/0002-bcache-improve-multithreaded-bch_sectors_dirty_init.patch
+++ /dev/null
@@ -1,147 +0,0 @@
-From 8f979bacc606cc24460c1e315242fcd2927bebbf Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 21 May 2022 14:14:17 +0800
-Subject: [PATCH 2/4] bcache: improve multithreaded bch_sectors_dirty_init()
-
-Commit b144e45fc576 ("bcache: make bch_sectors_dirty_init() to be
-multithreaded") makes bch_sectors_dirty_init() much faster when
-counting dirty sectors by iterating all dirty keys in the btree.
-But it isn't in ideal shape yet; it can still be improved.
-
-This patch makes the following changes to improve the current parallel
-dirty key iteration on the btree:
-- Add a read lock on the root node while multiple threads iterate the
- btree, to prevent the root node from being split by I/Os from other
- registered bcache devices.
-- Remove the local variable "char name[32]" and generate the kernel
- thread name string directly when calling kthread_run().
-- Allocate "struct bch_dirty_init_state state" directly on the stack,
- avoiding unnecessary dynamic memory allocation for it.
-- Decrease BCH_DIRTY_INIT_THRD_MAX from 64 to 12, which is enough in
- practice.
-- Increment state->started to count a created kernel thread only after
- it is successfully created.
-- When waiting for all dirty key counting threads to finish, use
- wait_event() instead of wait_event_interruptible().
-
-With the above changes the code is clearer, and some potential error
-conditions are avoided. (A sketch of the root-lock rationale follows
-this patch.)
-
-Fixes: b144e45fc576 ("bcache: make bch_sectors_dirty_init() to be multithreaded")
-Signed-off-by: Coly Li <colyli@suse.de>
-Cc: stable@vger.kernel.org
----
- drivers/md/bcache/writeback.c | 62 ++++++++++++++---------------------
- drivers/md/bcache/writeback.h | 2 +-
- 2 files changed, 26 insertions(+), 38 deletions(-)
-
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index 9ee0005874cd..d24c09490f8e 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -948,10 +948,10 @@ void bch_sectors_dirty_init(struct bcache_device *d)
- struct btree_iter iter;
- struct sectors_dirty_init op;
- struct cache_set *c = d->c;
-- struct bch_dirty_init_state *state;
-- char name[32];
-+ struct bch_dirty_init_state state;
-
- /* Just count root keys if no leaf node */
-+ rw_lock(0, c->root, c->root->level);
- if (c->root->level == 0) {
- bch_btree_op_init(&op.op, -1);
- op.inode = d->id;
-@@ -961,54 +961,42 @@ void bch_sectors_dirty_init(struct bcache_device *d)
- for_each_key_filter(&c->root->keys,
- k, &iter, bch_ptr_invalid)
- sectors_dirty_init_fn(&op.op, c->root, k);
-+ rw_unlock(0, c->root);
- return;
- }
-
-- state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
-- if (!state) {
-- pr_warn("sectors dirty init failed: cannot allocate memory\n");
-- return;
-- }
--
-- state->c = c;
-- state->d = d;
-- state->total_threads = bch_btre_dirty_init_thread_nr();
-- state->key_idx = 0;
-- spin_lock_init(&state->idx_lock);
-- atomic_set(&state->started, 0);
-- atomic_set(&state->enough, 0);
-- init_waitqueue_head(&state->wait);
--
-- for (i = 0; i < state->total_threads; i++) {
-- /* Fetch latest state->enough earlier */
-+ state.c = c;
-+ state.d = d;
-+ state.total_threads = bch_btre_dirty_init_thread_nr();
-+ state.key_idx = 0;
-+ spin_lock_init(&state.idx_lock);
-+ atomic_set(&state.started, 0);
-+ atomic_set(&state.enough, 0);
-+ init_waitqueue_head(&state.wait);
-+
-+ for (i = 0; i < state.total_threads; i++) {
-+ /* Fetch latest state.enough earlier */
- smp_mb__before_atomic();
-- if (atomic_read(&state->enough))
-+ if (atomic_read(&state.enough))
- break;
-
-- state->infos[i].state = state;
-- atomic_inc(&state->started);
-- snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
--
-- state->infos[i].thread =
-- kthread_run(bch_dirty_init_thread,
-- &state->infos[i],
-- name);
-- if (IS_ERR(state->infos[i].thread)) {
-+ state.infos[i].state = &state;
-+ state.infos[i].thread =
-+ kthread_run(bch_dirty_init_thread, &state.infos[i],
-+ "bch_dirtcnt[%d]", i);
-+ if (IS_ERR(state.infos[i].thread)) {
- pr_err("fails to run thread bch_dirty_init[%d]\n", i);
- for (--i; i >= 0; i--)
-- kthread_stop(state->infos[i].thread);
-+ kthread_stop(state.infos[i].thread);
- goto out;
- }
-+ atomic_inc(&state.started);
- }
-
-- /*
-- * Must wait for all threads to stop.
-- */
-- wait_event_interruptible(state->wait,
-- atomic_read(&state->started) == 0);
--
- out:
-- kfree(state);
-+ /* Must wait for all threads to stop. */
-+ wait_event(state.wait, atomic_read(&state.started) == 0);
-+ rw_unlock(0, c->root);
- }
-
- void bch_cached_dev_writeback_init(struct cached_dev *dc)
-diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
-index 02b2f9df73f6..31df716951f6 100644
---- a/drivers/md/bcache/writeback.h
-+++ b/drivers/md/bcache/writeback.h
-@@ -20,7 +20,7 @@
- #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
- #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
-
--#define BCH_DIRTY_INIT_THRD_MAX 64
-+#define BCH_DIRTY_INIT_THRD_MAX 12
- /*
- * 14 (16384ths) is chosen here as something that each backing device
- * should be a reasonable fraction of the share, and not to blow up
---
-2.35.3
-
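Patch 2 applies the same thread pool conversion to the writeback path;
the one genuinely new ingredient is holding the root node's read lock
for the entire traversal. A minimal sketch of why that matters, using a
plain rwsem in place of bcache's rw_lock()/rw_unlock() helpers
(root_lock, count_dirty_sectors and split_root are made-up names):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(root_lock);  /* stands in for the btree root lock */

    /* Many counting threads may walk the tree concurrently (read side),
     * but none of them can tolerate the root splitting underneath them. */
    static void count_dirty_sectors(void)
    {
            down_read(&root_lock);    /* rw_lock(0, c->root, ...) in bcache */
            /* ... iterate keys and accumulate dirty sector counts ... */
            up_read(&root_lock);
    }

    /* An insert that would split the root needs the write side, so it
     * simply waits until every counting thread has finished. */
    static void split_root(void)
    {
            down_write(&root_lock);
            /* ... grow the tree by one level, update the root ... */
            up_write(&root_lock);
    }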
diff --git a/for-next/0003-bcache-remove-incremental-dirty-sector-counting-for-.patch b/for-next/0003-bcache-remove-incremental-dirty-sector-counting-for-.patch
deleted file mode 100644
index c6a1f4f..0000000
--- a/for-next/0003-bcache-remove-incremental-dirty-sector-counting-for-.patch
+++ /dev/null
@@ -1,138 +0,0 @@
-From c8bd7529cef5317058c667cca7ebde15c0668300 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 21 May 2022 14:46:03 +0800
-Subject: [PATCH 3/4] bcache: remove incremental dirty sector counting for
- bch_sectors_dirty_init()
-
-After making bch_sectors_dirty_init() multithreaded, the existing
-incremental dirty sector counting in bch_root_node_dirty_init() no
-longer really releases the btree occupation after iterating 500000
-(INIT_KEYS_EACH_TIME) bkeys. Because a read lock is now held on the
-btree root node to prevent the btree from being split during the dirty
-sector counting, other I/O requesters have no chance to gain the write
-lock, even when bcache_btree() restarts.
-
-That is to say, the incremental dirty sector counting is incompatible
-with the multithreaded bch_sectors_dirty_init(). We have to choose one
-and drop the other.
-
-In my testing, with 512-byte random writes, I generated 1.2T of dirty
-data and a btree with 400K nodes. With a single thread and incremental
-dirty sector counting, it takes 30+ minutes to register the backing
-device. With multithreaded dirty sector counting, the backing device
-registration completes within 2 minutes.
-
-The 30+ minutes vs. 2 minutes difference made me decide to keep the
-multithreaded bch_sectors_dirty_init() and drop the incremental dirty
-sector counting, which is what this patch does.
-
-But INIT_KEYS_EACH_TIME is kept: in sectors_dirty_init_fn() the CPU is
-released by cond_resched() after every INIT_KEYS_EACH_TIME keys
-iterated. This avoids the watchdog reporting a bogus soft lockup
-warning. (A condensed sketch of the old and new control flow follows
-this patch.)
-
-Fixes: b144e45fc576 ("bcache: make bch_sectors_dirty_init() to be multithreaded")
-Signed-off-by: Coly Li <colyli@suse.de>
-Cc: stable@vger.kernel.org
----
- drivers/md/bcache/writeback.c | 41 +++++++++++------------------------
- 1 file changed, 13 insertions(+), 28 deletions(-)
-
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index d24c09490f8e..75b71199800d 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -805,13 +805,11 @@ static int bch_writeback_thread(void *arg)
-
- /* Init */
- #define INIT_KEYS_EACH_TIME 500000
--#define INIT_KEYS_SLEEP_MS 100
-
- struct sectors_dirty_init {
- struct btree_op op;
- unsigned int inode;
- size_t count;
-- struct bkey start;
- };
-
- static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
-@@ -827,11 +825,8 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
- KEY_START(k), KEY_SIZE(k));
-
- op->count++;
-- if (atomic_read(&b->c->search_inflight) &&
-- !(op->count % INIT_KEYS_EACH_TIME)) {
-- bkey_copy_key(&op->start, k);
-- return -EAGAIN;
-- }
-+ if (!(op->count % INIT_KEYS_EACH_TIME))
-+ cond_resched();
-
- return MAP_CONTINUE;
- }
-@@ -846,24 +841,16 @@ static int bch_root_node_dirty_init(struct cache_set *c,
- bch_btree_op_init(&op.op, -1);
- op.inode = d->id;
- op.count = 0;
-- op.start = KEY(op.inode, 0, 0);
--
-- do {
-- ret = bcache_btree(map_keys_recurse,
-- k,
-- c->root,
-- &op.op,
-- &op.start,
-- sectors_dirty_init_fn,
-- 0);
-- if (ret == -EAGAIN)
-- schedule_timeout_interruptible(
-- msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
-- else if (ret < 0) {
-- pr_warn("sectors dirty init failed, ret=%d!\n", ret);
-- break;
-- }
-- } while (ret == -EAGAIN);
-+
-+ ret = bcache_btree(map_keys_recurse,
-+ k,
-+ c->root,
-+ &op.op,
-+ &KEY(op.inode, 0, 0),
-+ sectors_dirty_init_fn,
-+ 0);
-+ if (ret < 0)
-+ pr_warn("sectors dirty init failed, ret=%d!\n", ret);
-
- return ret;
- }
-@@ -907,7 +894,6 @@ static int bch_dirty_init_thread(void *arg)
- goto out;
- }
- skip_nr--;
-- cond_resched();
- }
-
- if (p) {
-@@ -917,7 +903,6 @@ static int bch_dirty_init_thread(void *arg)
-
- p = NULL;
- prev_idx = cur_idx;
-- cond_resched();
- }
-
- out:
-@@ -956,11 +941,11 @@ void bch_sectors_dirty_init(struct bcache_device *d)
- bch_btree_op_init(&op.op, -1);
- op.inode = d->id;
- op.count = 0;
-- op.start = KEY(op.inode, 0, 0);
-
- for_each_key_filter(&c->root->keys,
- k, &iter, bch_ptr_invalid)
- sectors_dirty_init_fn(&op.op, c->root, k);
-+
- rw_unlock(0, c->root);
- return;
- }
---
-2.35.3
-
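The control flow change in patch 3 reduces to swapping a restartable
iteration for a simple preemption point. A condensed sketch under an
assumed name (visit_key is not the real callback; the real one is
sectors_dirty_init_fn above):

    #include <linux/sched.h>

    #define KEYS_PER_BATCH 500000   /* INIT_KEYS_EACH_TIME in the patch */

    /* With the root read-locked for the whole run, bailing out with
     * -EAGAIN every batch buys writers nothing -- they cannot take the
     * write lock anyway -- so the save-key/sleep/restart machinery is
     * replaced by a plain scheduling point. */
    static int visit_key(unsigned long *count)
    {
            if (!(++(*count) % KEYS_PER_BATCH))
                    cond_resched();  /* keeps the soft-lockup watchdog quiet */
            return 0;                /* MAP_CONTINUE in the real callback */
    }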
diff --git a/for-next/0004-bcache-avoid-journal-no-space-deadlock-by-reserving-.patch b/for-next/0004-bcache-avoid-journal-no-space-deadlock-by-reserving-.patch
deleted file mode 100644
index e9fe259..0000000
--- a/for-next/0004-bcache-avoid-journal-no-space-deadlock-by-reserving-.patch
+++ /dev/null
@@ -1,148 +0,0 @@
-From 2dc9383e085c5474a081e9b54cbe7743466d4e3e Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 21 May 2022 22:55:46 +0800
-Subject: [PATCH 4/4] bcache: avoid journal no-space deadlock by reserving 1
- journal bucket
-
-The journal no-space deadlock has been reported from time to time. Such
-a deadlock can happen in the following situation.
-
-When all journal buckets are fully filled by active jsets under heavy
-write I/O load, the cache set registration (after a reboot) will load
-all active jsets and insert them into the btree again (this is called
-journal replay). If a journaled bkey inserted into a btree node results
-in a btree node split, a new journal request might be triggered. For
-example, if the btree grows one more level after the node split, the
-root node record in the cache device super block will be updated by
-bch_journal_meta() from bch_btree_set_root(). But since there is no
-space in the journal buckets, the journal replay has to wait for a new
-journal bucket to be reclaimed, which cannot happen until at least one
-journal bucket has been replayed. This is one example of how the
-journal no-space deadlock happens.
-
-The solution is to reserve 1 journal bucket at run time, and only
-permit the reserved journal bucket to be used during the cache set
-registration procedure for things like journal replay. Then the journal
-space will never be fully filled, and the journal no-space deadlock can
-no longer happen.
-
-This patch adds a new member "bool do_reserve" to struct journal. It is
-initialized to 0 (false) when struct journal is allocated, and set to
-1 (true) by bch_journal_space_reserve() once all initialization is done
-in run_cache_set(). At run time, when journal_reclaim() tries to
-allocate a new journal bucket, free_journal_buckets() is called to
-check whether there are enough free journal buckets to use. If there is
-only 1 free journal bucket and journal->do_reserve is 1 (true), the
-last bucket is reserved and free_journal_buckets() returns 0 to
-indicate that no journal bucket is free. Then journal_reclaim() gives
-up and retries later to see whether a free journal bucket is available.
-By this method, there is always 1 journal bucket reserved at run time.
-(A sketch of the bucket arithmetic follows this patch.)
-
-During the cache set registration, journal->do_reserve is 0 (false), so
-the reserved journal bucket can be used to avoid the no-space deadlock.
-
-Reported-by: Nikhil Kshirsagar <nkshirsagar@gmail.com>
-Signed-off-by: Coly Li <colyli@suse.de>
-Cc: stable@vger.kernel.org
----
- drivers/md/bcache/journal.c | 31 ++++++++++++++++++++++++++-----
- drivers/md/bcache/journal.h | 2 ++
- drivers/md/bcache/super.c | 1 +
- 3 files changed, 29 insertions(+), 5 deletions(-)
-
-diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
-index df5347ea450b..e5da469a4235 100644
---- a/drivers/md/bcache/journal.c
-+++ b/drivers/md/bcache/journal.c
-@@ -405,6 +405,11 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
- return ret;
- }
-
-+void bch_journal_space_reserve(struct journal *j)
-+{
-+ j->do_reserve = true;
-+}
-+
- /* Journalling */
-
- static void btree_flush_write(struct cache_set *c)
-@@ -621,12 +626,30 @@ static void do_journal_discard(struct cache *ca)
- }
- }
-
-+static unsigned int free_journal_buckets(struct cache_set *c)
-+{
-+ struct journal *j = &c->journal;
-+ struct cache *ca = c->cache;
-+ struct journal_device *ja = &c->cache->journal;
-+ unsigned int n;
-+
-+ /* In case njournal_buckets is not power of 2 */
-+ if (ja->cur_idx >= ja->discard_idx)
-+ n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
-+ else
-+ n = ja->discard_idx - ja->cur_idx;
-+
-+ if (n > (1 + j->do_reserve))
-+ return n - (1 + j->do_reserve);
-+
-+ return 0;
-+}
-+
- static void journal_reclaim(struct cache_set *c)
- {
- struct bkey *k = &c->journal.key;
- struct cache *ca = c->cache;
- uint64_t last_seq;
-- unsigned int next;
- struct journal_device *ja = &ca->journal;
- atomic_t p __maybe_unused;
-
-@@ -649,12 +672,10 @@ static void journal_reclaim(struct cache_set *c)
- if (c->journal.blocks_free)
- goto out;
-
-- next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
-- /* No space available on this device */
-- if (next == ja->discard_idx)
-+ if (!free_journal_buckets(c))
- goto out;
-
-- ja->cur_idx = next;
-+ ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
- k->ptr[0] = MAKE_PTR(0,
- bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
- ca->sb.nr_this_dev);
-diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
-index f2ea34d5f431..cd316b4a1e95 100644
---- a/drivers/md/bcache/journal.h
-+++ b/drivers/md/bcache/journal.h
-@@ -105,6 +105,7 @@ struct journal {
- spinlock_t lock;
- spinlock_t flush_write_lock;
- bool btree_flushing;
-+ bool do_reserve;
- /* used when waiting because the journal was full */
- struct closure_waitlist wait;
- struct closure io;
-@@ -182,5 +183,6 @@ int bch_journal_replay(struct cache_set *c, struct list_head *list);
-
- void bch_journal_free(struct cache_set *c);
- int bch_journal_alloc(struct cache_set *c);
-+void bch_journal_space_reserve(struct journal *j);
-
- #endif /* _BCACHE_JOURNAL_H */
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index bf3de149d3c9..2bb55278d22d 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -2128,6 +2128,7 @@ static int run_cache_set(struct cache_set *c)
-
- flash_devs_run(c);
-
-+ bch_journal_space_reserve(&c->journal);
- set_bit(CACHE_SET_RUNNING, &c->flags);
- return 0;
- err:
---
-2.35.3
-
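The reserve logic in free_journal_buckets() is ring buffer arithmetic
with one extra slot held back. A standalone sketch with a worked
example; the function and parameter names here are illustrative, and
the numbers are made up purely to demonstrate the arithmetic:

    /* Journal buckets form a ring of size nbuckets; cur_idx chases
     * discard_idx, and the gap between them is what is free. */
    static unsigned int free_buckets(unsigned int nbuckets,
                                     unsigned int cur_idx,
                                     unsigned int discard_idx,
                                     unsigned int do_reserve)
    {
            unsigned int n;

            if (cur_idx >= discard_idx)     /* the free gap wraps around */
                    n = nbuckets + discard_idx - cur_idx;
            else
                    n = discard_idx - cur_idx;

            /* One bucket is always kept in hand so cur_idx never catches
             * up with discard_idx (matching the old next == discard_idx
             * check), plus one more while do_reserve is set, so journal
             * replay during registration always has a bucket to write. */
            if (n > (1 + do_reserve))
                    return n - (1 + do_reserve);
            return 0;
    }

    /* Worked example: nbuckets = 8, cur_idx = 6, discard_idx = 2.
     * n = 8 + 2 - 6 = 4 free buckets in the ring. At run time
     * (do_reserve = 1) only 4 - 2 = 2 are usable; during registration
     * (do_reserve = 0) the same state yields 4 - 1 = 3, including the
     * bucket normally held in reserve. */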