author     Coly Li <colyli@suse.de>  2020-09-18 17:49:33 +0800
committer  Coly Li <colyli@suse.de>  2020-09-18 17:49:33 +0800
commit     158ccc42daa1b97cf472ded4677d2618cb63f542 (patch)
tree       8e135dbfc0a05a8b282149921c764e8eb9e7d4ce
parent     5a0407b3689ea45ffbfd352ec5d49644c59931d9 (diff)
download   bcache-patches-158ccc42daa1b97cf472ded4677d2618cb63f542.tar.gz
for-next: prepare for 5.10
-rw-r--r--  for-next/0001-bcache-share-register-sysfs-with-async-register.patch | 4
-rw-r--r--  for-next/0002-bcache-check-c-root-with-IS_ERR_OR_NULL-in-mca_reser.patch | 54
-rw-r--r--  for-next/0003-bcache-Convert-to-DEFINE_SHOW_ATTRIBUTE.patch | 61
-rw-r--r--  for-next/0004-bcache-remove-int-n-from-parameter-list-of-bch_bucke.patch | 152
-rw-r--r--  for-next/0005-bcache-explicitly-make-cache_set-only-have-single-ca.patch | 128
-rw-r--r--  for-next/0006-bcache-remove-for_each_cache.patch | 896
-rw-r--r--  for-next/0007-bcache-add-set_uuid-in-struct-cache_set.patch | 173
-rw-r--r--  for-next/0008-bcache-only-use-block_bytes-on-struct-cache.patch | 258
-rw-r--r--  for-next/0009-bcache-remove-useless-alloc_bucket_pages.patch | 30
-rw-r--r--  for-next/0010-bcache-remove-useless-bucket_pages.patch | 30
-rw-r--r--  for-next/0011-bcache-only-use-bucket_bytes-on-struct-cache.patch | 50
-rw-r--r--  for-next/0012-bcache-don-t-check-seq-numbers-in-register_cache_set.patch | 52
-rw-r--r--  for-next/0013-bcache-remove-can_attach_cache.patch | 50
-rw-r--r--  for-next/0014-bcache-check-and-set-sync-status-on-cache-s-in-memor.patch | 110
-rw-r--r--  for-next/0015-bcache-remove-embedded-struct-cache_sb-from-struct-c.patch | 469
-rw-r--r--  for-next/v4-0001-docs-update-trusted-encrypted.rst.patch | 54
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0000-cover-letter.patch | 73
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch | 5
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch | 4
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0003-bcache-remove-for_each_cache.patch | 5
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch | 5
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch | 5
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch | 5
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0007-bcache-remove-useless-bucket_pages.patch | 5
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch | 5
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0009-bcache-don-t-check-seq-numbers-in-register_cache_.patch | 53
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0010-bcache-remove-can_attach_cache.patch | 50
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0011-bcache-check-and-set-sync-status-on-cache-s-in-me.patch | 110
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0012-bcache-remove-embedded-struct-cache_sb-from-struc.patch | 469
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0014-bcache-move-struct-cache_sb-out-of-uapi-bcache.h.patch | 261
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0015-bcache-share-register-sysfs-with-async-register.patch | 66
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0016-docs-update-trusted-encrypted.rst.patch | 59
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0017-net-introduce-helper-sendpage_ok-in-include-linux.patch | 75
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0018-nvme-tcp-check-page-by-sendpage_ok-before-calling.patch | 57
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0019-drbd-code-cleanup-by-using-sendpage_ok-to-check-p.patch | 42
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch | 152
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch | 128
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0003-bcache-remove-for_each_cache.patch | 896
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch | 173
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch | 258
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch | 30
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0007-bcache-remove-useless-bucket_pages.patch | 30
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch | 50
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch (renamed from for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch) | 2
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch (renamed from for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch) | 3
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0011-bcache-remove-can_attach_cache.patch (renamed from for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0011-bcache-remove-can_attach_cache.patch) | 3
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch (renamed from for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch) | 3
-rw-r--r--  for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch (renamed from for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch) | 37
48 files changed, 5041 insertions, 649 deletions
diff --git a/for-next/0001-bcache-share-register-sysfs-with-async-register.patch b/for-next/0001-bcache-share-register-sysfs-with-async-register.patch
index 3b823e1..132c48f 100644
--- a/for-next/0001-bcache-share-register-sysfs-with-async-register.patch
+++ b/for-next/0001-bcache-share-register-sysfs-with-async-register.patch
@@ -1,7 +1,7 @@
-From 3015499a88e4a06b9923c94789d4bf44a05db0ca Mon Sep 17 00:00:00 2001
+From 9dca5fe119fd6340f112949be5c6c1fc91da5c00 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Sat, 15 Aug 2020 16:56:19 +0800
-Subject: [PATCH] bcache: share register sysfs with async register
+Subject: [PATCH 01/15] bcache: share register sysfs with async register
Previously the experimental async registration used a separate sysfs
file, register_async. Now the async registration code seems to work well
diff --git a/for-next/0002-bcache-check-c-root-with-IS_ERR_OR_NULL-in-mca_reser.patch b/for-next/0002-bcache-check-c-root-with-IS_ERR_OR_NULL-in-mca_reser.patch
new file mode 100644
index 0000000..951a69a
--- /dev/null
+++ b/for-next/0002-bcache-check-c-root-with-IS_ERR_OR_NULL-in-mca_reser.patch
@@ -0,0 +1,54 @@
+From d0d6e0319e4c24492281ea803e3064e40019d556 Mon Sep 17 00:00:00 2001
+From: Dongsheng Yang <dongsheng.yang@easystack.cn>
+Date: Thu, 17 Sep 2020 08:13:26 +0000
+Subject: [PATCH 02/15] bcache: check c->root with IS_ERR_OR_NULL() in
+ mca_reserve()
+
+In the mca_reserve(c) macro, we check whether c->root is NULL or not.
+But that's not enough: when we read the root node in run_cache_set(),
+if we get an error in bch_btree_node_read_done(), ERR_PTR(-EIO) is
+assigned to c->root.
+
+We then continue to unregister, but before unregister_shrinker(&c->shrink)
+is called there is a possibility that bch_mca_count() gets called, and we
+would get a crash with a call trace like this:
+
+[ 2149.876008] Unable to handle kernel NULL pointer dereference at virtual address 00000000000000b5
+... ...
+[ 2150.598931] Call trace:
+[ 2150.606439] bch_mca_count+0x58/0x98 [escache]
+[ 2150.615866] do_shrink_slab+0x54/0x310
+[ 2150.624429] shrink_slab+0x248/0x2d0
+[ 2150.632633] drop_slab_node+0x54/0x88
+[ 2150.640746] drop_slab+0x50/0x88
+[ 2150.648228] drop_caches_sysctl_handler+0xf0/0x118
+[ 2150.657219] proc_sys_call_handler.isra.18+0xb8/0x110
+[ 2150.666342] proc_sys_write+0x40/0x50
+[ 2150.673889] __vfs_write+0x48/0x90
+[ 2150.681095] vfs_write+0xac/0x1b8
+[ 2150.688145] ksys_write+0x6c/0xd0
+[ 2150.695127] __arm64_sys_write+0x24/0x30
+[ 2150.702749] el0_svc_handler+0xa0/0x128
+[ 2150.710296] el0_svc+0x8/0xc
+
+Signed-off-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/btree.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 3d8bd0692af3..ae7611fa42bf 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -514,7 +514,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
+ * mca -> memory cache
+ */
+
+-#define mca_reserve(c) (((c->root && c->root->level) \
++#define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \
+ ? c->root->level : 1) * 8 + 16)
+ #define mca_can_free(c) \
+ max_t(int, 0, c->btree_cache_used - mca_reserve(c))
+--
+2.26.2
+
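
For context on the fix above: the plain "c->root &&" test is insufficient
because ERR_PTR() encodes a negative errno as a non-NULL pointer value, so
an error pointer passes the NULL check and the subsequent c->root->level
dereference faults. A minimal sketch of the relevant helpers, simplified
from include/linux/err.h:

    #define MAX_ERRNO 4095

    /* Error pointers live in the top MAX_ERRNO bytes of the address space. */
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static inline void *ERR_PTR(long error)
    {
        return (void *)error;   /* e.g. ERR_PTR(-EIO) is non-NULL */
    }

    static inline bool IS_ERR(const void *ptr)
    {
        return IS_ERR_VALUE((unsigned long)ptr);
    }

    static inline bool IS_ERR_OR_NULL(const void *ptr)
    {
        return !ptr || IS_ERR(ptr);
    }

So mca_reserve() must use IS_ERR_OR_NULL(c->root), which rejects both the
NULL case and the ERR_PTR(-EIO) case before c->root->level is touched.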
diff --git a/for-next/0003-bcache-Convert-to-DEFINE_SHOW_ATTRIBUTE.patch b/for-next/0003-bcache-Convert-to-DEFINE_SHOW_ATTRIBUTE.patch
new file mode 100644
index 0000000..af8deb5
--- /dev/null
+++ b/for-next/0003-bcache-Convert-to-DEFINE_SHOW_ATTRIBUTE.patch
@@ -0,0 +1,61 @@
+From 984f3fa5893973a53454f913e2d266ca7a01fd93 Mon Sep 17 00:00:00 2001
+From: Qinglang Miao <miaoqinglang@huawei.com>
+Date: Thu, 17 Sep 2020 20:23:26 +0800
+Subject: [PATCH 03/15] bcache: Convert to DEFINE_SHOW_ATTRIBUTE
+
+Use the DEFINE_SHOW_ATTRIBUTE macro to simplify the code.
+
+Since inode->i_private equals the third parameter of
+debugfs_create_file(), which is NULL, this is equivalent
+to the original code logic.
+
+Signed-off-by: Qinglang Miao <miaoqinglang@huawei.com>
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/closure.c | 16 +++-------------
+ 1 file changed, 3 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
+index 0164a1fe94a9..d8d9394a6beb 100644
+--- a/drivers/md/bcache/closure.c
++++ b/drivers/md/bcache/closure.c
+@@ -159,7 +159,7 @@ void closure_debug_destroy(struct closure *cl)
+
+ static struct dentry *closure_debug;
+
+-static int debug_seq_show(struct seq_file *f, void *data)
++static int debug_show(struct seq_file *f, void *data)
+ {
+ struct closure *cl;
+
+@@ -188,17 +188,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
+ return 0;
+ }
+
+-static int debug_seq_open(struct inode *inode, struct file *file)
+-{
+- return single_open(file, debug_seq_show, NULL);
+-}
+-
+-static const struct file_operations debug_ops = {
+- .owner = THIS_MODULE,
+- .open = debug_seq_open,
+- .read = seq_read,
+- .release = single_release
+-};
++DEFINE_SHOW_ATTRIBUTE(debug);
+
+ void __init closure_debug_init(void)
+ {
+@@ -209,7 +199,7 @@ void __init closure_debug_init(void)
+ * about this.
+ */
+ closure_debug = debugfs_create_file(
+- "closures", 0400, bcache_debug, NULL, &debug_ops);
++ "closures", 0400, bcache_debug, NULL, &debug_fops);
+ }
+ #endif
+
+--
+2.26.2
+
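
For reference, DEFINE_SHOW_ATTRIBUTE(debug) in the patch above generates
the open helper and the file_operations that the removed hand-written code
provided. Roughly, the macro from include/linux/seq_file.h expands to the
following for __name = debug (which is why debug_seq_show is renamed to
debug_show and debug_ops becomes debug_fops):

    static int debug_open(struct inode *inode, struct file *file)
    {
        /* inode->i_private is NULL here, since debugfs_create_file()
         * was called with data == NULL, so this matches the removed
         * single_open(file, debug_seq_show, NULL) call.
         */
        return single_open(file, debug_show, inode->i_private);
    }

    static const struct file_operations debug_fops = {
        .owner   = THIS_MODULE,
        .open    = debug_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };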
diff --git a/for-next/0004-bcache-remove-int-n-from-parameter-list-of-bch_bucke.patch b/for-next/0004-bcache-remove-int-n-from-parameter-list-of-bch_bucke.patch
new file mode 100644
index 0000000..28421ed
--- /dev/null
+++ b/for-next/0004-bcache-remove-int-n-from-parameter-list-of-bch_bucke.patch
@@ -0,0 +1,152 @@
+From d013c7074107c95c1b7a2a8ad5b011eb543f71e8 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 00:07:05 +0800
+Subject: [PATCH 04/15] bcache: remove 'int n' from parameter list of
+ bch_bucket_alloc_set()
+
+The parameter 'int n' of bch_bucket_alloc_set() is not clearly
+defined. From the code comments, n is the number of buckets to allocate;
+from the code itself, 'n' is the maximum number of caches to iterate
+over. Indeed, at all the locations where bch_bucket_alloc_set() is
+called, 'n' is always 1.
+
+This patch removes the confusing and unnecessary 'int n' from the
+parameter list of bch_bucket_alloc_set(), and explicitly allocates only
+1 bucket for its caller.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/alloc.c | 35 +++++++++++++++--------------------
+ drivers/md/bcache/bcache.h | 4 ++--
+ drivers/md/bcache/btree.c | 2 +-
+ drivers/md/bcache/super.c | 2 +-
+ 4 files changed, 19 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 52035a78d836..4493ff57476d 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -49,7 +49,7 @@
+ *
+ * bch_bucket_alloc() allocates a single bucket from a specific cache.
+ *
+- * bch_bucket_alloc_set() allocates one or more buckets from different caches
++ * bch_bucket_alloc_set() allocates one bucket from different caches
+ * out of a cache set.
+ *
+ * free_some_buckets() drives all the processes described above. It's called
+@@ -488,34 +488,29 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
+ }
+
+ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait)
++ struct bkey *k, bool wait)
+ {
+- int i;
++ struct cache *ca;
++ long b;
+
+ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return -1;
+
+ lockdep_assert_held(&c->bucket_lock);
+- BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
+
+ bkey_init(k);
+
+- /* sort by free space/prio of oldest data in caches */
+-
+- for (i = 0; i < n; i++) {
+- struct cache *ca = c->cache_by_alloc[i];
+- long b = bch_bucket_alloc(ca, reserve, wait);
++ ca = c->cache_by_alloc[0];
++ b = bch_bucket_alloc(ca, reserve, wait);
++ if (b == -1)
++ goto err;
+
+- if (b == -1)
+- goto err;
++ k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
++ bucket_to_sector(c, b),
++ ca->sb.nr_this_dev);
+
+- k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
+- bucket_to_sector(c, b),
+- ca->sb.nr_this_dev);
+-
+- SET_KEY_PTRS(k, i + 1);
+- }
++ SET_KEY_PTRS(k, 1);
+
+ return 0;
+ err:
+@@ -525,12 +520,12 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ }
+
+ int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait)
++ struct bkey *k, bool wait)
+ {
+ int ret;
+
+ mutex_lock(&c->bucket_lock);
+- ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
++ ret = __bch_bucket_alloc_set(c, reserve, k, wait);
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+ }
+@@ -638,7 +633,7 @@ bool bch_alloc_sectors(struct cache_set *c,
+
+ spin_unlock(&c->data_bucket_lock);
+
+- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
++ if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
+ return false;
+
+ spin_lock(&c->data_bucket_lock);
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 4fd03d2496d8..5ff6e9573935 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -994,9 +994,9 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k);
+
+ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
+ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait);
++ struct bkey *k, bool wait);
+ int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait);
++ struct bkey *k, bool wait);
+ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
+ unsigned int sectors, unsigned int write_point,
+ unsigned int write_prio, bool wait);
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index ae7611fa42bf..e2bd03408e66 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1091,7 +1091,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+
+ mutex_lock(&c->bucket_lock);
+ retry:
+- if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
++ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
+ goto err;
+
+ bkey_put(c, &k.key);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 61abd6499a11..18358b67d8d6 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -486,7 +486,7 @@ static int __uuid_write(struct cache_set *c)
+ closure_init_stack(&cl);
+ lockdep_assert_held(&bch_register_lock);
+
+- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
++ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
+ return 1;
+
+ size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
+--
+2.26.2
+
diff --git a/for-next/0005-bcache-explicitly-make-cache_set-only-have-single-ca.patch b/for-next/0005-bcache-explicitly-make-cache_set-only-have-single-ca.patch
new file mode 100644
index 0000000..70812a8
--- /dev/null
+++ b/for-next/0005-bcache-explicitly-make-cache_set-only-have-single-ca.patch
@@ -0,0 +1,128 @@
+From 3ad499ab5c879c938e663108f1ff4c8d89c48144 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 00:30:59 +0800
+Subject: [PATCH 05/15] bcache: explicitly make cache_set only have single
+ cache
+
+Although the bcache code has a framework for multiple caches in a cache
+set, multiple-cache support was never completed, and users rely on md
+raid1 for multiple copies of the cached data.
+
+This patch makes the following changes in struct cache_set, to
+explicitly make a cache_set have only a single cache:
+- Change the pointer array "*cache[MAX_CACHES_PER_SET]" to a single
+  pointer "*cache".
+- Remove the pointer array "*cache_by_alloc[MAX_CACHES_PER_SET]".
+- Remove "caches_loaded".
+
+Now the code reflects exactly what it does in practice: only one cache
+is used in the cache set.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/alloc.c | 2 +-
+ drivers/md/bcache/bcache.h | 8 +++-----
+ drivers/md/bcache/super.c | 19 ++++++++-----------
+ 3 files changed, 12 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 4493ff57476d..3385f6add6df 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -501,7 +501,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+
+ bkey_init(k);
+
+- ca = c->cache_by_alloc[0];
++ ca = c->cache;
+ b = bch_bucket_alloc(ca, reserve, wait);
+ if (b == -1)
+ goto err;
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 5ff6e9573935..aa112c1adba1 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -519,9 +519,7 @@ struct cache_set {
+
+ struct cache_sb sb;
+
+- struct cache *cache[MAX_CACHES_PER_SET];
+- struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
+- int caches_loaded;
++ struct cache *cache;
+
+ struct bcache_device **devices;
+ unsigned int devices_max_used;
+@@ -808,7 +806,7 @@ static inline struct cache *PTR_CACHE(struct cache_set *c,
+ const struct bkey *k,
+ unsigned int ptr)
+ {
+- return c->cache[PTR_DEV(k, ptr)];
++ return c->cache;
+ }
+
+ static inline size_t PTR_BUCKET_NR(struct cache_set *c,
+@@ -890,7 +888,7 @@ do { \
+ /* Looping macros */
+
+ #define for_each_cache(ca, cs, iter) \
+- for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
++ for (iter = 0; ca = cs->cache, iter < 1; iter++)
+
+ #define for_each_bucket(b, ca) \
+ for (b = (ca)->buckets + (ca)->sb.first_bucket; \
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 18358b67d8d6..718515644c89 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1675,7 +1675,7 @@ static void cache_set_free(struct closure *cl)
+ for_each_cache(ca, c, i)
+ if (ca) {
+ ca->set = NULL;
+- c->cache[ca->sb.nr_this_dev] = NULL;
++ c->cache = NULL;
+ kobject_put(&ca->kobj);
+ }
+
+@@ -2166,7 +2166,7 @@ static const char *register_cache_set(struct cache *ca)
+
+ list_for_each_entry(c, &bch_cache_sets, list)
+ if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
+- if (c->cache[ca->sb.nr_this_dev])
++ if (c->cache)
+ return "duplicate cache set member";
+
+ if (!can_attach_cache(ca, c))
+@@ -2216,14 +2216,11 @@ static const char *register_cache_set(struct cache *ca)
+
+ kobject_get(&ca->kobj);
+ ca->set = c;
+- ca->set->cache[ca->sb.nr_this_dev] = ca;
+- c->cache_by_alloc[c->caches_loaded++] = ca;
++ ca->set->cache = ca;
+
+- if (c->caches_loaded == c->sb.nr_in_set) {
+- err = "failed to run cache set";
+- if (run_cache_set(c) < 0)
+- goto err;
+- }
++ err = "failed to run cache set";
++ if (run_cache_set(c) < 0)
++ goto err;
+
+ return NULL;
+ err:
+@@ -2240,8 +2237,8 @@ void bch_cache_release(struct kobject *kobj)
+ unsigned int i;
+
+ if (ca->set) {
+- BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
+- ca->set->cache[ca->sb.nr_this_dev] = NULL;
++ BUG_ON(ca->set->cache != ca);
++ ca->set->cache = NULL;
+ }
+
+ free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
+--
+2.26.2
+
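
With the interim for_each_cache() definition above, the comma operator
makes the loop condition "iter < 1", so the loop body runs exactly once
with ca assigned to the single cache. A sketch of the expansion, where
use() is a hypothetical stand-in for the loop body:

    struct cache *ca;
    unsigned int i;

    /* for_each_cache(ca, c, i) use(ca); now expands to: */
    for (i = 0; ca = c->cache, i < 1; i++)
        use(ca);    /* runs once, with ca == c->cache */

Patch 0006 below removes the macro entirely and open-codes the
"ca = c->cache" assignment at each former call site.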
diff --git a/for-next/0006-bcache-remove-for_each_cache.patch b/for-next/0006-bcache-remove-for_each_cache.patch
new file mode 100644
index 0000000..d81e378
--- /dev/null
+++ b/for-next/0006-bcache-remove-for_each_cache.patch
@@ -0,0 +1,896 @@
+From c1fbcf37e05ac362e8a45f00d8f5037c1c697000 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 01:26:09 +0800
+Subject: [PATCH 06/15] bcache: remove for_each_cache()
+
+Since each cache_set now explicitly has a single cache,
+for_each_cache() is unnecessary. This patch removes the macro, updates
+all locations where it is used, and makes sure the code logic remains
+consistent.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/alloc.c | 17 ++-
+ drivers/md/bcache/bcache.h | 9 +-
+ drivers/md/bcache/btree.c | 103 +++++++---------
+ drivers/md/bcache/journal.c | 229 ++++++++++++++++-------------------
+ drivers/md/bcache/movinggc.c | 58 +++++----
+ drivers/md/bcache/super.c | 115 ++++++++----------
+ 6 files changed, 237 insertions(+), 294 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 3385f6add6df..1b8310992dd0 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -88,7 +88,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
+ struct cache *ca;
+ struct bucket *b;
+ unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
+- unsigned int i;
+ int r;
+
+ atomic_sub(sectors, &c->rescale);
+@@ -104,14 +103,14 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
+
+ c->min_prio = USHRT_MAX;
+
+- for_each_cache(ca, c, i)
+- for_each_bucket(b, ca)
+- if (b->prio &&
+- b->prio != BTREE_PRIO &&
+- !atomic_read(&b->pin)) {
+- b->prio--;
+- c->min_prio = min(c->min_prio, b->prio);
+- }
++ ca = c->cache;
++ for_each_bucket(b, ca)
++ if (b->prio &&
++ b->prio != BTREE_PRIO &&
++ !atomic_read(&b->pin)) {
++ b->prio--;
++ c->min_prio = min(c->min_prio, b->prio);
++ }
+
+ mutex_unlock(&c->bucket_lock);
+ }
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index aa112c1adba1..7ffe6b2d179b 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -887,9 +887,6 @@ do { \
+
+ /* Looping macros */
+
+-#define for_each_cache(ca, cs, iter) \
+- for (iter = 0; ca = cs->cache, iter < 1; iter++)
+-
+ #define for_each_bucket(b, ca) \
+ for (b = (ca)->buckets + (ca)->sb.first_bucket; \
+ b < (ca)->buckets + (ca)->sb.nbuckets; b++)
+@@ -931,11 +928,9 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
+
+ static inline void wake_up_allocators(struct cache_set *c)
+ {
+- struct cache *ca;
+- unsigned int i;
++ struct cache *ca = c->cache;
+
+- for_each_cache(ca, c, i)
+- wake_up_process(ca->alloc_thread);
++ wake_up_process(ca->alloc_thread);
+ }
+
+ static inline void closure_bio_submit(struct cache_set *c,
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index e2bd03408e66..f626d536981e 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1167,19 +1167,18 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
+ static int btree_check_reserve(struct btree *b, struct btree_op *op)
+ {
+ struct cache_set *c = b->c;
+- struct cache *ca;
+- unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
++ struct cache *ca = c->cache;
++ unsigned int reserve = (c->root->level - b->level) * 2 + 1;
+
+ mutex_lock(&c->bucket_lock);
+
+- for_each_cache(ca, c, i)
+- if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+- if (op)
+- prepare_to_wait(&c->btree_cache_wait, &op->wait,
+- TASK_UNINTERRUPTIBLE);
+- mutex_unlock(&c->bucket_lock);
+- return -EINTR;
+- }
++ if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
++ if (op)
++ prepare_to_wait(&c->btree_cache_wait, &op->wait,
++ TASK_UNINTERRUPTIBLE);
++ mutex_unlock(&c->bucket_lock);
++ return -EINTR;
++ }
+
+ mutex_unlock(&c->bucket_lock);
+
+@@ -1695,7 +1694,6 @@ static void btree_gc_start(struct cache_set *c)
+ {
+ struct cache *ca;
+ struct bucket *b;
+- unsigned int i;
+
+ if (!c->gc_mark_valid)
+ return;
+@@ -1705,14 +1703,14 @@ static void btree_gc_start(struct cache_set *c)
+ c->gc_mark_valid = 0;
+ c->gc_done = ZERO_KEY;
+
+- for_each_cache(ca, c, i)
+- for_each_bucket(b, ca) {
+- b->last_gc = b->gen;
+- if (!atomic_read(&b->pin)) {
+- SET_GC_MARK(b, 0);
+- SET_GC_SECTORS_USED(b, 0);
+- }
++ ca = c->cache;
++ for_each_bucket(b, ca) {
++ b->last_gc = b->gen;
++ if (!atomic_read(&b->pin)) {
++ SET_GC_MARK(b, 0);
++ SET_GC_SECTORS_USED(b, 0);
+ }
++ }
+
+ mutex_unlock(&c->bucket_lock);
+ }
+@@ -1721,7 +1719,8 @@ static void bch_btree_gc_finish(struct cache_set *c)
+ {
+ struct bucket *b;
+ struct cache *ca;
+- unsigned int i;
++ unsigned int i, j;
++ uint64_t *k;
+
+ mutex_lock(&c->bucket_lock);
+
+@@ -1739,7 +1738,6 @@ static void bch_btree_gc_finish(struct cache_set *c)
+ struct bcache_device *d = c->devices[i];
+ struct cached_dev *dc;
+ struct keybuf_key *w, *n;
+- unsigned int j;
+
+ if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+@@ -1756,29 +1754,27 @@ static void bch_btree_gc_finish(struct cache_set *c)
+ rcu_read_unlock();
+
+ c->avail_nbuckets = 0;
+- for_each_cache(ca, c, i) {
+- uint64_t *i;
+
+- ca->invalidate_needs_gc = 0;
++ ca = c->cache;
++ ca->invalidate_needs_gc = 0;
+
+- for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
+- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
++ for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
++ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
+
+- for (i = ca->prio_buckets;
+- i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
+- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
++ for (k = ca->prio_buckets;
++ k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
++ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
+
+- for_each_bucket(b, ca) {
+- c->need_gc = max(c->need_gc, bucket_gc_gen(b));
++ for_each_bucket(b, ca) {
++ c->need_gc = max(c->need_gc, bucket_gc_gen(b));
+
+- if (atomic_read(&b->pin))
+- continue;
++ if (atomic_read(&b->pin))
++ continue;
+
+- BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
++ BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
+
+- if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
+- c->avail_nbuckets++;
+- }
++ if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
++ c->avail_nbuckets++;
+ }
+
+ mutex_unlock(&c->bucket_lock);
+@@ -1830,12 +1826,10 @@ static void bch_btree_gc(struct cache_set *c)
+
+ static bool gc_should_run(struct cache_set *c)
+ {
+- struct cache *ca;
+- unsigned int i;
++ struct cache *ca = c->cache;
+
+- for_each_cache(ca, c, i)
+- if (ca->invalidate_needs_gc)
+- return true;
++ if (ca->invalidate_needs_gc)
++ return true;
+
+ if (atomic_read(&c->sectors_to_gc) < 0)
+ return true;
+@@ -2081,9 +2075,8 @@ int bch_btree_check(struct cache_set *c)
+
+ void bch_initial_gc_finish(struct cache_set *c)
+ {
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ struct bucket *b;
+- unsigned int i;
+
+ bch_btree_gc_finish(c);
+
+@@ -2098,20 +2091,18 @@ void bch_initial_gc_finish(struct cache_set *c)
+ * This is only safe for buckets that have no live data in them, which
+ * there should always be some of.
+ */
+- for_each_cache(ca, c, i) {
+- for_each_bucket(b, ca) {
+- if (fifo_full(&ca->free[RESERVE_PRIO]) &&
+- fifo_full(&ca->free[RESERVE_BTREE]))
+- break;
++ for_each_bucket(b, ca) {
++ if (fifo_full(&ca->free[RESERVE_PRIO]) &&
++ fifo_full(&ca->free[RESERVE_BTREE]))
++ break;
+
+- if (bch_can_invalidate_bucket(ca, b) &&
+- !GC_MARK(b)) {
+- __bch_invalidate_one_bucket(ca, b);
+- if (!fifo_push(&ca->free[RESERVE_PRIO],
+- b - ca->buckets))
+- fifo_push(&ca->free[RESERVE_BTREE],
+- b - ca->buckets);
+- }
++ if (bch_can_invalidate_bucket(ca, b) &&
++ !GC_MARK(b)) {
++ __bch_invalidate_one_bucket(ca, b);
++ if (!fifo_push(&ca->free[RESERVE_PRIO],
++ b - ca->buckets))
++ fifo_push(&ca->free[RESERVE_BTREE],
++ b - ca->buckets);
+ }
+ }
+
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index c1227bdb57e7..e89ae7c4ba97 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -179,112 +179,109 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
+ ret; \
+ })
+
+- struct cache *ca;
+- unsigned int iter;
++ struct cache *ca = c->cache;
+ int ret = 0;
++ struct journal_device *ja = &ca->journal;
++ DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
++ unsigned int i, l, r, m;
++ uint64_t seq;
+
+- for_each_cache(ca, c, iter) {
+- struct journal_device *ja = &ca->journal;
+- DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
+- unsigned int i, l, r, m;
+- uint64_t seq;
+-
+- bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
+- pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
++ bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
++ pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
+
++ /*
++ * Read journal buckets ordered by golden ratio hash to quickly
++ * find a sequence of buckets with valid journal entries
++ */
++ for (i = 0; i < ca->sb.njournal_buckets; i++) {
+ /*
+- * Read journal buckets ordered by golden ratio hash to quickly
+- * find a sequence of buckets with valid journal entries
++ * We must try the index l with ZERO first for
++ * correctness due to the scenario that the journal
++ * bucket is circular buffer which might have wrapped
+ */
+- for (i = 0; i < ca->sb.njournal_buckets; i++) {
+- /*
+- * We must try the index l with ZERO first for
+- * correctness due to the scenario that the journal
+- * bucket is circular buffer which might have wrapped
+- */
+- l = (i * 2654435769U) % ca->sb.njournal_buckets;
++ l = (i * 2654435769U) % ca->sb.njournal_buckets;
+
+- if (test_bit(l, bitmap))
+- break;
++ if (test_bit(l, bitmap))
++ break;
+
+- if (read_bucket(l))
+- goto bsearch;
+- }
++ if (read_bucket(l))
++ goto bsearch;
++ }
+
+- /*
+- * If that fails, check all the buckets we haven't checked
+- * already
+- */
+- pr_debug("falling back to linear search\n");
++ /*
++ * If that fails, check all the buckets we haven't checked
++ * already
++ */
++ pr_debug("falling back to linear search\n");
+
+- for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
+- if (read_bucket(l))
+- goto bsearch;
++ for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
++ if (read_bucket(l))
++ goto bsearch;
+
+- /* no journal entries on this device? */
+- if (l == ca->sb.njournal_buckets)
+- continue;
++ /* no journal entries on this device? */
++ if (l == ca->sb.njournal_buckets)
++ goto out;
+ bsearch:
+- BUG_ON(list_empty(list));
++ BUG_ON(list_empty(list));
+
+- /* Binary search */
+- m = l;
+- r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
+- pr_debug("starting binary search, l %u r %u\n", l, r);
++ /* Binary search */
++ m = l;
++ r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
++ pr_debug("starting binary search, l %u r %u\n", l, r);
+
+- while (l + 1 < r) {
+- seq = list_entry(list->prev, struct journal_replay,
+- list)->j.seq;
++ while (l + 1 < r) {
++ seq = list_entry(list->prev, struct journal_replay,
++ list)->j.seq;
+
+- m = (l + r) >> 1;
+- read_bucket(m);
++ m = (l + r) >> 1;
++ read_bucket(m);
+
+- if (seq != list_entry(list->prev, struct journal_replay,
+- list)->j.seq)
+- l = m;
+- else
+- r = m;
+- }
++ if (seq != list_entry(list->prev, struct journal_replay,
++ list)->j.seq)
++ l = m;
++ else
++ r = m;
++ }
+
+- /*
+- * Read buckets in reverse order until we stop finding more
+- * journal entries
+- */
+- pr_debug("finishing up: m %u njournal_buckets %u\n",
+- m, ca->sb.njournal_buckets);
+- l = m;
++ /*
++ * Read buckets in reverse order until we stop finding more
++ * journal entries
++ */
++ pr_debug("finishing up: m %u njournal_buckets %u\n",
++ m, ca->sb.njournal_buckets);
++ l = m;
+
+- while (1) {
+- if (!l--)
+- l = ca->sb.njournal_buckets - 1;
++ while (1) {
++ if (!l--)
++ l = ca->sb.njournal_buckets - 1;
+
+- if (l == m)
+- break;
++ if (l == m)
++ break;
+
+- if (test_bit(l, bitmap))
+- continue;
++ if (test_bit(l, bitmap))
++ continue;
+
+- if (!read_bucket(l))
+- break;
+- }
++ if (!read_bucket(l))
++ break;
++ }
+
+- seq = 0;
++ seq = 0;
+
+- for (i = 0; i < ca->sb.njournal_buckets; i++)
+- if (ja->seq[i] > seq) {
+- seq = ja->seq[i];
+- /*
+- * When journal_reclaim() goes to allocate for
+- * the first time, it'll use the bucket after
+- * ja->cur_idx
+- */
+- ja->cur_idx = i;
+- ja->last_idx = ja->discard_idx = (i + 1) %
+- ca->sb.njournal_buckets;
++ for (i = 0; i < ca->sb.njournal_buckets; i++)
++ if (ja->seq[i] > seq) {
++ seq = ja->seq[i];
++ /*
++ * When journal_reclaim() goes to allocate for
++ * the first time, it'll use the bucket after
++ * ja->cur_idx
++ */
++ ja->cur_idx = i;
++ ja->last_idx = ja->discard_idx = (i + 1) %
++ ca->sb.njournal_buckets;
+
+- }
+- }
++ }
+
++out:
+ if (!list_empty(list))
+ c->journal.seq = list_entry(list->prev,
+ struct journal_replay,
+@@ -342,12 +339,10 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
+
+ static bool is_discard_enabled(struct cache_set *s)
+ {
+- struct cache *ca;
+- unsigned int i;
++ struct cache *ca = s->cache;
+
+- for_each_cache(ca, s, i)
+- if (ca->discard)
+- return true;
++ if (ca->discard)
++ return true;
+
+ return false;
+ }
+@@ -633,9 +628,10 @@ static void do_journal_discard(struct cache *ca)
+ static void journal_reclaim(struct cache_set *c)
+ {
+ struct bkey *k = &c->journal.key;
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ uint64_t last_seq;
+- unsigned int iter, n = 0;
++ unsigned int next;
++ struct journal_device *ja = &ca->journal;
+ atomic_t p __maybe_unused;
+
+ atomic_long_inc(&c->reclaim);
+@@ -647,46 +643,31 @@ static void journal_reclaim(struct cache_set *c)
+
+ /* Update last_idx */
+
+- for_each_cache(ca, c, iter) {
+- struct journal_device *ja = &ca->journal;
+-
+- while (ja->last_idx != ja->cur_idx &&
+- ja->seq[ja->last_idx] < last_seq)
+- ja->last_idx = (ja->last_idx + 1) %
+- ca->sb.njournal_buckets;
+- }
++ while (ja->last_idx != ja->cur_idx &&
++ ja->seq[ja->last_idx] < last_seq)
++ ja->last_idx = (ja->last_idx + 1) %
++ ca->sb.njournal_buckets;
+
+- for_each_cache(ca, c, iter)
+- do_journal_discard(ca);
++ do_journal_discard(ca);
+
+ if (c->journal.blocks_free)
+ goto out;
+
+- /*
+- * Allocate:
+- * XXX: Sort by free journal space
+- */
+-
+- for_each_cache(ca, c, iter) {
+- struct journal_device *ja = &ca->journal;
+- unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
++ next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
++ /* No space available on this device */
++ if (next == ja->discard_idx)
++ goto out;
+
+- /* No space available on this device */
+- if (next == ja->discard_idx)
+- continue;
++ ja->cur_idx = next;
++ k->ptr[0] = MAKE_PTR(0,
++ bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
++ ca->sb.nr_this_dev);
++ atomic_long_inc(&c->reclaimed_journal_buckets);
+
+- ja->cur_idx = next;
+- k->ptr[n++] = MAKE_PTR(0,
+- bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
+- ca->sb.nr_this_dev);
+- atomic_long_inc(&c->reclaimed_journal_buckets);
+- }
++ bkey_init(k);
++ SET_KEY_PTRS(k, 1);
++ c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+
+- if (n) {
+- bkey_init(k);
+- SET_KEY_PTRS(k, n);
+- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+- }
+ out:
+ if (!journal_full(&c->journal))
+ __closure_wake_up(&c->journal.wait);
+@@ -750,7 +731,7 @@ static void journal_write_unlocked(struct closure *cl)
+ __releases(c->journal.lock)
+ {
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ struct journal_write *w = c->journal.cur;
+ struct bkey *k = &c->journal.key;
+ unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
+@@ -780,9 +761,7 @@ static void journal_write_unlocked(struct closure *cl)
+ bkey_copy(&w->data->btree_root, &c->root->key);
+ bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
+
+- for_each_cache(ca, c, i)
+- w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
+-
++ w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
+ w->data->magic = jset_magic(&c->sb);
+ w->data->version = BCACHE_JSET_VERSION;
+ w->data->last_seq = last_seq(&c->journal);
+diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
+index 5872d6470470..b9c3d27ec093 100644
+--- a/drivers/md/bcache/movinggc.c
++++ b/drivers/md/bcache/movinggc.c
+@@ -196,50 +196,48 @@ static unsigned int bucket_heap_top(struct cache *ca)
+
+ void bch_moving_gc(struct cache_set *c)
+ {
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ struct bucket *b;
+- unsigned int i;
++ unsigned long sectors_to_move, reserve_sectors;
+
+ if (!c->copy_gc_enabled)
+ return;
+
+ mutex_lock(&c->bucket_lock);
+
+- for_each_cache(ca, c, i) {
+- unsigned long sectors_to_move = 0;
+- unsigned long reserve_sectors = ca->sb.bucket_size *
++ sectors_to_move = 0;
++ reserve_sectors = ca->sb.bucket_size *
+ fifo_used(&ca->free[RESERVE_MOVINGGC]);
+
+- ca->heap.used = 0;
+-
+- for_each_bucket(b, ca) {
+- if (GC_MARK(b) == GC_MARK_METADATA ||
+- !GC_SECTORS_USED(b) ||
+- GC_SECTORS_USED(b) == ca->sb.bucket_size ||
+- atomic_read(&b->pin))
+- continue;
+-
+- if (!heap_full(&ca->heap)) {
+- sectors_to_move += GC_SECTORS_USED(b);
+- heap_add(&ca->heap, b, bucket_cmp);
+- } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+- sectors_to_move -= bucket_heap_top(ca);
+- sectors_to_move += GC_SECTORS_USED(b);
+-
+- ca->heap.data[0] = b;
+- heap_sift(&ca->heap, 0, bucket_cmp);
+- }
+- }
++ ca->heap.used = 0;
++
++ for_each_bucket(b, ca) {
++ if (GC_MARK(b) == GC_MARK_METADATA ||
++ !GC_SECTORS_USED(b) ||
++ GC_SECTORS_USED(b) == ca->sb.bucket_size ||
++ atomic_read(&b->pin))
++ continue;
+
+- while (sectors_to_move > reserve_sectors) {
+- heap_pop(&ca->heap, b, bucket_cmp);
+- sectors_to_move -= GC_SECTORS_USED(b);
++ if (!heap_full(&ca->heap)) {
++ sectors_to_move += GC_SECTORS_USED(b);
++ heap_add(&ca->heap, b, bucket_cmp);
++ } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
++ sectors_to_move -= bucket_heap_top(ca);
++ sectors_to_move += GC_SECTORS_USED(b);
++
++ ca->heap.data[0] = b;
++ heap_sift(&ca->heap, 0, bucket_cmp);
+ }
++ }
+
+- while (heap_pop(&ca->heap, b, bucket_cmp))
+- SET_GC_MOVE(b, 1);
++ while (sectors_to_move > reserve_sectors) {
++ heap_pop(&ca->heap, b, bucket_cmp);
++ sectors_to_move -= GC_SECTORS_USED(b);
+ }
+
++ while (heap_pop(&ca->heap, b, bucket_cmp))
++ SET_GC_MOVE(b, 1);
++
+ mutex_unlock(&c->bucket_lock);
+
+ c->moving_gc_keys.last_scanned = ZERO_KEY;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 718515644c89..4f5fbdc4404f 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -343,8 +343,9 @@ static void bcache_write_super_unlock(struct closure *cl)
+ void bcache_write_super(struct cache_set *c)
+ {
+ struct closure *cl = &c->sb_write;
+- struct cache *ca;
+- unsigned int i, version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
++ struct cache *ca = c->cache;
++ struct bio *bio = &ca->sb_bio;
++ unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
+
+ down(&c->sb_write_mutex);
+ closure_init(cl, &c->cl);
+@@ -354,23 +355,19 @@ void bcache_write_super(struct cache_set *c)
+ if (c->sb.version > version)
+ version = c->sb.version;
+
+- for_each_cache(ca, c, i) {
+- struct bio *bio = &ca->sb_bio;
+-
+- ca->sb.version = version;
+- ca->sb.seq = c->sb.seq;
+- ca->sb.last_mount = c->sb.last_mount;
++ ca->sb.version = version;
++ ca->sb.seq = c->sb.seq;
++ ca->sb.last_mount = c->sb.last_mount;
+
+- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
++ SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
+
+- bio_init(bio, ca->sb_bv, 1);
+- bio_set_dev(bio, ca->bdev);
+- bio->bi_end_io = write_super_endio;
+- bio->bi_private = ca;
++ bio_init(bio, ca->sb_bv, 1);
++ bio_set_dev(bio, ca->bdev);
++ bio->bi_end_io = write_super_endio;
++ bio->bi_private = ca;
+
+- closure_get(cl);
+- __write_super(&ca->sb, ca->sb_disk, bio);
+- }
++ closure_get(cl);
++ __write_super(&ca->sb, ca->sb_disk, bio);
+
+ closure_return_with_destructor(cl, bcache_write_super_unlock);
+ }
+@@ -772,26 +769,22 @@ static void bcache_device_unlink(struct bcache_device *d)
+ lockdep_assert_held(&bch_register_lock);
+
+ if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
+- unsigned int i;
+- struct cache *ca;
++ struct cache *ca = d->c->cache;
+
+ sysfs_remove_link(&d->c->kobj, d->name);
+ sysfs_remove_link(&d->kobj, "cache");
+
+- for_each_cache(ca, d->c, i)
+- bd_unlink_disk_holder(ca->bdev, d->disk);
++ bd_unlink_disk_holder(ca->bdev, d->disk);
+ }
+ }
+
+ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
+ const char *name)
+ {
+- unsigned int i;
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ int ret;
+
+- for_each_cache(ca, d->c, i)
+- bd_link_disk_holder(ca->bdev, d->disk);
++ bd_link_disk_holder(ca->bdev, d->disk);
+
+ snprintf(d->name, BCACHEDEVNAME_SIZE,
+ "%s%u", name, d->id);
+@@ -1663,7 +1656,6 @@ static void cache_set_free(struct closure *cl)
+ {
+ struct cache_set *c = container_of(cl, struct cache_set, cl);
+ struct cache *ca;
+- unsigned int i;
+
+ debugfs_remove(c->debug);
+
+@@ -1672,12 +1664,12 @@ static void cache_set_free(struct closure *cl)
+ bch_journal_free(c);
+
+ mutex_lock(&bch_register_lock);
+- for_each_cache(ca, c, i)
+- if (ca) {
+- ca->set = NULL;
+- c->cache = NULL;
+- kobject_put(&ca->kobj);
+- }
++ ca = c->cache;
++ if (ca) {
++ ca->set = NULL;
++ c->cache = NULL;
++ kobject_put(&ca->kobj);
++ }
+
+ bch_bset_sort_state_free(&c->sort);
+ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
+@@ -1703,9 +1695,8 @@ static void cache_set_free(struct closure *cl)
+ static void cache_set_flush(struct closure *cl)
+ {
+ struct cache_set *c = container_of(cl, struct cache_set, caching);
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ struct btree *b;
+- unsigned int i;
+
+ bch_cache_accounting_destroy(&c->accounting);
+
+@@ -1730,9 +1721,8 @@ static void cache_set_flush(struct closure *cl)
+ mutex_unlock(&b->write_lock);
+ }
+
+- for_each_cache(ca, c, i)
+- if (ca->alloc_thread)
+- kthread_stop(ca->alloc_thread);
++ if (ca->alloc_thread)
++ kthread_stop(ca->alloc_thread);
+
+ if (c->journal.cur) {
+ cancel_delayed_work_sync(&c->journal.work);
+@@ -1973,16 +1963,14 @@ static int run_cache_set(struct cache_set *c)
+ {
+ const char *err = "cannot allocate memory";
+ struct cached_dev *dc, *t;
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ struct closure cl;
+- unsigned int i;
+ LIST_HEAD(journal);
+ struct journal_replay *l;
+
+ closure_init_stack(&cl);
+
+- for_each_cache(ca, c, i)
+- c->nbuckets += ca->sb.nbuckets;
++ c->nbuckets = ca->sb.nbuckets;
+ set_gc_sectors(c);
+
+ if (CACHE_SYNC(&c->sb)) {
+@@ -2002,10 +1990,8 @@ static int run_cache_set(struct cache_set *c)
+ j = &list_entry(journal.prev, struct journal_replay, list)->j;
+
+ err = "IO error reading priorities";
+- for_each_cache(ca, c, i) {
+- if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
+- goto err;
+- }
++ if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
++ goto err;
+
+ /*
+ * If prio_read() fails it'll call cache_set_error and we'll
+@@ -2049,9 +2035,8 @@ static int run_cache_set(struct cache_set *c)
+ bch_journal_next(&c->journal);
+
+ err = "error starting allocator thread";
+- for_each_cache(ca, c, i)
+- if (bch_cache_allocator_start(ca))
+- goto err;
++ if (bch_cache_allocator_start(ca))
++ goto err;
+
+ /*
+ * First place it's safe to allocate: btree_check() and
+@@ -2070,28 +2055,23 @@ static int run_cache_set(struct cache_set *c)
+ if (bch_journal_replay(c, &journal))
+ goto err;
+ } else {
+- pr_notice("invalidating existing data\n");
+-
+- for_each_cache(ca, c, i) {
+- unsigned int j;
++ unsigned int j;
+
+- ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
+- 2, SB_JOURNAL_BUCKETS);
++ pr_notice("invalidating existing data\n");
++ ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
++ 2, SB_JOURNAL_BUCKETS);
+
+- for (j = 0; j < ca->sb.keys; j++)
+- ca->sb.d[j] = ca->sb.first_bucket + j;
+- }
++ for (j = 0; j < ca->sb.keys; j++)
++ ca->sb.d[j] = ca->sb.first_bucket + j;
+
+ bch_initial_gc_finish(c);
+
+ err = "error starting allocator thread";
+- for_each_cache(ca, c, i)
+- if (bch_cache_allocator_start(ca))
+- goto err;
++ if (bch_cache_allocator_start(ca))
++ goto err;
+
+ mutex_lock(&c->bucket_lock);
+- for_each_cache(ca, c, i)
+- bch_prio_write(ca, true);
++ bch_prio_write(ca, true);
+ mutex_unlock(&c->bucket_lock);
+
+ err = "cannot allocate new UUID bucket";
+@@ -2466,13 +2446,14 @@ static bool bch_is_open_backing(struct block_device *bdev)
+ static bool bch_is_open_cache(struct block_device *bdev)
+ {
+ struct cache_set *c, *tc;
+- struct cache *ca;
+- unsigned int i;
+
+- list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+- for_each_cache(ca, c, i)
+- if (ca->bdev == bdev)
+- return true;
++ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
++ struct cache *ca = c->cache;
++
++ if (ca->bdev == bdev)
++ return true;
++ }
++
+ return false;
+ }
+
+--
+2.26.2
+
diff --git a/for-next/0007-bcache-add-set_uuid-in-struct-cache_set.patch b/for-next/0007-bcache-add-set_uuid-in-struct-cache_set.patch
new file mode 100644
index 0000000..d0b01c9
--- /dev/null
+++ b/for-next/0007-bcache-add-set_uuid-in-struct-cache_set.patch
@@ -0,0 +1,173 @@
+From 911b03149e3f4b3389426f7fe4aea6d27604f795 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 20:12:07 +0800
+Subject: [PATCH 07/15] bcache: add set_uuid in struct cache_set
+
+This patch adds a separate set_uuid[16] in struct cache_set, to store
+the uuid of the cache set. This is preparation for removing the
+embedded struct cache_sb from struct cache_set.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/bcache.h | 1 +
+ drivers/md/bcache/debug.c | 2 +-
+ drivers/md/bcache/super.c | 24 ++++++++++++------------
+ include/trace/events/bcache.h | 4 ++--
+ 4 files changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 7ffe6b2d179b..94a62acac4fc 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -668,6 +668,7 @@ struct cache_set {
+ struct mutex verify_lock;
+ #endif
+
++ uint8_t set_uuid[16];
+ unsigned int nr_uuids;
+ struct uuid_entry *uuids;
+ BKEY_PADDED(uuid_bucket);
+diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
+index 336f43910383..0ccc1b0baa42 100644
+--- a/drivers/md/bcache/debug.c
++++ b/drivers/md/bcache/debug.c
+@@ -238,7 +238,7 @@ void bch_debug_init_cache_set(struct cache_set *c)
+ if (!IS_ERR_OR_NULL(bcache_debug)) {
+ char name[50];
+
+- snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
++ snprintf(name, 50, "bcache-%pU", c->set_uuid);
+ c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
+ &cache_set_debug_ops);
+ }
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 4f5fbdc4404f..a08e353f7ad6 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1189,8 +1189,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ struct cached_dev *exist_dc, *t;
+ int ret = 0;
+
+- if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
+- (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
++ if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
++ (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
+ return -ENOENT;
+
+ if (dc->disk.c) {
+@@ -1262,7 +1262,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ u->first_reg = u->last_reg = rtime;
+ bch_uuid_write(c);
+
+- memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
++ memcpy(dc->sb.set_uuid, c->set_uuid, 16);
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+
+ bch_write_bdev_super(dc, &cl);
+@@ -1324,7 +1324,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ pr_info("Caching %s as %s on set %pU\n",
+ dc->backing_dev_name,
+ dc->disk.disk->disk_name,
+- dc->disk.c->sb.set_uuid);
++ dc->disk.c->set_uuid);
+ return 0;
+ }
+
+@@ -1632,7 +1632,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
+ vaf.va = &args;
+
+ pr_err("error on %pU: %pV, disabling caching\n",
+- c->sb.set_uuid, &vaf);
++ c->set_uuid, &vaf);
+
+ va_end(args);
+
+@@ -1685,7 +1685,7 @@ static void cache_set_free(struct closure *cl)
+ list_del(&c->list);
+ mutex_unlock(&bch_register_lock);
+
+- pr_info("Cache set %pU unregistered\n", c->sb.set_uuid);
++ pr_info("Cache set %pU unregistered\n", c->set_uuid);
+ wake_up(&unregister_wait);
+
+ closure_debug_destroy(&c->cl);
+@@ -1755,7 +1755,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
+ {
+ if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
+ pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
+- d->disk->disk_name, c->sb.set_uuid);
++ d->disk->disk_name, c->set_uuid);
+ bcache_device_stop(d);
+ } else if (atomic_read(&dc->has_dirty)) {
+ /*
+@@ -1862,7 +1862,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+
+ bch_cache_accounting_init(&c->accounting, &c->cl);
+
+- memcpy(c->sb.set_uuid, sb->set_uuid, 16);
++ memcpy(c->set_uuid, sb->set_uuid, 16);
+ c->sb.block_size = sb->block_size;
+ c->sb.bucket_size = sb->bucket_size;
+ c->sb.nr_in_set = sb->nr_in_set;
+@@ -2145,7 +2145,7 @@ static const char *register_cache_set(struct cache *ca)
+ struct cache_set *c;
+
+ list_for_each_entry(c, &bch_cache_sets, list)
+- if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
++ if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
+ if (c->cache)
+ return "duplicate cache set member";
+
+@@ -2163,7 +2163,7 @@ static const char *register_cache_set(struct cache *ca)
+ return err;
+
+ err = "error creating kobject";
+- if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
++ if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
+ kobject_add(&c->internal, &c->kobj, "internal"))
+ goto err;
+
+@@ -2188,7 +2188,7 @@ static const char *register_cache_set(struct cache *ca)
+ */
+ if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
+ c->sb.version = ca->sb.version;
+- memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
++ memcpy(c->set_uuid, ca->sb.set_uuid, 16);
+ c->sb.flags = ca->sb.flags;
+ c->sb.seq = ca->sb.seq;
+ pr_debug("set version = %llu\n", c->sb.version);
+@@ -2703,7 +2703,7 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
+ list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
+ char *pdev_set_uuid = pdev->dc->sb.set_uuid;
+- char *set_uuid = c->sb.uuid;
++ char *set_uuid = c->set_uuid;
+
+ if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
+ list_del(&pdev->list);
+diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
+index 0bddea663b3b..e41c611d6d3b 100644
+--- a/include/trace/events/bcache.h
++++ b/include/trace/events/bcache.h
+@@ -164,7 +164,7 @@ TRACE_EVENT(bcache_write,
+ ),
+
+ TP_fast_assign(
+- memcpy(__entry->uuid, c->sb.set_uuid, 16);
++ memcpy(__entry->uuid, c->set_uuid, 16);
+ __entry->inode = inode;
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->nr_sector = bio->bi_iter.bi_size >> 9;
+@@ -200,7 +200,7 @@ DECLARE_EVENT_CLASS(cache_set,
+ ),
+
+ TP_fast_assign(
+- memcpy(__entry->uuid, c->sb.set_uuid, 16);
++ memcpy(__entry->uuid, c->set_uuid, 16);
+ ),
+
+ TP_printk("%pU", __entry->uuid)
+--
+2.26.2
+
diff --git a/for-next/0008-bcache-only-use-block_bytes-on-struct-cache.patch b/for-next/0008-bcache-only-use-block_bytes-on-struct-cache.patch
new file mode 100644
index 0000000..ee9d2a5
--- /dev/null
+++ b/for-next/0008-bcache-only-use-block_bytes-on-struct-cache.patch
@@ -0,0 +1,258 @@
+From 1d6375bf8fb7bc3ea966175f010fc88139306b7c Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 21:25:58 +0800
+Subject: [PATCH 08/15] bcache: only use block_bytes() on struct cache
+
+Because struct cache_set and struct cache both contain a struct
+cache_sb, the macro block_bytes() can be used on both of them. Once the
+embedded struct cache_sb is removed from struct cache_set, this macro
+won't be usable on struct cache_set anymore.
+
+This patch unifies all block_bytes() usage on struct cache only; this
+is one of the preparations for removing the embedded struct cache_sb
+from struct cache_set.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/bcache.h | 2 +-
+ drivers/md/bcache/btree.c | 24 ++++++++++++------------
+ drivers/md/bcache/debug.c | 8 ++++----
+ drivers/md/bcache/journal.c | 8 ++++----
+ drivers/md/bcache/request.c | 2 +-
+ drivers/md/bcache/super.c | 2 +-
+ drivers/md/bcache/sysfs.c | 2 +-
+ 7 files changed, 24 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 94a62acac4fc..29bec61cafbb 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -759,7 +759,7 @@ struct bbio {
+
+ #define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
+ #define bucket_bytes(c) ((c)->sb.bucket_size << 9)
+-#define block_bytes(c) ((c)->sb.block_size << 9)
++#define block_bytes(ca) ((ca)->sb.block_size << 9)
+
+ static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
+ {
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index f626d536981e..1f3d4870a8b2 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -104,7 +104,7 @@
+
+ static inline struct bset *write_block(struct btree *b)
+ {
+- return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
++ return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
+ }
+
+ static void bch_btree_init_next(struct btree *b)
+@@ -173,7 +173,7 @@ void bch_btree_node_read_done(struct btree *b)
+ goto err;
+
+ err = "bad btree header";
+- if (b->written + set_blocks(i, block_bytes(b->c)) >
++ if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
+ btree_blocks(b))
+ goto err;
+
+@@ -199,13 +199,13 @@ void bch_btree_node_read_done(struct btree *b)
+
+ bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
+
+- b->written += set_blocks(i, block_bytes(b->c));
++ b->written += set_blocks(i, block_bytes(b->c->cache));
+ }
+
+ err = "corrupted btree";
+ for (i = write_block(b);
+ bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
+- i = ((void *) i) + block_bytes(b->c))
++ i = ((void *) i) + block_bytes(b->c->cache))
+ if (i->seq == b->keys.set[0].data->seq)
+ goto err;
+
+@@ -347,7 +347,7 @@ static void do_btree_node_write(struct btree *b)
+
+ b->bio->bi_end_io = btree_node_write_endio;
+ b->bio->bi_private = cl;
+- b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
++ b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
+ b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
+ bch_bio_map(b->bio, i);
+
+@@ -423,10 +423,10 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
+
+ do_btree_node_write(b);
+
+- atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
++ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
+ &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
+
+- b->written += set_blocks(i, block_bytes(b->c));
++ b->written += set_blocks(i, block_bytes(b->c->cache));
+ }
+
+ void bch_btree_node_write(struct btree *b, struct closure *parent)
+@@ -1344,7 +1344,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+
+ if (nodes < 2 ||
+ __set_blocks(b->keys.set[0].data, keys,
+- block_bytes(b->c)) > blocks * (nodes - 1))
++ block_bytes(b->c->cache)) > blocks * (nodes - 1))
+ return 0;
+
+ for (i = 0; i < nodes; i++) {
+@@ -1378,7 +1378,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ k = bkey_next(k)) {
+ if (__set_blocks(n1, n1->keys + keys +
+ bkey_u64s(k),
+- block_bytes(b->c)) > blocks)
++ block_bytes(b->c->cache)) > blocks)
+ break;
+
+ last = k;
+@@ -1394,7 +1394,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ * though)
+ */
+ if (__set_blocks(n1, n1->keys + n2->keys,
+- block_bytes(b->c)) >
++ block_bytes(b->c->cache)) >
+ btree_blocks(new_nodes[i]))
+ goto out_unlock_nocoalesce;
+
+@@ -1403,7 +1403,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ last = &r->b->key;
+ }
+
+- BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
++ BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
+ btree_blocks(new_nodes[i]));
+
+ if (last)
+@@ -2210,7 +2210,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
+ goto err;
+
+ split = set_blocks(btree_bset_first(n1),
+- block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
++ block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
+
+ if (split) {
+ unsigned int keys = 0;
+diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
+index 0ccc1b0baa42..b00fd08d696b 100644
+--- a/drivers/md/bcache/debug.c
++++ b/drivers/md/bcache/debug.c
+@@ -25,8 +25,8 @@ struct dentry *bcache_debug;
+ for (i = (start); \
+ (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
+ i->seq == (start)->seq; \
+- i = (void *) i + set_blocks(i, block_bytes(b->c)) * \
+- block_bytes(b->c))
++ i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) * \
++ block_bytes(b->c->cache))
+
+ void bch_btree_verify(struct btree *b)
+ {
+@@ -82,14 +82,14 @@ void bch_btree_verify(struct btree *b)
+
+ for_each_written_bset(b, ondisk, i) {
+ unsigned int block = ((void *) i - (void *) ondisk) /
+- block_bytes(b->c);
++ block_bytes(b->c->cache);
+
+ pr_err("*** on disk block %u:\n", block);
+ bch_dump_bset(&b->keys, i, block);
+ }
+
+ pr_err("*** block %zu not written\n",
+- ((void *) i - (void *) ondisk) / block_bytes(b->c));
++ ((void *) i - (void *) ondisk) / block_bytes(b->c->cache));
+
+ for (j = 0; j < inmemory->keys; j++)
+ if (inmemory->d[j] != sorted->d[j])
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index e89ae7c4ba97..ab0d06e4851d 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -98,7 +98,7 @@ reread: left = ca->sb.bucket_size - offset;
+ return ret;
+ }
+
+- blocks = set_blocks(j, block_bytes(ca->set));
++ blocks = set_blocks(j, block_bytes(ca));
+
+ /*
+ * Nodes in 'list' are in linear increasing order of
+@@ -734,7 +734,7 @@ static void journal_write_unlocked(struct closure *cl)
+ struct cache *ca = c->cache;
+ struct journal_write *w = c->journal.cur;
+ struct bkey *k = &c->journal.key;
+- unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
++ unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
+ c->sb.block_size;
+
+ struct bio *bio;
+@@ -754,7 +754,7 @@ static void journal_write_unlocked(struct closure *cl)
+ return;
+ }
+
+- c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
++ c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));
+
+ w->data->btree_level = c->root->level;
+
+@@ -847,7 +847,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
+ struct journal_write *w = c->journal.cur;
+
+ sectors = __set_blocks(w->data, w->data->keys + nkeys,
+- block_bytes(c)) * c->sb.block_size;
++ block_bytes(c->cache)) * c->sb.block_size;
+
+ if (sectors <= min_t(size_t,
+ c->journal.blocks_free * c->sb.block_size,
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index c7cadaafa947..02408fdbf5bb 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -99,7 +99,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
+ * bch_data_insert_keys() will insert the keys created so far
+ * and finish the rest when the keylist is empty.
+ */
+- if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
++ if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
+ return -ENOMEM;
+
+ return __bch_keylist_realloc(l, u64s);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index a08e353f7ad6..26f0c37c126e 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1528,7 +1528,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
+
+ kobject_init(&d->kobj, &bch_flash_dev_ktype);
+
+- if (bcache_device_init(d, block_bytes(c), u->sectors,
++ if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
+ NULL, &bcache_flash_ops))
+ goto err;
+
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index ac06c0bc3c0a..b9f524ab5cc8 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -714,7 +714,7 @@ SHOW(__bch_cache_set)
+ sysfs_print(synchronous, CACHE_SYNC(&c->sb));
+ sysfs_print(journal_delay_ms, c->journal_delay_ms);
+ sysfs_hprint(bucket_size, bucket_bytes(c));
+- sysfs_hprint(block_size, block_bytes(c));
++ sysfs_hprint(block_size, block_bytes(c->cache));
+ sysfs_print(tree_depth, c->root->level);
+ sysfs_print(root_usage_percent, bch_root_usage(c));
+
+--
+2.26.2
+
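A minimal user-space sketch of the access-path change this patch makes.
The struct definitions below are simplified stand-ins for the real ones
in drivers/md/bcache/bcache.h, and the 4 KiB block size is an assumed
example value:

	/* sketch only: simplified stand-ins, not the kernel definitions */
	#include <stdio.h>
	#include <stdint.h>

	struct cache_sb  { uint16_t block_size; }; /* size in 512-byte sectors */
	struct cache     { struct cache_sb sb; };
	struct cache_set { struct cache *cache; }; /* single cache per set */
	struct btree     { struct cache_set *c; };

	/* old: #define block_bytes(c) ((c)->sb.block_size << 9) -- usable on
	 * both struct cache and struct cache_set; the new form takes only a
	 * struct cache: */
	#define block_bytes(ca) ((ca)->sb.block_size << 9)

	int main(void)
	{
		struct cache ca = { .sb = { .block_size = 8 } }; /* 8 sectors = 4 KiB */
		struct cache_set cs = { .cache = &ca };
		struct btree b = { .c = &cs };

		/* every former block_bytes(b->c) becomes block_bytes(b->c->cache) */
		printf("%d\n", block_bytes(b.c->cache)); /* prints 4096 */
		return 0;
	}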
diff --git a/for-next/0009-bcache-remove-useless-alloc_bucket_pages.patch b/for-next/0009-bcache-remove-useless-alloc_bucket_pages.patch
new file mode 100644
index 0000000..bff209f
--- /dev/null
+++ b/for-next/0009-bcache-remove-useless-alloc_bucket_pages.patch
@@ -0,0 +1,30 @@
+From 82ec86664c50fe223b9245d23be4e62e04246bf5 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 21:28:23 +0800
+Subject: [PATCH 09/15] bcache: remove useless alloc_bucket_pages()
+
+Now that no one uses alloc_bucket_pages() anymore, remove it from super.c.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/super.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 26f0c37c126e..35f6b57cc743 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1832,9 +1832,6 @@ void bch_cache_set_unregister(struct cache_set *c)
+ bch_cache_set_stop(c);
+ }
+
+-#define alloc_bucket_pages(gfp, c) \
+- ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
+-
+ #define alloc_meta_bucket_pages(gfp, sb) \
+ ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
+
+--
+2.26.2
+
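For reference, a sketch of the arithmetic behind the removed macro:
__get_free_pages() takes an allocation order, so alloc_bucket_pages()
converted a bucket's page count to its log2 via ilog2(). PAGE_SECTORS
= 8 (4 KiB pages, 512-byte sectors) and the bucket size are assumed
example values:

	#include <stdio.h>

	#define PAGE_SECTORS 8 /* assumed: 4 KiB page / 512-byte sector */

	/* user-space stand-in for the kernel's ilog2(); v must be a power of two */
	static unsigned ilog2_u(unsigned v)
	{
		unsigned r = 0;
		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned bucket_size = 1024;                 /* sectors: a 512 KiB bucket */
		unsigned pages = bucket_size / PAGE_SECTORS; /* bucket_pages(c) == 128 */

		/* the removed macro passed this order to __get_free_pages(),
		 * which returns 2^order physically contiguous, zeroed pages */
		printf("order %u -> %u pages\n", ilog2_u(pages), 1u << ilog2_u(pages));
		return 0;
	}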
diff --git a/for-next/0010-bcache-remove-useless-bucket_pages.patch b/for-next/0010-bcache-remove-useless-bucket_pages.patch
new file mode 100644
index 0000000..94af975
--- /dev/null
+++ b/for-next/0010-bcache-remove-useless-bucket_pages.patch
@@ -0,0 +1,30 @@
+From dca6956abe8661465555fe765df37a4a41eb2afe Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 21:15:28 +0800
+Subject: [PATCH 10/15] bcache: remove useless bucket_pages()
+
+alloc_bucket_pages() was the only user of bucket_pages(). Since
+alloc_bucket_pages() has been removed from the bcache code, it is safe
+to remove the now-useless macro bucket_pages().
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/bcache.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 29bec61cafbb..48a2585b6bbb 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -757,7 +757,6 @@ struct bbio {
+ #define btree_default_blocks(c) \
+ ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+
+-#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
+ #define bucket_bytes(c) ((c)->sb.bucket_size << 9)
+ #define block_bytes(ca) ((ca)->sb.block_size << 9)
+
+--
+2.26.2
+
diff --git a/for-next/0011-bcache-only-use-bucket_bytes-on-struct-cache.patch b/for-next/0011-bcache-only-use-bucket_bytes-on-struct-cache.patch
new file mode 100644
index 0000000..220ff59
--- /dev/null
+++ b/for-next/0011-bcache-only-use-bucket_bytes-on-struct-cache.patch
@@ -0,0 +1,50 @@
+From 73f58aab8bb461bea4c23cb16bfe2898910211fc Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 21:20:48 +0800
+Subject: [PATCH 11/15] bcache: only use bucket_bytes() on struct cache
+
+Because struct cache_set and struct cache both have a struct cache_sb,
+the macro bucket_bytes() is currently used on both of them. Once the
+embedded struct cache_sb is removed from struct cache_set, this macro
+won't be used on struct cache_set anymore.
+
+This patch unifies all bucket_bytes() usage to be only on struct
+cache; it is one of the preparations for removing the embedded struct
+cache_sb from struct cache_set.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/bcache.h | 2 +-
+ drivers/md/bcache/sysfs.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 48a2585b6bbb..94d4baf4c405 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -757,7 +757,7 @@ struct bbio {
+ #define btree_default_blocks(c) \
+ ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+
+-#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
++#define bucket_bytes(ca) ((ca)->sb.bucket_size << 9)
+ #define block_bytes(ca) ((ca)->sb.block_size << 9)
+
+ static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index b9f524ab5cc8..4bfe98faadcc 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -713,7 +713,7 @@ SHOW(__bch_cache_set)
+
+ sysfs_print(synchronous, CACHE_SYNC(&c->sb));
+ sysfs_print(journal_delay_ms, c->journal_delay_ms);
+- sysfs_hprint(bucket_size, bucket_bytes(c));
++ sysfs_hprint(bucket_size, bucket_bytes(c->cache));
+ sysfs_hprint(block_size, block_bytes(c->cache));
+ sysfs_print(tree_depth, c->root->level);
+ sysfs_print(root_usage_percent, bch_root_usage(c));
+--
+2.26.2
+
diff --git a/for-next/0012-bcache-don-t-check-seq-numbers-in-register_cache_set.patch b/for-next/0012-bcache-don-t-check-seq-numbers-in-register_cache_set.patch
new file mode 100644
index 0000000..0b5cd9e
--- /dev/null
+++ b/for-next/0012-bcache-don-t-check-seq-numbers-in-register_cache_set.patch
@@ -0,0 +1,52 @@
+From 25fd83f33059d1ceb76a3e743bb0c681687a9f96 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 23:28:26 +0800
+Subject: [PATCH 12/15] bcache: don't check seq numbers in register_cache_set()
+
+To update the partial super block of the cache set, the seq numbers of
+the cache and the cache set are checked in register_cache_set(). If
+the cache's seq number is larger than the cache set's, the cache set
+must update its partial super block from the cache's super block. This
+becomes unnecessary once the embedded struct cache_sb is removed.
+
+This patch removes the seq number checking from register_cache_set():
+there will no longer be such a partial super block in struct
+cache_set; the cache set will directly reference the in-memory super
+block from struct cache. This is a preparation patch for removing the
+embedded struct cache_sb from struct cache_set.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/super.c | 15 ---------------
+ 1 file changed, 15 deletions(-)
+
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 35f6b57cc743..74eb1886eaf3 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -2176,21 +2176,6 @@ static const char *register_cache_set(struct cache *ca)
+ sysfs_create_link(&c->kobj, &ca->kobj, buf))
+ goto err;
+
+- /*
+- * A special case is both ca->sb.seq and c->sb.seq are 0,
+- * such condition happens on a new created cache device whose
+- * super block is never flushed yet. In this case c->sb.version
+- * and other members should be updated too, otherwise we will
+- * have a mistaken super block version in cache set.
+- */
+- if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
+- c->sb.version = ca->sb.version;
+- memcpy(c->set_uuid, ca->sb.set_uuid, 16);
+- c->sb.flags = ca->sb.flags;
+- c->sb.seq = ca->sb.seq;
+- pr_debug("set version = %llu\n", c->sb.version);
+- }
+-
+ kobject_get(&ca->kobj);
+ ca->set = c;
+ ca->set->cache = ca;
+--
+2.26.2
+
diff --git a/for-next/0013-bcache-remove-can_attach_cache.patch b/for-next/0013-bcache-remove-can_attach_cache.patch
new file mode 100644
index 0000000..b339d94
--- /dev/null
+++ b/for-next/0013-bcache-remove-can_attach_cache.patch
@@ -0,0 +1,50 @@
+From f4c9cef0ff29dea29cf51dafdfc3494069179096 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 23:36:56 +0800
+Subject: [PATCH 13/15] bcache: remove can_attach_cache()
+
+After removing the embedded struct cache_sb from struct cache_set, the
+cache set will directly reference the in-memory super block of struct
+cache. It is unnecessary to compare block_size, bucket_size and
+nr_in_set from the identical in-memory super block in can_attach_cache().
+
+This is a preparation patch for later removing cache_set->sb from
+struct cache_set.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/super.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 74eb1886eaf3..78f99f75fa28 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -2128,13 +2128,6 @@ static int run_cache_set(struct cache_set *c)
+ return -EIO;
+ }
+
+-static bool can_attach_cache(struct cache *ca, struct cache_set *c)
+-{
+- return ca->sb.block_size == c->sb.block_size &&
+- ca->sb.bucket_size == c->sb.bucket_size &&
+- ca->sb.nr_in_set == c->sb.nr_in_set;
+-}
+-
+ static const char *register_cache_set(struct cache *ca)
+ {
+ char buf[12];
+@@ -2146,9 +2139,6 @@ static const char *register_cache_set(struct cache *ca)
+ if (c->cache)
+ return "duplicate cache set member";
+
+- if (!can_attach_cache(ca, c))
+- return "cache sb does not match set";
+-
+ if (!CACHE_SYNC(&ca->sb))
+ SET_CACHE_SYNC(&c->sb, false);
+
+--
+2.26.2
+
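To see why the check became redundant, a sketch with simplified structs
(not the kernel ones): once the set's super block is the cache's own
super block, can_attach_cache() compares each field against itself:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct cache_sb  { uint16_t block_size, bucket_size, nr_in_set; };
	struct cache     { struct cache_sb sb; };
	struct cache_set { struct cache *cache; };

	/* what the removed check degenerates to after this series: with a
	 * single cache per set and no embedded c->sb, both sides of each
	 * comparison read the same memory */
	static bool can_attach_cache(struct cache *ca, struct cache_set *c)
	{
		return ca->sb.block_size == c->cache->sb.block_size &&
		       ca->sb.bucket_size == c->cache->sb.bucket_size &&
		       ca->sb.nr_in_set == c->cache->sb.nr_in_set;
	}

	int main(void)
	{
		struct cache ca = { .sb = { 8, 1024, 1 } };
		struct cache_set cs = { .cache = &ca };

		/* cs.cache == &ca, so this is always true -- hence the removal */
		printf("%d\n", can_attach_cache(&ca, &cs)); /* prints 1 */
		return 0;
	}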
diff --git a/for-next/0014-bcache-check-and-set-sync-status-on-cache-s-in-memor.patch b/for-next/0014-bcache-check-and-set-sync-status-on-cache-s-in-memor.patch
new file mode 100644
index 0000000..b488067
--- /dev/null
+++ b/for-next/0014-bcache-check-and-set-sync-status-on-cache-s-in-memor.patch
@@ -0,0 +1,110 @@
+From fef2bb922757ef7855b5354f6e7e08cecf7315d5 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 23:53:52 +0800
+Subject: [PATCH 14/15] bcache: check and set sync status on cache's in-memory
+ super block
+
+Currently the cache's sync status is checked and set on the cache
+set's in-memory partial super block. After the embedded struct
+cache_sb is removed from struct cache_set and the cache's in-memory
+super block is referenced directly instead, the sync status can be set
+and checked directly on the cache's super block.
+
+This patch checks and sets the cache sync status directly on the
+cache's in-memory super block. This is a preparation for later
+removing the embedded struct cache_sb from struct cache_set.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/alloc.c | 2 +-
+ drivers/md/bcache/journal.c | 2 +-
+ drivers/md/bcache/super.c | 7 ++-----
+ drivers/md/bcache/sysfs.c | 6 +++---
+ 4 files changed, 7 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 1b8310992dd0..65fdbdeb5134 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -361,7 +361,7 @@ static int bch_allocator_thread(void *arg)
+ * new stuff to them:
+ */
+ allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
+- if (CACHE_SYNC(&ca->set->sb)) {
++ if (CACHE_SYNC(&ca->sb)) {
+ /*
+ * This could deadlock if an allocation with a btree
+ * node locked ever blocked - having the btree node
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index ab0d06e4851d..cd46f33db507 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -915,7 +915,7 @@ atomic_t *bch_journal(struct cache_set *c,
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return NULL;
+
+- if (!CACHE_SYNC(&c->sb))
++ if (!CACHE_SYNC(&c->cache->sb))
+ return NULL;
+
+ w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 78f99f75fa28..f400e84cb653 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1970,7 +1970,7 @@ static int run_cache_set(struct cache_set *c)
+ c->nbuckets = ca->sb.nbuckets;
+ set_gc_sectors(c);
+
+- if (CACHE_SYNC(&c->sb)) {
++ if (CACHE_SYNC(&c->cache->sb)) {
+ struct bkey *k;
+ struct jset *j;
+
+@@ -2093,7 +2093,7 @@ static int run_cache_set(struct cache_set *c)
+ * everything is set up - fortunately journal entries won't be
+ * written until the SET_CACHE_SYNC() here:
+ */
+- SET_CACHE_SYNC(&c->sb, true);
++ SET_CACHE_SYNC(&c->cache->sb, true);
+
+ bch_journal_next(&c->journal);
+ bch_journal_meta(c, &cl);
+@@ -2139,9 +2139,6 @@ static const char *register_cache_set(struct cache *ca)
+ if (c->cache)
+ return "duplicate cache set member";
+
+- if (!CACHE_SYNC(&ca->sb))
+- SET_CACHE_SYNC(&c->sb, false);
+-
+ goto found;
+ }
+
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index 4bfe98faadcc..554e3afc9b68 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -711,7 +711,7 @@ SHOW(__bch_cache_set)
+ {
+ struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+
+- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
++ sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb));
+ sysfs_print(journal_delay_ms, c->journal_delay_ms);
+ sysfs_hprint(bucket_size, bucket_bytes(c->cache));
+ sysfs_hprint(block_size, block_bytes(c->cache));
+@@ -812,8 +812,8 @@ STORE(__bch_cache_set)
+ if (attr == &sysfs_synchronous) {
+ bool sync = strtoul_or_return(buf);
+
+- if (sync != CACHE_SYNC(&c->sb)) {
+- SET_CACHE_SYNC(&c->sb, sync);
++ if (sync != CACHE_SYNC(&c->cache->sb)) {
++ SET_CACHE_SYNC(&c->cache->sb, sync);
+ bcache_write_super(c);
+ }
+ }
+--
+2.26.2
+
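CACHE_SYNC() and SET_CACHE_SYNC() are generated bit-field accessors
over cache_sb->flags; below is a self-contained sketch of the BITMASK()
pattern behind them, simplified from include/uapi/linux/bcache.h (the
offset and width used here are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	struct cache_sb { uint64_t flags; };

	/* generates a getter and a setter for a bit field inside 'field' */
	#define BITMASK(name, type, field, offset, size)			\
	static inline uint64_t name(const type *k)				\
	{ return (k->field >> (offset)) & ~(~0ULL << (size)); }			\
										\
	static inline void SET_##name(type *k, uint64_t v)			\
	{									\
		k->field &= ~(~(~0ULL << (size)) << (offset));			\
		k->field |= (v & ~(~0ULL << (size))) << (offset);		\
	}

	BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1)

	int main(void)
	{
		struct cache_sb sb = { .flags = 0 };

		/* after this patch, callers flip the bit on the cache's own sb */
		SET_CACHE_SYNC(&sb, 1);
		printf("sync=%llu\n", (unsigned long long)CACHE_SYNC(&sb)); /* 1 */
		return 0;
	}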
diff --git a/for-next/0015-bcache-remove-embedded-struct-cache_sb-from-struct-c.patch b/for-next/0015-bcache-remove-embedded-struct-cache_sb-from-struct-c.patch
new file mode 100644
index 0000000..a15cfca
--- /dev/null
+++ b/for-next/0015-bcache-remove-embedded-struct-cache_sb-from-struct-c.patch
@@ -0,0 +1,469 @@
+From e255e6273d7278e6f0bd4ff94eb0d2cc058a7df4 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Sat, 22 Aug 2020 16:11:38 +0800
+Subject: [PATCH 15/15] bcache: remove embedded struct cache_sb from struct
+ cache_set
+
+Since the bcache code was merged into the mainline kernel, each cache
+set has only ever had one single cache in it. The multiple-caches
+framework is present, but the code is far from complete. Considering
+that multiple copies of cached data can also be stored on e.g. md raid1
+devices, there is no need to support multiple caches in one cache set.
+
+The previous preparation patches fixed up the dependencies so that a
+cache set explicitly has only a single cache. Now we don't have to
+maintain an embedded partial super block in struct cache_set; the
+in-memory super block can be referenced directly from struct cache.
+
+This patch removes the embedded struct cache_sb from struct cache_set,
+and fixes all locations where the super block was referenced through
+this removed copy, by referencing the in-memory super block of struct
+cache instead.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/alloc.c | 6 ++---
+ drivers/md/bcache/bcache.h | 4 +--
+ drivers/md/bcache/btree.c | 17 +++++++------
+ drivers/md/bcache/btree.h | 2 +-
+ drivers/md/bcache/extents.c | 6 ++---
+ drivers/md/bcache/features.c | 4 +--
+ drivers/md/bcache/io.c | 2 +-
+ drivers/md/bcache/journal.c | 11 ++++----
+ drivers/md/bcache/request.c | 4 +--
+ drivers/md/bcache/super.c | 47 +++++++++++++----------------------
+ drivers/md/bcache/writeback.c | 2 +-
+ 11 files changed, 46 insertions(+), 59 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 65fdbdeb5134..8c371d5eef8e 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -87,7 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
+ {
+ struct cache *ca;
+ struct bucket *b;
+- unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
++ unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
+ int r;
+
+ atomic_sub(sectors, &c->rescale);
+@@ -583,7 +583,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
+ struct open_bucket, list);
+ found:
+ if (!ret->sectors_free && KEY_PTRS(alloc)) {
+- ret->sectors_free = c->sb.bucket_size;
++ ret->sectors_free = c->cache->sb.bucket_size;
+ bkey_copy(&ret->key, alloc);
+ bkey_init(alloc);
+ }
+@@ -677,7 +677,7 @@ bool bch_alloc_sectors(struct cache_set *c,
+ &PTR_CACHE(c, &b->key, i)->sectors_written);
+ }
+
+- if (b->sectors_free < c->sb.block_size)
++ if (b->sectors_free < c->cache->sb.block_size)
+ b->sectors_free = 0;
+
+ /*
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 94d4baf4c405..1d57f48307e6 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -517,8 +517,6 @@ struct cache_set {
+ atomic_t idle_counter;
+ atomic_t at_max_writeback_rate;
+
+- struct cache_sb sb;
+-
+ struct cache *cache;
+
+ struct bcache_device **devices;
+@@ -799,7 +797,7 @@ static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
+
+ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
+ {
+- return s & (c->sb.bucket_size - 1);
++ return s & (c->cache->sb.bucket_size - 1);
+ }
+
+ static inline struct cache *PTR_CACHE(struct cache_set *c,
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 1f3d4870a8b2..910df242c83d 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -117,7 +117,7 @@ static void bch_btree_init_next(struct btree *b)
+
+ if (b->written < btree_blocks(b))
+ bch_bset_init_next(&b->keys, write_block(b),
+- bset_magic(&b->c->sb));
++ bset_magic(&b->c->cache->sb));
+
+ }
+
+@@ -155,7 +155,7 @@ void bch_btree_node_read_done(struct btree *b)
+ * See the comment arount cache_set->fill_iter.
+ */
+ iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
+- iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
++ iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
+ iter->used = 0;
+
+ #ifdef CONFIG_BCACHE_DEBUG
+@@ -178,7 +178,7 @@ void bch_btree_node_read_done(struct btree *b)
+ goto err;
+
+ err = "bad magic";
+- if (i->magic != bset_magic(&b->c->sb))
++ if (i->magic != bset_magic(&b->c->cache->sb))
+ goto err;
+
+ err = "bad checksum";
+@@ -219,7 +219,7 @@ void bch_btree_node_read_done(struct btree *b)
+
+ if (b->written < btree_blocks(b))
+ bch_bset_init_next(&b->keys, write_block(b),
+- bset_magic(&b->c->sb));
++ bset_magic(&b->c->cache->sb));
+ out:
+ mempool_free(iter, &b->c->fill_iter);
+ return;
+@@ -423,7 +423,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
+
+ do_btree_node_write(b);
+
+- atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
++ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
+ &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
+
+ b->written += set_blocks(i, block_bytes(b->c->cache));
+@@ -738,7 +738,7 @@ void bch_btree_cache_free(struct cache_set *c)
+ if (c->verify_data)
+ list_move(&c->verify_data->list, &c->btree_cache);
+
+- free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
++ free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
+ #endif
+
+ list_splice(&c->btree_cache_freeable,
+@@ -785,7 +785,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
+ mutex_init(&c->verify_lock);
+
+ c->verify_ondisk = (void *)
+- __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
++ __get_free_pages(GFP_KERNEL|__GFP_COMP,
++ ilog2(meta_bucket_pages(&c->cache->sb)));
+ if (!c->verify_ondisk) {
+ /*
+ * Don't worry about the mca_rereserve buckets
+@@ -1108,7 +1109,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+ }
+
+ b->parent = parent;
+- bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
++ bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
+
+ mutex_unlock(&c->bucket_lock);
+
+diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
+index 257969980c49..50482107134f 100644
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -194,7 +194,7 @@ static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
+
+ static inline void set_gc_sectors(struct cache_set *c)
+ {
+- atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
++ atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
+ }
+
+ void bkey_put(struct cache_set *c, struct bkey *k);
+diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
+index 9162af5bb6ec..f4658a1f37b8 100644
+--- a/drivers/md/bcache/extents.c
++++ b/drivers/md/bcache/extents.c
+@@ -54,7 +54,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
++ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
+ bucket < ca->sb.first_bucket ||
+ bucket >= ca->sb.nbuckets)
+ return true;
+@@ -75,7 +75,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+- if (KEY_SIZE(k) + r > c->sb.bucket_size)
++ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
+ return "bad, length too big";
+ if (bucket < ca->sb.first_bucket)
+ return "bad, short offset";
+@@ -136,7 +136,7 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+ size_t n = PTR_BUCKET_NR(b->c, k, j);
+
+ pr_cont(" bucket %zu", n);
+- if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
++ if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
+ pr_cont(" prio %i",
+ PTR_BUCKET(b->c, k, j)->prio);
+ }
+diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
+index 4442df48d28c..6469223f0b77 100644
+--- a/drivers/md/bcache/features.c
++++ b/drivers/md/bcache/features.c
+@@ -30,7 +30,7 @@ static struct feature feature_list[] = {
+ for (f = &feature_list[0]; f->compat != 0; f++) { \
+ if (f->compat != BCH_FEATURE_ ## type) \
+ continue; \
+- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) { \
++ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) { \
+ if (first) { \
+ out += snprintf(out, buf + size - out, \
+ "["); \
+@@ -44,7 +44,7 @@ static struct feature feature_list[] = {
+ \
+ out += snprintf(out, buf + size - out, "%s", f->string);\
+ \
+- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) \
++ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) \
+ out += snprintf(out, buf + size - out, "]"); \
+ \
+ first = false; \
+diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
+index a14a445618b4..dad71a6b7889 100644
+--- a/drivers/md/bcache/io.c
++++ b/drivers/md/bcache/io.c
+@@ -26,7 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
+ struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
+ struct bio *bio = &b->bio;
+
+- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->sb));
++ bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
+
+ return bio;
+ }
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index cd46f33db507..aefbdb7e003b 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -666,7 +666,7 @@ static void journal_reclaim(struct cache_set *c)
+
+ bkey_init(k);
+ SET_KEY_PTRS(k, 1);
+- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
++ c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;
+
+ out:
+ if (!journal_full(&c->journal))
+@@ -735,7 +735,7 @@ static void journal_write_unlocked(struct closure *cl)
+ struct journal_write *w = c->journal.cur;
+ struct bkey *k = &c->journal.key;
+ unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
+- c->sb.block_size;
++ ca->sb.block_size;
+
+ struct bio *bio;
+ struct bio_list list;
+@@ -762,7 +762,7 @@ static void journal_write_unlocked(struct closure *cl)
+ bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
+
+ w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
+- w->data->magic = jset_magic(&c->sb);
++ w->data->magic = jset_magic(&ca->sb);
+ w->data->version = BCACHE_JSET_VERSION;
+ w->data->last_seq = last_seq(&c->journal);
+ w->data->csum = csum_set(w->data);
+@@ -838,6 +838,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
+ size_t sectors;
+ struct closure cl;
+ bool wait = false;
++ struct cache *ca = c->cache;
+
+ closure_init_stack(&cl);
+
+@@ -847,10 +848,10 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
+ struct journal_write *w = c->journal.cur;
+
+ sectors = __set_blocks(w->data, w->data->keys + nkeys,
+- block_bytes(c->cache)) * c->sb.block_size;
++ block_bytes(ca)) * ca->sb.block_size;
+
+ if (sectors <= min_t(size_t,
+- c->journal.blocks_free * c->sb.block_size,
++ c->journal.blocks_free * ca->sb.block_size,
+ PAGE_SECTORS << JSET_BITS))
+ return w;
+
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index 02408fdbf5bb..37e9cf8dbfc1 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -394,8 +394,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
+ goto skip;
+ }
+
+- if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
+- bio_sectors(bio) & (c->sb.block_size - 1)) {
++ if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
++ bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
+ pr_debug("skipping unaligned io\n");
+ goto skip;
+ }
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index f400e84cb653..bc45cdf189af 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -350,16 +350,10 @@ void bcache_write_super(struct cache_set *c)
+ down(&c->sb_write_mutex);
+ closure_init(cl, &c->cl);
+
+- c->sb.seq++;
++ ca->sb.seq++;
+
+- if (c->sb.version > version)
+- version = c->sb.version;
+-
+- ca->sb.version = version;
+- ca->sb.seq = c->sb.seq;
+- ca->sb.last_mount = c->sb.last_mount;
+-
+- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
++ if (ca->sb.version < version)
++ ca->sb.version = version;
+
+ bio_init(bio, ca->sb_bv, 1);
+ bio_set_dev(bio, ca->bdev);
+@@ -477,7 +471,7 @@ static int __uuid_write(struct cache_set *c)
+ {
+ BKEY_PADDED(key) k;
+ struct closure cl;
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ unsigned int size;
+
+ closure_init_stack(&cl);
+@@ -486,13 +480,12 @@ static int __uuid_write(struct cache_set *c)
+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
+ return 1;
+
+- size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
++ size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
+ SET_KEY_SIZE(&k.key, size);
+ uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
+ closure_sync(&cl);
+
+ /* Only one bucket used for uuid write */
+- ca = PTR_CACHE(c, &k.key, 0);
+ atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
+
+ bkey_copy(&c->uuid_bucket, &k.key);
+@@ -1205,7 +1198,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ return -EINVAL;
+ }
+
+- if (dc->sb.block_size < c->sb.block_size) {
++ if (dc->sb.block_size < c->cache->sb.block_size) {
+ /* Will die */
+ pr_err("Couldn't attach %s: block size less than set's block size\n",
+ dc->backing_dev_name);
+@@ -1664,6 +1657,9 @@ static void cache_set_free(struct closure *cl)
+ bch_journal_free(c);
+
+ mutex_lock(&bch_register_lock);
++ bch_bset_sort_state_free(&c->sort);
++ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
++
+ ca = c->cache;
+ if (ca) {
+ ca->set = NULL;
+@@ -1671,8 +1667,6 @@ static void cache_set_free(struct closure *cl)
+ kobject_put(&ca->kobj);
+ }
+
+- bch_bset_sort_state_free(&c->sort);
+- free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
+
+ if (c->moving_gc_wq)
+ destroy_workqueue(c->moving_gc_wq);
+@@ -1838,6 +1832,7 @@ void bch_cache_set_unregister(struct cache_set *c)
+ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ {
+ int iter_size;
++ struct cache *ca = container_of(sb, struct cache, sb);
+ struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
+
+ if (!c)
+@@ -1860,23 +1855,15 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ bch_cache_accounting_init(&c->accounting, &c->cl);
+
+ memcpy(c->set_uuid, sb->set_uuid, 16);
+- c->sb.block_size = sb->block_size;
+- c->sb.bucket_size = sb->bucket_size;
+- c->sb.nr_in_set = sb->nr_in_set;
+- c->sb.last_mount = sb->last_mount;
+- c->sb.version = sb->version;
+- if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+- c->sb.feature_compat = sb->feature_compat;
+- c->sb.feature_ro_compat = sb->feature_ro_compat;
+- c->sb.feature_incompat = sb->feature_incompat;
+- }
+
++ c->cache = ca;
++ c->cache->set = c;
+ c->bucket_bits = ilog2(sb->bucket_size);
+ c->block_bits = ilog2(sb->block_size);
+- c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
++ c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
+ c->devices_max_used = 0;
+ atomic_set(&c->attached_dev_nr, 0);
+- c->btree_pages = meta_bucket_pages(&c->sb);
++ c->btree_pages = meta_bucket_pages(sb);
+ if (c->btree_pages > BTREE_MAX_PAGES)
+ c->btree_pages = max_t(int, c->btree_pages / 4,
+ BTREE_MAX_PAGES);
+@@ -1914,7 +1901,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+
+ if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
+ sizeof(struct bbio) +
+- sizeof(struct bio_vec) * meta_bucket_pages(&c->sb)))
++ sizeof(struct bio_vec) * meta_bucket_pages(sb)))
+ goto err;
+
+ if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
+@@ -1924,7 +1911,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
+ goto err;
+
+- c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
++ c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
+ if (!c->uuids)
+ goto err;
+
+@@ -2104,7 +2091,7 @@ static int run_cache_set(struct cache_set *c)
+ goto err;
+
+ closure_sync(&cl);
+- c->sb.last_mount = (u32)ktime_get_real_seconds();
++ c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
+ bcache_write_super(c);
+
+ list_for_each_entry_safe(dc, t, &uncached_devices, list)
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 4f4ad6b3d43a..3c74996978da 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -35,7 +35,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
+ * This is the size of the cache, minus the amount used for
+ * flash-only devices
+ */
+- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
++ uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
+ atomic_long_read(&c->flash_dev_dirty_sectors);
+
+ /*
+--
+2.26.2
+
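The end state of the series, as a compile-and-run sketch. The struct
layouts are simplified and the seq bump in write_super() only models the
bcache_write_super() change above, not the full kernel logic:

	#include <stdio.h>
	#include <stdint.h>

	/* before: struct cache_set { struct cache_sb sb; struct cache *cache; ... }
	 *         -- two super block copies to keep in sync
	 * after:  struct cache_set { struct cache *cache; ... }
	 *         -- one authoritative copy, owned by struct cache */
	struct cache_sb {
		uint64_t seq;
		uint16_t block_size, bucket_size;
	};

	struct cache {
		struct cache_sb sb;     /* the only in-memory super block */
	};

	struct cache_set {
		struct cache *cache;    /* single cache per set */
		uint8_t set_uuid[16];
	};

	static void write_super(struct cache_set *c)
	{
		/* no c->sb left to refresh: bump the cache's own seq directly */
		c->cache->sb.seq++;
	}

	int main(void)
	{
		struct cache ca = { .sb = { .seq = 41, .block_size = 8,
					    .bucket_size = 1024 } };
		struct cache_set cs = { .cache = &ca };

		write_super(&cs);
		printf("seq=%llu bucket_size=%u\n",
		       (unsigned long long)ca.sb.seq,
		       (unsigned)cs.cache->sb.bucket_size); /* seq=42 bucket_size=1024 */
		return 0;
	}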
diff --git a/for-next/v4-0001-docs-update-trusted-encrypted.rst.patch b/for-next/v4-0001-docs-update-trusted-encrypted.rst.patch
deleted file mode 100644
index 3c33311..0000000
--- a/for-next/v4-0001-docs-update-trusted-encrypted.rst.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From 79c9e5d5bc7814f3597bafef7298a26adf1cf894 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 7 Aug 2020 16:41:14 +0800
-Subject: [PATCH v4] docs: trusted-encrypted.rst: update parameters for command examples
-
-The parameters in command examples for tpm2_createprimary and
-tpm2_evictcontrol are outdated, people (like me) are not able to create
-trusted key by these command examples.
-
-This patch updates the parameters of command example tpm2_createprimary
-and tpm2_evictcontrol in trusted-encrypted.rst. With Linux kernel v5.8
-and tpm2-tools-4.1, people can create a trusted key by following the
-examples in this document.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
-Cc: Dan Williams <dan.j.williams@intel.com>
-Cc: James Bottomley <jejb@linux.ibm.com>
-Cc: Jason Gunthorpe <jgg@ziepe.ca>
-Cc: Jonathan Corbet <corbet@lwn.net>
-Cc: Mimi Zohar <zohar@linux.ibm.com>
-Cc: Peter Huewe <peterhuewe@gmx.de>
----
-Changelog:
-v4: update Reviewed-by list, and Cc linux-doc and linux-integrity
- maintainers.
-v3: update commit log with review comments from Jarkko Sakkinen.
-v2: remove the change of trusted key related operation.
-v1: initial version.
-
- Documentation/security/keys/trusted-encrypted.rst | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
-diff --git a/Documentation/security/keys/trusted-encrypted.rst b/Documentation/security/keys/trusted-encrypted.rst
-index 9483a7425ad5..1da879a68640 100644
---- a/Documentation/security/keys/trusted-encrypted.rst
-+++ b/Documentation/security/keys/trusted-encrypted.rst
-@@ -39,10 +39,9 @@ With the IBM TSS 2 stack::
-
- Or with the Intel TSS 2 stack::
-
-- #> tpm2_createprimary --hierarchy o -G rsa2048 -o key.ctxt
-+ #> tpm2_createprimary --hierarchy o -G rsa2048 -c key.ctxt
- [...]
-- handle: 0x800000FF
-- #> tpm2_evictcontrol -c key.ctxt -p 0x81000001
-+ #> tpm2_evictcontrol -c key.ctxt 0x81000001
- persistentHandle: 0x81000001
-
- Usage::
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0000-cover-letter.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0000-cover-letter.patch
new file mode 100644
index 0000000..a013fd3
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0000-cover-letter.patch
@@ -0,0 +1,73 @@
+From 6400744caa2bdbde213c6b336196ec074f715502 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Sat, 22 Aug 2020 19:35:42 +0800
+Subject: [PATCH v2 00/12] bcache: remove multiple caches code framework
+
+The multiple-caches code framework in bcache exists to store multiple
+copies of the cached data among multiple caches of the cache set. The
+current code just writes the data to each cache, without any extra
+condition handling (e.g. device failure, slow devices). This code
+framework was never completed and never will be. Considering that
+people can use md raid1 for the same data-duplication purpose, the
+multiple-caches framework is effectively dead code.
+
+Due to the multiple-caches framework, bcache has two data structures,
+struct cache and struct cache_set, to manage the cache device. Yet ever
+since bcache was merged into the mainline kernel in Linux v3.10, a cache
+set has only ever had one cache; the unnecessary two-level abstraction
+takes extra effort to maintain redundant information between struct
+cache and struct cache_set, for example the in-memory struct cache_sb.
+
+This is the first wave of effort to remove the multiple-caches
+framework and make the code-to-data-structure relation clearer. This
+series explicitly makes each cache set have only a single cache,
+removes the embedded partial super block from struct cache_set, and
+directly references the cache's in-memory super block instead.
+
+The patch set now passes basic smoke testing; I am posting this series
+early for your review and comments. More fixes will follow after
+further testing.
+
+Thanks in advance.
+
+Coly Li
+---
+Changelog:
+v2: Add reviewed-by from Hannes Reinecke, update patches by suggestion
+ from Hannes and Christoph.
+v1: initial version.
+
+Coly Li (12):
+ bcache: remove 'int n' from parameter list of bch_bucket_alloc_set()
+ bcache: explicitly make cache_set only have single cache
+ bcache: remove for_each_cache()
+ bcache: add set_uuid in struct cache_set
+ bcache: only use block_bytes() on struct cache
+ bcache: remove useless alloc_bucket_pages()
+ bcache: remove useless bucket_pages()
+ bcache: only use bucket_bytes() on struct cache
+ bcache: don't check seq numbers in register_cache_set()
+ bcache: remove can_attach_cache()
+ bcache: check and set sync status on cache's in-memory super block
+ bcache: remove embedded struct cache_sb from struct cache_set
+
+ drivers/md/bcache/alloc.c | 60 ++++-----
+ drivers/md/bcache/bcache.h | 29 ++--
+ drivers/md/bcache/btree.c | 144 ++++++++++----------
+ drivers/md/bcache/btree.h | 2 +-
+ drivers/md/bcache/debug.c | 10 +-
+ drivers/md/bcache/extents.c | 6 +-
+ drivers/md/bcache/features.c | 4 +-
+ drivers/md/bcache/io.c | 2 +-
+ drivers/md/bcache/journal.c | 246 ++++++++++++++++------------------
+ drivers/md/bcache/movinggc.c | 58 ++++----
+ drivers/md/bcache/request.c | 6 +-
+ drivers/md/bcache/super.c | 232 ++++++++++++--------------------
+ drivers/md/bcache/sysfs.c | 10 +-
+ drivers/md/bcache/writeback.c | 2 +-
+ include/trace/events/bcache.h | 4 +-
+ 15 files changed, 352 insertions(+), 463 deletions(-)
+
+--
+2.26.2
+
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch
index 8105f85..b72045a 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch
@@ -1,7 +1,7 @@
-From 9260c7e003b7652c9a8208fa479ff4c5d72a6737 Mon Sep 17 00:00:00 2001
+From ede5b363b28a12b2481232795aee44bf5344b8ec Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 00:07:05 +0800
-Subject: [PATCH v2 01/19] bcache: remove 'int n' from parameter list of
+Subject: [PATCH v2 01/12] bcache: remove 'int n' from parameter list of
bch_bucket_alloc_set()
The parameter 'int n' from bch_bucket_alloc_set() is not cleared
@@ -14,6 +14,7 @@ list of bch_bucket_alloc_set(), and explicitly allocates only 1 bucket
for its caller.
Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/md/bcache/alloc.c | 35 +++++++++++++++--------------------
drivers/md/bcache/bcache.h | 4 ++--
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch
index 6400487..bc863af 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch
@@ -1,7 +1,7 @@
-From da9ff41f507337ce4797935e8ba9b70da361d59d Mon Sep 17 00:00:00 2001
+From 4463f2bf6406f234972f6cb21413d685d2924dea Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 00:30:59 +0800
-Subject: [PATCH v2 02/19] bcache: explicitly make cache_set only have single
+Subject: [PATCH v2 02/12] bcache: explicitly make cache_set only have single
cache
Currently although the bcache code has a framework for multiple caches
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0003-bcache-remove-for_each_cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0003-bcache-remove-for_each_cache.patch
index 605fa1b..6cb26ed 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0003-bcache-remove-for_each_cache.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0003-bcache-remove-for_each_cache.patch
@@ -1,13 +1,14 @@
-From 50516df3a606a49a170bb14e26ed595aff4c84d0 Mon Sep 17 00:00:00 2001
+From 938e528cd160cbab1581c53c593572a39fed519a Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 01:26:09 +0800
-Subject: [PATCH v2 03/19] bcache: remove for_each_cache()
+Subject: [PATCH v2 03/12] bcache: remove for_each_cache()
Since now each cache_set explicitly has single cache, for_each_cache()
is unnecessary. This patch removes this macro, and update all locations
where it is used, and makes sure all code logic still being consistent.
Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/md/bcache/alloc.c | 17 ++-
drivers/md/bcache/bcache.h | 9 +-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch
index 8573fcd..41c998f 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch
@@ -1,13 +1,14 @@
-From 5f709f50fb5302b446ab136dd4673a68051b9299 Mon Sep 17 00:00:00 2001
+From edbe36debb518e5db2ef66cfd6852379d4758684 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 20:12:07 +0800
-Subject: [PATCH v2 04/19] bcache: add set_uuid in struct cache_set
+Subject: [PATCH v2 04/12] bcache: add set_uuid in struct cache_set
This patch adds a separated set_uuid[16] in struct cache_set, to store
the uuid of the cache set. This is the preparation to remove the
embedded struct cache_sb from struct cache_set.
Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/md/bcache/bcache.h | 1 +
drivers/md/bcache/debug.c | 2 +-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch
index 1c10d70..74412bb 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch
@@ -1,7 +1,7 @@
-From 178fa57c56550568bf0d4140d8dc689cc6c11682 Mon Sep 17 00:00:00 2001
+From 9708567a39ed76a51bde0065b05c3d30fa58aa32 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 21:25:58 +0800
-Subject: [PATCH v2 05/19] bcache: only use block_bytes() on struct cache
+Subject: [PATCH v2 05/12] bcache: only use block_bytes() on struct cache
Because struct cache_set and struct cache both have struct cache_sb,
therefore macro block_bytes() can be used on both of them. When removing
@@ -13,6 +13,7 @@ one of the preparation to remove the embedded struct cache_sb from
struct cache_set.
Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/md/bcache/bcache.h | 2 +-
drivers/md/bcache/btree.c | 24 ++++++++++++------------
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch
index 38aae59..1194d90 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch
@@ -1,11 +1,12 @@
-From 811f8198f1d5337729bbd855bf0e381e60eeeca3 Mon Sep 17 00:00:00 2001
+From 395b4ef812e2a4ecf373a28d682a64cbda79ea34 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 21:28:23 +0800
-Subject: [PATCH v2 06/19] bcache: remove useless alloc_bucket_pages()
+Subject: [PATCH v2 06/12] bcache: remove useless alloc_bucket_pages()
Now no one uses alloc_bucket_pages() anymore, remove it from bcache.h.
Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/md/bcache/super.c | 3 ---
1 file changed, 3 deletions(-)
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0007-bcache-remove-useless-bucket_pages.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0007-bcache-remove-useless-bucket_pages.patch
index 2cfd09f..b31a46f 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0007-bcache-remove-useless-bucket_pages.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0007-bcache-remove-useless-bucket_pages.patch
@@ -1,13 +1,14 @@
-From a34562e8f936f77d726fcd94746a467db5f2bf04 Mon Sep 17 00:00:00 2001
+From df9eac9d5410755cf967640457c575b0aabb35b1 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 21:15:28 +0800
-Subject: [PATCH v2 07/19] bcache: remove useless bucket_pages()
+Subject: [PATCH v2 07/12] bcache: remove useless bucket_pages()
It seems alloc_bucket_pages() is the only user of bucket_pages().
Considering alloc_bucket_pages() is removed from bcache code, it is safe
to remove the useless macro bucket_pages() now.
Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/md/bcache/bcache.h | 1 -
1 file changed, 1 deletion(-)
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch
index 4cd89f1..b9cedd8 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch
@@ -1,7 +1,7 @@
-From 964012dfcb5e4ae91630c5d92b51cfba698dc41d Mon Sep 17 00:00:00 2001
+From 9b54949ab941a8f34d8a70211b3f632db4e193f6 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 21:20:48 +0800
-Subject: [PATCH v2 08/19] bcache: only use bucket_bytes() on struct cache
+Subject: [PATCH v2 08/12] bcache: only use bucket_bytes() on struct cache
Because struct cache_set and struct cache both have struct cache_sb,
macro bucket_bytes() currently are used on both of them. When removing
@@ -13,6 +13,7 @@ one of the preparation to remove the embedded struct cache_sb from
struct cache_set.
Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/md/bcache/bcache.h | 2 +-
drivers/md/bcache/sysfs.c | 2 +-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0009-bcache-don-t-check-seq-numbers-in-register_cache_.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0009-bcache-don-t-check-seq-numbers-in-register_cache_.patch
new file mode 100644
index 0000000..790d2ce
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0009-bcache-don-t-check-seq-numbers-in-register_cache_.patch
@@ -0,0 +1,53 @@
+From 5b1a516888a054b38038723d40368235faddb5e3 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 23:28:26 +0800
+Subject: [PATCH v2 09/12] bcache: don't check seq numbers in
+ register_cache_set()
+
+To update the partial super block of the cache set, the seq numbers of
+the cache and the cache set are checked in register_cache_set(). If
+the cache's seq number is larger than the cache set's, the cache set
+must update its partial super block from the cache's super block. This
+becomes unnecessary once the embedded struct cache_sb is removed.
+
+This patch removes the seq number checking from register_cache_set():
+there will no longer be such a partial super block in struct
+cache_set; the cache set will directly reference the in-memory super
+block from struct cache. This is a preparation patch for removing the
+embedded struct cache_sb from struct cache_set.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/super.c | 15 ---------------
+ 1 file changed, 15 deletions(-)
+
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 28257f11d835..3dfe81bf31c8 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -2176,21 +2176,6 @@ static const char *register_cache_set(struct cache *ca)
+ sysfs_create_link(&c->kobj, &ca->kobj, buf))
+ goto err;
+
+- /*
+- * A special case is both ca->sb.seq and c->sb.seq are 0,
+- * such condition happens on a new created cache device whose
+- * super block is never flushed yet. In this case c->sb.version
+- * and other members should be updated too, otherwise we will
+- * have a mistaken super block version in cache set.
+- */
+- if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
+- c->sb.version = ca->sb.version;
+- memcpy(c->set_uuid, ca->sb.set_uuid, 16);
+- c->sb.flags = ca->sb.flags;
+- c->sb.seq = ca->sb.seq;
+- pr_debug("set version = %llu\n", c->sb.version);
+- }
+-
+ kobject_get(&ca->kobj);
+ ca->set = c;
+ ca->set->cache = ca;
+--
+2.26.2
+
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0010-bcache-remove-can_attach_cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0010-bcache-remove-can_attach_cache.patch
new file mode 100644
index 0000000..4e08eac
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0010-bcache-remove-can_attach_cache.patch
@@ -0,0 +1,50 @@
+From 46164fea1d6e07cbb196e945b4534879b74504bf Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 23:36:56 +0800
+Subject: [PATCH v2 10/12] bcache: remove can_attach_cache()
+
+After removing the embedded struct cache_sb from struct cache_set, the
+cache set will directly reference the in-memory super block of struct
+cache. It is unnecessary to compare block_size, bucket_size and
+nr_in_set from the identical in-memory super block in can_attach_cache().
+
+This is a preparation patch for later removing cache_set->sb from
+struct cache_set.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/super.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 3dfe81bf31c8..fcfc8f41b0ed 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -2128,13 +2128,6 @@ static int run_cache_set(struct cache_set *c)
+ return -EIO;
+ }
+
+-static bool can_attach_cache(struct cache *ca, struct cache_set *c)
+-{
+- return ca->sb.block_size == c->sb.block_size &&
+- ca->sb.bucket_size == c->sb.bucket_size &&
+- ca->sb.nr_in_set == c->sb.nr_in_set;
+-}
+-
+ static const char *register_cache_set(struct cache *ca)
+ {
+ char buf[12];
+@@ -2146,9 +2139,6 @@ static const char *register_cache_set(struct cache *ca)
+ if (c->cache)
+ return "duplicate cache set member";
+
+- if (!can_attach_cache(ca, c))
+- return "cache sb does not match set";
+-
+ if (!CACHE_SYNC(&ca->sb))
+ SET_CACHE_SYNC(&c->sb, false);
+
+--
+2.26.2
+
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0011-bcache-check-and-set-sync-status-on-cache-s-in-me.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0011-bcache-check-and-set-sync-status-on-cache-s-in-me.patch
new file mode 100644
index 0000000..dd534a5
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0011-bcache-check-and-set-sync-status-on-cache-s-in-me.patch
@@ -0,0 +1,110 @@
+From 24bb19eb76bea9589158215402aeda67fabba2e9 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 23:53:52 +0800
+Subject: [PATCH v2 11/12] bcache: check and set sync status on cache's
+ in-memory super block
+
+Currently the cache's sync status is checked and set on the cache set's
+in-memory partial super block. After removing the embedded struct
+cache_sb from struct cache_set and referencing the cache's in-memory
+super block instead, the sync status can be set and checked directly on
+the cache's super block.
+
+This patch checks and sets the cache sync status directly on the
+cache's in-memory super block. This is a preparation for later removing
+the embedded struct cache_sb from struct cache_set.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/alloc.c | 2 +-
+ drivers/md/bcache/journal.c | 2 +-
+ drivers/md/bcache/super.c | 7 ++-----
+ drivers/md/bcache/sysfs.c | 6 +++---
+ 4 files changed, 7 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 1b8310992dd0..65fdbdeb5134 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -361,7 +361,7 @@ static int bch_allocator_thread(void *arg)
+ * new stuff to them:
+ */
+ allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
+- if (CACHE_SYNC(&ca->set->sb)) {
++ if (CACHE_SYNC(&ca->sb)) {
+ /*
+ * This could deadlock if an allocation with a btree
+ * node locked ever blocked - having the btree node
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index ccd5de0ab0fe..e2810668ede3 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -915,7 +915,7 @@ atomic_t *bch_journal(struct cache_set *c,
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return NULL;
+
+- if (!CACHE_SYNC(&c->sb))
++ if (!CACHE_SYNC(&c->cache->sb))
+ return NULL;
+
+ w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index fcfc8f41b0ed..18f76d1ea0e3 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1970,7 +1970,7 @@ static int run_cache_set(struct cache_set *c)
+ c->nbuckets = ca->sb.nbuckets;
+ set_gc_sectors(c);
+
+- if (CACHE_SYNC(&c->sb)) {
++ if (CACHE_SYNC(&c->cache->sb)) {
+ struct bkey *k;
+ struct jset *j;
+
+@@ -2093,7 +2093,7 @@ static int run_cache_set(struct cache_set *c)
+ * everything is set up - fortunately journal entries won't be
+ * written until the SET_CACHE_SYNC() here:
+ */
+- SET_CACHE_SYNC(&c->sb, true);
++ SET_CACHE_SYNC(&c->cache->sb, true);
+
+ bch_journal_next(&c->journal);
+ bch_journal_meta(c, &cl);
+@@ -2139,9 +2139,6 @@ static const char *register_cache_set(struct cache *ca)
+ if (c->cache)
+ return "duplicate cache set member";
+
+- if (!CACHE_SYNC(&ca->sb))
+- SET_CACHE_SYNC(&c->sb, false);
+-
+ goto found;
+ }
+
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index 4bfe98faadcc..554e3afc9b68 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -711,7 +711,7 @@ SHOW(__bch_cache_set)
+ {
+ struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+
+- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
++ sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb));
+ sysfs_print(journal_delay_ms, c->journal_delay_ms);
+ sysfs_hprint(bucket_size, bucket_bytes(c->cache));
+ sysfs_hprint(block_size, block_bytes(c->cache));
+@@ -812,8 +812,8 @@ STORE(__bch_cache_set)
+ if (attr == &sysfs_synchronous) {
+ bool sync = strtoul_or_return(buf);
+
+- if (sync != CACHE_SYNC(&c->sb)) {
+- SET_CACHE_SYNC(&c->sb, sync);
++ if (sync != CACHE_SYNC(&c->cache->sb)) {
++ SET_CACHE_SYNC(&c->cache->sb, sync);
+ bcache_write_super(c);
+ }
+ }
+--
+2.26.2
+
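The CACHE_SYNC()/SET_CACHE_SYNC() pair used throughout this patch is
generated by bcache's BITMASK() macro, which packs a named flag into a
bit range of the super block's flags word. The following is a minimal
userspace sketch of that pattern, assuming a stand-in struct rather
than the real struct cache_sb:

#include <stdint.h>
#include <stdio.h>

struct sb { uint64_t flags; };

/* Generate a getter/setter pair over `size` bits at `offset` */
#define BITMASK(name, type, field, offset, size)                      \
static inline uint64_t name(const type *k)                            \
{ return (k->field >> (offset)) & ~(~0ULL << (size)); }               \
                                                                      \
static inline void SET_##name(type *k, uint64_t v)                    \
{                                                                     \
        k->field &= ~(~(~0ULL << (size)) << (offset));                \
        k->field |= (v & ~(~0ULL << (size))) << (offset);             \
}

BITMASK(CACHE_SYNC, struct sb, flags, 0, 1);    /* bit 0, width 1 */

int main(void)
{
        struct sb sb = { 0 };

        SET_CACHE_SYNC(&sb, 1);
        printf("sync=%llu\n", (unsigned long long)CACHE_SYNC(&sb));
        SET_CACHE_SYNC(&sb, 0);
        printf("sync=%llu\n", (unsigned long long)CACHE_SYNC(&sb));
        return 0;
}

Because the accessors are generated from one definition site, moving
the flag from c->sb to c->cache->sb only changes the pointer passed in,
not the accessor itself.
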
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0012-bcache-remove-embedded-struct-cache_sb-from-struc.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0012-bcache-remove-embedded-struct-cache_sb-from-struc.patch
new file mode 100644
index 0000000..69e81ad
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0012-bcache-remove-embedded-struct-cache_sb-from-struc.patch
@@ -0,0 +1,469 @@
+From 6400744caa2bdbde213c6b336196ec074f715502 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Sat, 22 Aug 2020 16:11:38 +0800
+Subject: [PATCH v2 12/12] bcache: remove embedded struct cache_sb from struct
+ cache_set
+
+Since the bcache code was merged into the mainline kernel, each cache
+set has only ever had one single cache in it. The framework for
+multiple caches is here, but the code is far from complete. Considering
+that multiple copies of cached data can also be stored on e.g. md raid1
+devices, it is indeed unnecessary to support multiple caches in one
+cache set.
+
+The previous preparation patches fixed the dependencies for explicitly
+making a cache set have only a single cache. Now we don't have to
+maintain an embedded partial super block in struct cache_set; the
+in-memory super block can be directly referenced from struct cache.
+
+This patch removes the embedded struct cache_sb from struct cache_set,
+and fixes all locations where the super block was referenced from this
+removed copy, by referencing the in-memory super block of struct cache
+instead.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/alloc.c | 6 ++---
+ drivers/md/bcache/bcache.h | 4 +--
+ drivers/md/bcache/btree.c | 17 +++++++------
+ drivers/md/bcache/btree.h | 2 +-
+ drivers/md/bcache/extents.c | 6 ++---
+ drivers/md/bcache/features.c | 4 +--
+ drivers/md/bcache/io.c | 2 +-
+ drivers/md/bcache/journal.c | 11 ++++----
+ drivers/md/bcache/request.c | 4 +--
+ drivers/md/bcache/super.c | 47 +++++++++++++----------------------
+ drivers/md/bcache/writeback.c | 2 +-
+ 11 files changed, 46 insertions(+), 59 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 65fdbdeb5134..8c371d5eef8e 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -87,7 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
+ {
+ struct cache *ca;
+ struct bucket *b;
+- unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
++ unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
+ int r;
+
+ atomic_sub(sectors, &c->rescale);
+@@ -583,7 +583,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
+ struct open_bucket, list);
+ found:
+ if (!ret->sectors_free && KEY_PTRS(alloc)) {
+- ret->sectors_free = c->sb.bucket_size;
++ ret->sectors_free = c->cache->sb.bucket_size;
+ bkey_copy(&ret->key, alloc);
+ bkey_init(alloc);
+ }
+@@ -677,7 +677,7 @@ bool bch_alloc_sectors(struct cache_set *c,
+ &PTR_CACHE(c, &b->key, i)->sectors_written);
+ }
+
+- if (b->sectors_free < c->sb.block_size)
++ if (b->sectors_free < c->cache->sb.block_size)
+ b->sectors_free = 0;
+
+ /*
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 94d4baf4c405..1d57f48307e6 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -517,8 +517,6 @@ struct cache_set {
+ atomic_t idle_counter;
+ atomic_t at_max_writeback_rate;
+
+- struct cache_sb sb;
+-
+ struct cache *cache;
+
+ struct bcache_device **devices;
+@@ -799,7 +797,7 @@ static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
+
+ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
+ {
+- return s & (c->sb.bucket_size - 1);
++ return s & (c->cache->sb.bucket_size - 1);
+ }
+
+ static inline struct cache *PTR_CACHE(struct cache_set *c,
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index c91b4d58a5b3..d09103cc7da5 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -117,7 +117,7 @@ static void bch_btree_init_next(struct btree *b)
+
+ if (b->written < btree_blocks(b))
+ bch_bset_init_next(&b->keys, write_block(b),
+- bset_magic(&b->c->sb));
++ bset_magic(&b->c->cache->sb));
+
+ }
+
+@@ -155,7 +155,7 @@ void bch_btree_node_read_done(struct btree *b)
+ * See the comment arount cache_set->fill_iter.
+ */
+ iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
+- iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
++ iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
+ iter->used = 0;
+
+ #ifdef CONFIG_BCACHE_DEBUG
+@@ -178,7 +178,7 @@ void bch_btree_node_read_done(struct btree *b)
+ goto err;
+
+ err = "bad magic";
+- if (i->magic != bset_magic(&b->c->sb))
++ if (i->magic != bset_magic(&b->c->cache->sb))
+ goto err;
+
+ err = "bad checksum";
+@@ -219,7 +219,7 @@ void bch_btree_node_read_done(struct btree *b)
+
+ if (b->written < btree_blocks(b))
+ bch_bset_init_next(&b->keys, write_block(b),
+- bset_magic(&b->c->sb));
++ bset_magic(&b->c->cache->sb));
+ out:
+ mempool_free(iter, &b->c->fill_iter);
+ return;
+@@ -423,7 +423,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
+
+ do_btree_node_write(b);
+
+- atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
++ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
+ &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
+
+ b->written += set_blocks(i, block_bytes(b->c->cache));
+@@ -738,7 +738,7 @@ void bch_btree_cache_free(struct cache_set *c)
+ if (c->verify_data)
+ list_move(&c->verify_data->list, &c->btree_cache);
+
+- free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
++ free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
+ #endif
+
+ list_splice(&c->btree_cache_freeable,
+@@ -785,7 +785,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
+ mutex_init(&c->verify_lock);
+
+ c->verify_ondisk = (void *)
+- __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
++ __get_free_pages(GFP_KERNEL|__GFP_COMP,
++ ilog2(meta_bucket_pages(&c->cache->sb)));
+ if (!c->verify_ondisk) {
+ /*
+ * Don't worry about the mca_rereserve buckets
+@@ -1108,7 +1109,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+ }
+
+ b->parent = parent;
+- bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
++ bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
+
+ mutex_unlock(&c->bucket_lock);
+
+diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
+index 257969980c49..50482107134f 100644
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -194,7 +194,7 @@ static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
+
+ static inline void set_gc_sectors(struct cache_set *c)
+ {
+- atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
++ atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
+ }
+
+ void bkey_put(struct cache_set *c, struct bkey *k);
+diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
+index 9162af5bb6ec..f4658a1f37b8 100644
+--- a/drivers/md/bcache/extents.c
++++ b/drivers/md/bcache/extents.c
+@@ -54,7 +54,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
++ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
+ bucket < ca->sb.first_bucket ||
+ bucket >= ca->sb.nbuckets)
+ return true;
+@@ -75,7 +75,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+- if (KEY_SIZE(k) + r > c->sb.bucket_size)
++ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
+ return "bad, length too big";
+ if (bucket < ca->sb.first_bucket)
+ return "bad, short offset";
+@@ -136,7 +136,7 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+ size_t n = PTR_BUCKET_NR(b->c, k, j);
+
+ pr_cont(" bucket %zu", n);
+- if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
++ if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
+ pr_cont(" prio %i",
+ PTR_BUCKET(b->c, k, j)->prio);
+ }
+diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
+index 4442df48d28c..6469223f0b77 100644
+--- a/drivers/md/bcache/features.c
++++ b/drivers/md/bcache/features.c
+@@ -30,7 +30,7 @@ static struct feature feature_list[] = {
+ for (f = &feature_list[0]; f->compat != 0; f++) { \
+ if (f->compat != BCH_FEATURE_ ## type) \
+ continue; \
+- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) { \
++ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) { \
+ if (first) { \
+ out += snprintf(out, buf + size - out, \
+ "["); \
+@@ -44,7 +44,7 @@ static struct feature feature_list[] = {
+ \
+ out += snprintf(out, buf + size - out, "%s", f->string);\
+ \
+- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) \
++ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) \
+ out += snprintf(out, buf + size - out, "]"); \
+ \
+ first = false; \
+diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
+index a14a445618b4..dad71a6b7889 100644
+--- a/drivers/md/bcache/io.c
++++ b/drivers/md/bcache/io.c
+@@ -26,7 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
+ struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
+ struct bio *bio = &b->bio;
+
+- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->sb));
++ bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
+
+ return bio;
+ }
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index e2810668ede3..c5526e5087ef 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -666,7 +666,7 @@ static void journal_reclaim(struct cache_set *c)
+
+ bkey_init(k);
+ SET_KEY_PTRS(k, 1);
+- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
++ c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;
+
+ out:
+ if (!journal_full(&c->journal))
+@@ -735,7 +735,7 @@ static void journal_write_unlocked(struct closure *cl)
+ struct journal_write *w = c->journal.cur;
+ struct bkey *k = &c->journal.key;
+ unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
+- c->sb.block_size;
++ ca->sb.block_size;
+
+ struct bio *bio;
+ struct bio_list list;
+@@ -762,7 +762,7 @@ static void journal_write_unlocked(struct closure *cl)
+ bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
+
+ w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
+- w->data->magic = jset_magic(&c->sb);
++ w->data->magic = jset_magic(&ca->sb);
+ w->data->version = BCACHE_JSET_VERSION;
+ w->data->last_seq = last_seq(&c->journal);
+ w->data->csum = csum_set(w->data);
+@@ -838,6 +838,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
+ size_t sectors;
+ struct closure cl;
+ bool wait = false;
++ struct cache *ca = c->cache;
+
+ closure_init_stack(&cl);
+
+@@ -847,10 +848,10 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
+ struct journal_write *w = c->journal.cur;
+
+ sectors = __set_blocks(w->data, w->data->keys + nkeys,
+- block_bytes(c->cache)) * c->sb.block_size;
++ block_bytes(ca)) * ca->sb.block_size;
+
+ if (sectors <= min_t(size_t,
+- c->journal.blocks_free * c->sb.block_size,
++ c->journal.blocks_free * ca->sb.block_size,
+ PAGE_SECTORS << JSET_BITS))
+ return w;
+
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index 02408fdbf5bb..37e9cf8dbfc1 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -394,8 +394,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
+ goto skip;
+ }
+
+- if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
+- bio_sectors(bio) & (c->sb.block_size - 1)) {
++ if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
++ bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
+ pr_debug("skipping unaligned io\n");
+ goto skip;
+ }
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 18f76d1ea0e3..d06ea4a3e500 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -350,16 +350,10 @@ void bcache_write_super(struct cache_set *c)
+ down(&c->sb_write_mutex);
+ closure_init(cl, &c->cl);
+
+- c->sb.seq++;
++ ca->sb.seq++;
+
+- if (c->sb.version > version)
+- version = c->sb.version;
+-
+- ca->sb.version = version;
+- ca->sb.seq = c->sb.seq;
+- ca->sb.last_mount = c->sb.last_mount;
+-
+- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
++ if (ca->sb.version < version)
++ ca->sb.version = version;
+
+ bio_init(bio, ca->sb_bv, 1);
+ bio_set_dev(bio, ca->bdev);
+@@ -477,7 +471,7 @@ static int __uuid_write(struct cache_set *c)
+ {
+ BKEY_PADDED(key) k;
+ struct closure cl;
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ unsigned int size;
+
+ closure_init_stack(&cl);
+@@ -486,13 +480,12 @@ static int __uuid_write(struct cache_set *c)
+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
+ return 1;
+
+- size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
++ size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
+ SET_KEY_SIZE(&k.key, size);
+ uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
+ closure_sync(&cl);
+
+ /* Only one bucket used for uuid write */
+- ca = PTR_CACHE(c, &k.key, 0);
+ atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
+
+ bkey_copy(&c->uuid_bucket, &k.key);
+@@ -1205,7 +1198,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ return -EINVAL;
+ }
+
+- if (dc->sb.block_size < c->sb.block_size) {
++ if (dc->sb.block_size < c->cache->sb.block_size) {
+ /* Will die */
+ pr_err("Couldn't attach %s: block size less than set's block size\n",
+ dc->backing_dev_name);
+@@ -1664,6 +1657,9 @@ static void cache_set_free(struct closure *cl)
+ bch_journal_free(c);
+
+ mutex_lock(&bch_register_lock);
++ bch_bset_sort_state_free(&c->sort);
++ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
++
+ ca = c->cache;
+ if (ca) {
+ ca->set = NULL;
+@@ -1671,8 +1667,6 @@ static void cache_set_free(struct closure *cl)
+ kobject_put(&ca->kobj);
+ }
+
+- bch_bset_sort_state_free(&c->sort);
+- free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
+
+ if (c->moving_gc_wq)
+ destroy_workqueue(c->moving_gc_wq);
+@@ -1838,6 +1832,7 @@ void bch_cache_set_unregister(struct cache_set *c)
+ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ {
+ int iter_size;
++ struct cache *ca = container_of(sb, struct cache, sb);
+ struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
+
+ if (!c)
+@@ -1860,23 +1855,15 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ bch_cache_accounting_init(&c->accounting, &c->cl);
+
+ memcpy(c->set_uuid, sb->set_uuid, 16);
+- c->sb.block_size = sb->block_size;
+- c->sb.bucket_size = sb->bucket_size;
+- c->sb.nr_in_set = sb->nr_in_set;
+- c->sb.last_mount = sb->last_mount;
+- c->sb.version = sb->version;
+- if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+- c->sb.feature_compat = sb->feature_compat;
+- c->sb.feature_ro_compat = sb->feature_ro_compat;
+- c->sb.feature_incompat = sb->feature_incompat;
+- }
+
++ c->cache = ca;
++ c->cache->set = c;
+ c->bucket_bits = ilog2(sb->bucket_size);
+ c->block_bits = ilog2(sb->block_size);
+- c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
++ c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
+ c->devices_max_used = 0;
+ atomic_set(&c->attached_dev_nr, 0);
+- c->btree_pages = meta_bucket_pages(&c->sb);
++ c->btree_pages = meta_bucket_pages(sb);
+ if (c->btree_pages > BTREE_MAX_PAGES)
+ c->btree_pages = max_t(int, c->btree_pages / 4,
+ BTREE_MAX_PAGES);
+@@ -1914,7 +1901,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+
+ if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
+ sizeof(struct bbio) +
+- sizeof(struct bio_vec) * meta_bucket_pages(&c->sb)))
++ sizeof(struct bio_vec) * meta_bucket_pages(sb)))
+ goto err;
+
+ if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
+@@ -1924,7 +1911,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
+ goto err;
+
+- c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
++ c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
+ if (!c->uuids)
+ goto err;
+
+@@ -2104,7 +2091,7 @@ static int run_cache_set(struct cache_set *c)
+ goto err;
+
+ closure_sync(&cl);
+- c->sb.last_mount = (u32)ktime_get_real_seconds();
++ c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
+ bcache_write_super(c);
+
+ list_for_each_entry_safe(dc, t, &uncached_devices, list)
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 4f4ad6b3d43a..3c74996978da 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -35,7 +35,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
+ * This is the size of the cache, minus the amount used for
+ * flash-only devices
+ */
+- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
++ uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
+ atomic_long_read(&c->flash_dev_dirty_sectors);
+
+ /*
+--
+2.26.2
+
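As a minimal model of this patch's end state: struct cache_set keeps
only a pointer to its single struct cache, and every former c->sb.X
access becomes c->cache->sb.X. The field names below mirror bcache, but
this is a simplified userspace sketch, not the kernel structures:

#include <stdint.h>
#include <stdio.h>

struct cache_sb {
        uint16_t block_size;    /* sectors */
        uint32_t bucket_size;   /* sectors */
};

struct cache_set;

struct cache {
        struct cache_sb sb;     /* the only in-memory super block */
        struct cache_set *set;
};

struct cache_set {
        struct cache *cache;    /* single cache; no embedded sb copy */
};

/* Former c->sb.bucket_size users now reach through c->cache */
static uint32_t set_bucket_size(const struct cache_set *c)
{
        return c->cache->sb.bucket_size;
}

int main(void)
{
        struct cache ca = { .sb = { .block_size = 8, .bucket_size = 1024 } };
        struct cache_set c = { .cache = &ca };

        ca.set = &c;
        printf("bucket_size=%u sectors\n", set_bucket_size(&c));
        return 0;
}

With a single authoritative copy, there is no risk of the set-level and
cache-level super blocks drifting apart, which is what the removed
seq/version merging code had to guard against.
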
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0014-bcache-move-struct-cache_sb-out-of-uapi-bcache.h.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0014-bcache-move-struct-cache_sb-out-of-uapi-bcache.h.patch
deleted file mode 100644
index b5d1ce0..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0014-bcache-move-struct-cache_sb-out-of-uapi-bcache.h.patch
+++ /dev/null
@@ -1,261 +0,0 @@
-From fa53715b39652e9f6de5d0dca377c71cd9e31ee4 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 15 Aug 2020 00:49:17 +0800
-Subject: [PATCH v2 14/19] bcache: move struct cache_sb out of uapi bcache.h
-
-struct cache_sb does not exactly map to cache_sb_disk; it is only for
-the in-memory super block and doesn't belong in uapi bcache.h.
-
-This patch moves the struct cache_sb definition and its dependent
-macros and inline routines from include/uapi/linux/bcache.h to
-drivers/md/bcache/bcache.h, which is the proper location for them.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/bcache.h | 99 +++++++++++++++++++++++++++++++++++++
- include/uapi/linux/bcache.h | 98 ------------------------------------
- 2 files changed, 99 insertions(+), 98 deletions(-)
-
-diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index 1d57f48307e6..b755bf7832ac 100644
---- a/drivers/md/bcache/bcache.h
-+++ b/drivers/md/bcache/bcache.h
-@@ -279,6 +279,82 @@ struct bcache_device {
- unsigned int cmd, unsigned long arg);
- };
-
-+/*
-+ * This is for in-memory bcache super block.
-+ * NOTE: cache_sb is NOT exactly mapping to cache_sb_disk, the member
-+ * size, ordering and even whole struct size may be different
-+ * from cache_sb_disk.
-+ */
-+struct cache_sb {
-+ __u64 offset; /* sector where this sb was written */
-+ __u64 version;
-+
-+ __u8 magic[16];
-+
-+ __u8 uuid[16];
-+ union {
-+ __u8 set_uuid[16];
-+ __u64 set_magic;
-+ };
-+ __u8 label[SB_LABEL_SIZE];
-+
-+ __u64 flags;
-+ __u64 seq;
-+
-+ __u64 feature_compat;
-+ __u64 feature_incompat;
-+ __u64 feature_ro_compat;
-+
-+ union {
-+ struct {
-+ /* Cache devices */
-+ __u64 nbuckets; /* device size */
-+
-+ __u16 block_size; /* sectors */
-+ __u16 nr_in_set;
-+ __u16 nr_this_dev;
-+ __u32 bucket_size; /* sectors */
-+ };
-+ struct {
-+ /* Backing devices */
-+ __u64 data_offset;
-+
-+ /*
-+ * block_size from the cache device section is still used by
-+ * backing devices, so don't add anything here until we fix
-+ * things to not need it for backing devices anymore
-+ */
-+ };
-+ };
-+
-+ __u32 last_mount; /* time overflow in y2106 */
-+
-+ __u16 first_bucket;
-+ union {
-+ __u16 njournal_buckets;
-+ __u16 keys;
-+ };
-+ __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
-+};
-+
-+BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
-+BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
-+BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
-+#define CACHE_REPLACEMENT_LRU 0U
-+#define CACHE_REPLACEMENT_FIFO 1U
-+#define CACHE_REPLACEMENT_RANDOM 2U
-+
-+BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
-+#define CACHE_MODE_WRITETHROUGH 0U
-+#define CACHE_MODE_WRITEBACK 1U
-+#define CACHE_MODE_WRITEAROUND 2U
-+#define CACHE_MODE_NONE 3U
-+BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
-+#define BDEV_STATE_NONE 0U
-+#define BDEV_STATE_CLEAN 1U
-+#define BDEV_STATE_DIRTY 2U
-+#define BDEV_STATE_STALE 3U
-+
- struct io {
- /* Used to track sequential IO so it can be skipped */
- struct hlist_node hash;
-@@ -840,6 +916,13 @@ static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
- return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
- }
-
-+static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
-+{
-+ return sb->version == BCACHE_SB_VERSION_BDEV
-+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
-+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
-+}
-+
- /* Btree key macros */
-
- /*
-@@ -958,6 +1041,22 @@ static inline void wait_for_kthread_stop(void)
- }
- }
-
-+/* generate magic number */
-+static inline __u64 jset_magic(struct cache_sb *sb)
-+{
-+ return sb->set_magic ^ JSET_MAGIC;
-+}
-+
-+static inline __u64 pset_magic(struct cache_sb *sb)
-+{
-+ return sb->set_magic ^ PSET_MAGIC;
-+}
-+
-+static inline __u64 bset_magic(struct cache_sb *sb)
-+{
-+ return sb->set_magic ^ BSET_MAGIC;
-+}
-+
- /* Forward declarations */
-
- void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
-diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
-index 52e8bcb33981..18166a3d8503 100644
---- a/include/uapi/linux/bcache.h
-+++ b/include/uapi/linux/bcache.h
-@@ -216,89 +216,6 @@ struct cache_sb_disk {
- __le16 bucket_size_hi;
- };
-
--/*
-- * This is for in-memory bcache super block.
-- * NOTE: cache_sb is NOT exactly mapping to cache_sb_disk, the member
-- * size, ordering and even whole struct size may be different
-- * from cache_sb_disk.
-- */
--struct cache_sb {
-- __u64 offset; /* sector where this sb was written */
-- __u64 version;
--
-- __u8 magic[16];
--
-- __u8 uuid[16];
-- union {
-- __u8 set_uuid[16];
-- __u64 set_magic;
-- };
-- __u8 label[SB_LABEL_SIZE];
--
-- __u64 flags;
-- __u64 seq;
--
-- __u64 feature_compat;
-- __u64 feature_incompat;
-- __u64 feature_ro_compat;
--
-- union {
-- struct {
-- /* Cache devices */
-- __u64 nbuckets; /* device size */
--
-- __u16 block_size; /* sectors */
-- __u16 nr_in_set;
-- __u16 nr_this_dev;
-- __u32 bucket_size; /* sectors */
-- };
-- struct {
-- /* Backing devices */
-- __u64 data_offset;
--
-- /*
-- * block_size from the cache device section is still used by
-- * backing devices, so don't add anything here until we fix
-- * things to not need it for backing devices anymore
-- */
-- };
-- };
--
-- __u32 last_mount; /* time overflow in y2106 */
--
-- __u16 first_bucket;
-- union {
-- __u16 njournal_buckets;
-- __u16 keys;
-- };
-- __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
--};
--
--static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
--{
-- return sb->version == BCACHE_SB_VERSION_BDEV
-- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
-- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
--}
--
--BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
--BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
--BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
--#define CACHE_REPLACEMENT_LRU 0U
--#define CACHE_REPLACEMENT_FIFO 1U
--#define CACHE_REPLACEMENT_RANDOM 2U
--
--BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
--#define CACHE_MODE_WRITETHROUGH 0U
--#define CACHE_MODE_WRITEBACK 1U
--#define CACHE_MODE_WRITEAROUND 2U
--#define CACHE_MODE_NONE 3U
--BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
--#define BDEV_STATE_NONE 0U
--#define BDEV_STATE_CLEAN 1U
--#define BDEV_STATE_DIRTY 2U
--#define BDEV_STATE_STALE 3U
--
- /*
- * Magic numbers
- *
-@@ -310,21 +227,6 @@ BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
- #define PSET_MAGIC 0x6750e15f87337f91ULL
- #define BSET_MAGIC 0x90135c78b99e07f5ULL
-
--static inline __u64 jset_magic(struct cache_sb *sb)
--{
-- return sb->set_magic ^ JSET_MAGIC;
--}
--
--static inline __u64 pset_magic(struct cache_sb *sb)
--{
-- return sb->set_magic ^ PSET_MAGIC;
--}
--
--static inline __u64 bset_magic(struct cache_sb *sb)
--{
-- return sb->set_magic ^ BSET_MAGIC;
--}
--
- /*
- * Journal
- *
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0015-bcache-share-register-sysfs-with-async-register.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0015-bcache-share-register-sysfs-with-async-register.patch
deleted file mode 100644
index 471148a..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0015-bcache-share-register-sysfs-with-async-register.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From c4b3c187fc4c454d67731164fb88783d8f038308 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 15 Aug 2020 16:56:19 +0800
-Subject: [PATCH v2 15/19] bcache: share register sysfs with async register
-
-Previously the experimental async registration used a separate sysfs
-file, register_async. The async registration code has now seemed to
-work well for a while, so we can do further testing with it.
-
-This patch changes async bcache registration to share the same sysfs
-files /sys/fs/bcache/register (and register_quiet). Async registration
-becomes the default behavior if BCACHE_ASYNC_REGISTRATION is set in the
-kernel configuration. By default, BCACHE_ASYNC_REGISTRATION is not
-enabled yet.
-
-Signed-off-by: Coly Li <colyli@suse.de>
----
- drivers/md/bcache/super.c | 12 +++++++-----
- 1 file changed, 7 insertions(+), 5 deletions(-)
-
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index ad87859d744a..e24e999fea25 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -2381,7 +2381,6 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
-
- kobj_attribute_write(register, register_bcache);
- kobj_attribute_write(register_quiet, register_bcache);
--kobj_attribute_write(register_async, register_bcache);
- kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);
-
- static bool bch_is_open_backing(struct block_device *bdev)
-@@ -2505,6 +2504,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
- struct cache_sb_disk *sb_disk;
- struct block_device *bdev;
- ssize_t ret;
-+ bool async_registration = false;
-+
-+#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
-+ async_registration = true;
-+#endif
-
- ret = -EBUSY;
- err = "failed to reference bcache module";
-@@ -2558,7 +2562,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
- goto out_blkdev_put;
-
- err = "failed to register device";
-- if (attr == &ksysfs_register_async) {
-+
-+ if (async_registration) {
- /* register in asynchronous way */
- struct async_reg_args *args =
- kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);
-@@ -2821,9 +2826,6 @@ static int __init bcache_init(void)
- static const struct attribute *files[] = {
- &ksysfs_register.attr,
- &ksysfs_register_quiet.attr,
--#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
-- &ksysfs_register_async.attr,
--#endif
- &ksysfs_pendings_cleanup.attr,
- NULL
- };
---
-2.26.2
-
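A compact model of the compile-time selection described above: one
registration entry point, with the asynchronous path chosen by the
Kconfig symbol CONFIG_BCACHE_ASYNC_REGISTRATION. The two helpers here
are stand-ins for the real registration paths:

#include <stdbool.h>
#include <stdio.h>

static void register_sync(void)  { puts("registering synchronously"); }
static void register_async(void) { puts("queueing async registration"); }

static void register_bcache(void)
{
        bool async_registration = false;

#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
        async_registration = true;      /* compile-time opt-in */
#endif

        if (async_registration)
                register_async();
        else
                register_sync();
}

int main(void)
{
        /* sync unless built with -DCONFIG_BCACHE_ASYNC_REGISTRATION */
        register_bcache();
        return 0;
}
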
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0016-docs-update-trusted-encrypted.rst.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0016-docs-update-trusted-encrypted.rst.patch
deleted file mode 100644
index 3d9fe08..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0016-docs-update-trusted-encrypted.rst.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-From 5254f03b38c7e640fb8cc6e104a03d4c9da484d6 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Fri, 7 Aug 2020 16:41:14 +0800
-Subject: [PATCH v2 16/19] docs: update trusted-encrypted.rst
-
-The parameters in the tpm2 commands are outdated; people are not able
-to create a trusted key with the example commands.
-
-This patch updates the parameters of the tpm2 commands; they are
-verified with tpm2-tools-4.1 on a Linux v5.8 kernel.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Cc: Dan Williams <dan.j.williams@intel.com>
-Cc: James Bottomley <jejb@linux.ibm.com>
-Cc: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-Cc: Mimi Zohar <zohar@linux.ibm.com>
-Cc: Stefan Berger <stefanb@linux.ibm.com>
----
- Documentation/security/keys/trusted-encrypted.rst | 9 ++++-----
- 1 file changed, 4 insertions(+), 5 deletions(-)
-
-diff --git a/Documentation/security/keys/trusted-encrypted.rst b/Documentation/security/keys/trusted-encrypted.rst
-index 9483a7425ad5..442a2775156e 100644
---- a/Documentation/security/keys/trusted-encrypted.rst
-+++ b/Documentation/security/keys/trusted-encrypted.rst
-@@ -39,10 +39,9 @@ With the IBM TSS 2 stack::
-
- Or with the Intel TSS 2 stack::
-
-- #> tpm2_createprimary --hierarchy o -G rsa2048 -o key.ctxt
-+ #> tpm2_createprimary --hierarchy o -G rsa2048 key.ctxt
- [...]
-- handle: 0x800000FF
-- #> tpm2_evictcontrol -c key.ctxt -p 0x81000001
-+ #> tpm2_evictcontrol -c key.ctxt 0x81000001
- persistentHandle: 0x81000001
-
- Usage::
-@@ -115,7 +114,7 @@ append 'keyhandle=0x81000001' to statements between quotes, such as
-
- ::
-
-- $ keyctl add trusted kmk "new 32" @u
-+ $ keyctl add trusted kmk "new 32 keyhandle=0x81000001" @u
- 440502848
-
- $ keyctl show
-@@ -138,7 +137,7 @@ append 'keyhandle=0x81000001' to statements between quotes, such as
-
- Load a trusted key from the saved blob::
-
-- $ keyctl add trusted kmk "load `cat kmk.blob`" @u
-+ $ keyctl add trusted kmk "load `cat kmk.blob` keyhandle=0x81000001" @u
- 268728824
-
- $ keyctl print 268728824
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0017-net-introduce-helper-sendpage_ok-in-include-linux.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0017-net-introduce-helper-sendpage_ok-in-include-linux.patch
deleted file mode 100644
index 62506f9..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0017-net-introduce-helper-sendpage_ok-in-include-linux.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From 323f53faf7c202b647f7a8a2147215fa44129bac Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 15 Aug 2020 13:40:48 +0800
-Subject: [PATCH v2 17/19] net: introduce helper sendpage_ok() in
- include/linux/net.h
-
-The original problem came from the nvme-over-tcp code, which mistakenly
-uses kernel_sendpage() to send pages allocated by __get_free_pages()
-without the __GFP_COMP flag. Such pages don't hold a refcount
-(page_count is 0) on tail pages; sending them by kernel_sendpage() may
-trigger a kernel panic from a corrupted kernel heap, because these
-pages are incorrectly freed in the network stack as page_count 0 pages.
-
-This patch introduces a helper, sendpage_ok(), which returns true if
-the page being checked,
-- is not a slab page: PageSlab(page) is false.
-- has a page refcount: page_count(page) is not zero.
-
-All drivers that want to send a page to the remote end by
-kernel_sendpage() may use this helper to check whether the page is OK.
-If the helper does not return true, the driver should try another
-non-sendpage method (e.g. sock_no_sendpage()) to handle the page.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Cc: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
-Cc: Christoph Hellwig <hch@lst.de>
-Cc: Hannes Reinecke <hare@suse.de>
-Cc: Jan Kara <jack@suse.com>
-Cc: Jens Axboe <axboe@kernel.dk>
-Cc: Mikhail Skorzhinskii <mskorzhinskiy@solarflare.com>
-Cc: Philipp Reisner <philipp.reisner@linbit.com>
-Cc: Sagi Grimberg <sagi@grimberg.me>
-Cc: Vlastimil Babka <vbabka@suse.com>
-Cc: stable@vger.kernel.org
----
- include/linux/net.h | 16 ++++++++++++++++
- 1 file changed, 16 insertions(+)
-
-diff --git a/include/linux/net.h b/include/linux/net.h
-index d48ff1180879..a807fad31958 100644
---- a/include/linux/net.h
-+++ b/include/linux/net.h
-@@ -21,6 +21,7 @@
- #include <linux/rcupdate.h>
- #include <linux/once.h>
- #include <linux/fs.h>
-+#include <linux/mm.h>
- #include <linux/sockptr.h>
-
- #include <uapi/linux/net.h>
-@@ -286,6 +287,21 @@ do { \
- #define net_get_random_once_wait(buf, nbytes) \
- get_random_once_wait((buf), (nbytes))
-
-+/*
-+ * E.g. XFS meta- & log-data is in slab pages, or bcache meta
-+ * data pages, or other high order pages allocated by
-+ * __get_free_pages() without __GFP_COMP, which have a page_count
-+ * of 0 and/or have PageSlab() set. We cannot use send_page for
-+ * those, as that does get_page(); put_page(); and would cause
-+ * either a VM_BUG directly, or __page_cache_release a page that
-+ * would actually still be referenced by someone, leading to some
-+ * obscure delayed Oops somewhere else.
-+ */
-+static inline bool sendpage_ok(struct page *page)
-+{
-+ return (!PageSlab(page) && page_count(page) >= 1);
-+}
-+
- int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
- size_t num, size_t len);
- int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
---
-2.26.2
-
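To illustrate the guard that sendpage_ok() enables, here is a userspace
model of the check-then-fallback pattern; struct page and the two send
functions below are stand-ins for the kernel primitives:

#include <stdbool.h>
#include <stdio.h>

struct page {
        bool is_slab;   /* models PageSlab(page) */
        int refcount;   /* models page_count(page) */
};

static bool sendpage_ok(const struct page *page)
{
        /* same predicate as the helper above */
        return !page->is_slab && page->refcount >= 1;
}

static void kernel_sendpage(const struct page *p)
{
        (void)p;
        puts("zero-copy send");
}

static void sock_no_sendpage(const struct page *p)
{
        (void)p;
        puts("copying fallback");
}

static void send_one(const struct page *p)
{
        if (sendpage_ok(p))
                kernel_sendpage(p);     /* safe: refcounted, not slab */
        else
                sock_no_sendpage(p);    /* slab or refcount-0 page */
}

int main(void)
{
        struct page ok = { .is_slab = false, .refcount = 1 };
        struct page slab = { .is_slab = true, .refcount = 1 };

        send_one(&ok);
        send_one(&slab);
        return 0;
}

The nvme-tcp and drbd patches that follow apply exactly this
check-then-fallback shape at their call sites.
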
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0018-nvme-tcp-check-page-by-sendpage_ok-before-calling.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0018-nvme-tcp-check-page-by-sendpage_ok-before-calling.patch
deleted file mode 100644
index d9e0649..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0018-nvme-tcp-check-page-by-sendpage_ok-before-calling.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-From 2a4fcbc0285d0a00e5c963a620dc625046f65002 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 15 Aug 2020 15:32:59 +0800
-Subject: [PATCH v2 18/19] nvme-tcp: check page by sendpage_ok() before calling
- kernel_sendpage()
-
-Currently nvme_tcp_try_send_data() doesn't use kernel_sendpage() to
-send slab pages. But pages allocated by __get_free_pages() without
-__GFP_COMP, which also have a refcount of 0, are still sent to the
-remote end by kernel_sendpage(), which is problematic.
-
-The newly introduced helper sendpage_ok() checks both the PageSlab tag
-and the page_count counter, and returns true if the page being checked
-is OK to be sent by kernel_sendpage().
-
-This patch fixes the page checking issue of nvme_tcp_try_send_data()
-with sendpage_ok(): if sendpage_ok() returns true, send the page by
-kernel_sendpage(), otherwise use sock_no_sendpage() to handle it.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Cc: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
-Cc: Christoph Hellwig <hch@lst.de>
-Cc: Hannes Reinecke <hare@suse.de>
-Cc: Jan Kara <jack@suse.com>
-Cc: Jens Axboe <axboe@kernel.dk>
-Cc: Mikhail Skorzhinskii <mskorzhinskiy@solarflare.com>
-Cc: Philipp Reisner <philipp.reisner@linbit.com>
-Cc: Sagi Grimberg <sagi@grimberg.me>
-Cc: Vlastimil Babka <vbabka@suse.com>
-Cc: stable@vger.kernel.org
----
- drivers/nvme/host/tcp.c | 7 +++----
- 1 file changed, 3 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
-index 62fbaecdc960..902fe742762b 100644
---- a/drivers/nvme/host/tcp.c
-+++ b/drivers/nvme/host/tcp.c
-@@ -912,12 +912,11 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
- else
- flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
-
-- /* can't zcopy slab pages */
-- if (unlikely(PageSlab(page))) {
-- ret = sock_no_sendpage(queue->sock, page, offset, len,
-+ if (sendpage_ok(page)) {
-+ ret = kernel_sendpage(queue->sock, page, offset, len,
- flags);
- } else {
-- ret = kernel_sendpage(queue->sock, page, offset, len,
-+ ret = sock_no_sendpage(queue->sock, page, offset, len,
- flags);
- }
- if (ret <= 0)
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0019-drbd-code-cleanup-by-using-sendpage_ok-to-check-p.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0019-drbd-code-cleanup-by-using-sendpage_ok-to-check-p.patch
deleted file mode 100644
index 46ac593..0000000
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0019-drbd-code-cleanup-by-using-sendpage_ok-to-check-p.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From 4809568371583fd9e8b613f1717ef13ae12c3356 Mon Sep 17 00:00:00 2001
-From: Coly Li <colyli@suse.de>
-Date: Sat, 15 Aug 2020 15:37:00 +0800
-Subject: [PATCH v2 19/19] drbd: code cleanup by using sendpage_ok() to check
- page for kernel_sendpage()
-
-In _drbd_send_page() a page is checked by the following code before
-sending it by kernel_sendpage(),
-	(page_count(page) < 1) || PageSlab(page)
-If the check is true, this page won't be sent by kernel_sendpage(); it
-is handled by sock_no_sendpage() instead.
-
-This kind of check is exactly what the helper sendpage_ok() does, which
-was introduced into include/linux/net.h to solve a similar send-page
-issue in the nvme-tcp code.
-
-This patch uses sendpage_ok() to replace the open-coded checks of page
-type and refcount in _drbd_send_page(), as a code cleanup.
-
-Signed-off-by: Coly Li <colyli@suse.de>
-Cc: Philipp Reisner <philipp.reisner@linbit.com>
-Cc: Sagi Grimberg <sagi@grimberg.me>
----
- drivers/block/drbd/drbd_main.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
-index cb687ccdbd96..55dc0c91781e 100644
---- a/drivers/block/drbd/drbd_main.c
-+++ b/drivers/block/drbd/drbd_main.c
-@@ -1553,7 +1553,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
- * put_page(); and would cause either a VM_BUG directly, or
- * __page_cache_release a page that would actually still be referenced
- * by someone, leading to some obscure delayed Oops somewhere else. */
-- if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
-+ if (drbd_disable_sendpage || !sendpage_ok(page))
- return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
-
- msg_flags |= MSG_NOSIGNAL;
---
-2.26.2
-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch
new file mode 100644
index 0000000..1228ced
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0001-bcache-remove-int-n-from-parameter-list-of-bch_bu.patch
@@ -0,0 +1,152 @@
+From 9260c7e003b7652c9a8208fa479ff4c5d72a6737 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 00:07:05 +0800
+Subject: [PATCH v2 01/15] bcache: remove 'int n' from parameter list of
+ bch_bucket_alloc_set()
+
+The parameter 'int n' of bch_bucket_alloc_set() is not clearly defined.
+From the code comments, n is the number of buckets to allocate, but
+from the code itself 'n' is the maximum number of caches to iterate
+over. Indeed, at all the locations where bch_bucket_alloc_set() is
+called, 'n' is always 1.
+
+This patch removes the confusing and unnecessary 'int n' from the
+parameter list of bch_bucket_alloc_set(), and explicitly allocates only
+1 bucket for its caller.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/alloc.c | 35 +++++++++++++++--------------------
+ drivers/md/bcache/bcache.h | 4 ++--
+ drivers/md/bcache/btree.c | 2 +-
+ drivers/md/bcache/super.c | 2 +-
+ 4 files changed, 19 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 52035a78d836..4493ff57476d 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -49,7 +49,7 @@
+ *
+ * bch_bucket_alloc() allocates a single bucket from a specific cache.
+ *
+- * bch_bucket_alloc_set() allocates one or more buckets from different caches
++ * bch_bucket_alloc_set() allocates one bucket from different caches
+ * out of a cache set.
+ *
+ * free_some_buckets() drives all the processes described above. It's called
+@@ -488,34 +488,29 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
+ }
+
+ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait)
++ struct bkey *k, bool wait)
+ {
+- int i;
++ struct cache *ca;
++ long b;
+
+ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return -1;
+
+ lockdep_assert_held(&c->bucket_lock);
+- BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
+
+ bkey_init(k);
+
+- /* sort by free space/prio of oldest data in caches */
+-
+- for (i = 0; i < n; i++) {
+- struct cache *ca = c->cache_by_alloc[i];
+- long b = bch_bucket_alloc(ca, reserve, wait);
++ ca = c->cache_by_alloc[0];
++ b = bch_bucket_alloc(ca, reserve, wait);
++ if (b == -1)
++ goto err;
+
+- if (b == -1)
+- goto err;
++ k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
++ bucket_to_sector(c, b),
++ ca->sb.nr_this_dev);
+
+- k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
+- bucket_to_sector(c, b),
+- ca->sb.nr_this_dev);
+-
+- SET_KEY_PTRS(k, i + 1);
+- }
++ SET_KEY_PTRS(k, 1);
+
+ return 0;
+ err:
+@@ -525,12 +520,12 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ }
+
+ int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait)
++ struct bkey *k, bool wait)
+ {
+ int ret;
+
+ mutex_lock(&c->bucket_lock);
+- ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
++ ret = __bch_bucket_alloc_set(c, reserve, k, wait);
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+ }
+@@ -638,7 +633,7 @@ bool bch_alloc_sectors(struct cache_set *c,
+
+ spin_unlock(&c->data_bucket_lock);
+
+- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
++ if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
+ return false;
+
+ spin_lock(&c->data_bucket_lock);
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 4fd03d2496d8..5ff6e9573935 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -994,9 +994,9 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k);
+
+ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
+ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait);
++ struct bkey *k, bool wait);
+ int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait);
++ struct bkey *k, bool wait);
+ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
+ unsigned int sectors, unsigned int write_point,
+ unsigned int write_prio, bool wait);
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 3d8bd0692af3..e2a719fed53b 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1091,7 +1091,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+
+ mutex_lock(&c->bucket_lock);
+ retry:
+- if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
++ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
+ goto err;
+
+ bkey_put(c, &k.key);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 1bbdc410ee3c..7057ec48f3d1 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -486,7 +486,7 @@ static int __uuid_write(struct cache_set *c)
+ closure_init_stack(&cl);
+ lockdep_assert_held(&bch_register_lock);
+
+- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
++ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
+ return 1;
+
+ size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
+--
+2.26.2
+
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch
new file mode 100644
index 0000000..d0882f9
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0002-bcache-explicitly-make-cache_set-only-have-single.patch
@@ -0,0 +1,128 @@
+From da9ff41f507337ce4797935e8ba9b70da361d59d Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 00:30:59 +0800
+Subject: [PATCH v2 02/15] bcache: explicitly make cache_set only have single
+ cache
+
+Although the bcache code has a framework for multiple caches in a cache
+set, the multiple-cache support was never completed, and users rely on
+md raid1 for multiple copies of the cached data.
+
+This patch makes the following changes in struct cache_set, to
+explicitly make a cache_set only have a single cache:
+- Change the pointer array "*cache[MAX_CACHES_PER_SET]" to a single
+  pointer "*cache".
+- Remove the pointer array "*cache_by_alloc[MAX_CACHES_PER_SET]".
+- Remove "caches_loaded".
+
+Now the code matches exactly what it does in practice: only one cache
+is used in the cache set.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+---
+ drivers/md/bcache/alloc.c | 2 +-
+ drivers/md/bcache/bcache.h | 8 +++-----
+ drivers/md/bcache/super.c | 19 ++++++++-----------
+ 3 files changed, 12 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 4493ff57476d..3385f6add6df 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -501,7 +501,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+
+ bkey_init(k);
+
+- ca = c->cache_by_alloc[0];
++ ca = c->cache;
+ b = bch_bucket_alloc(ca, reserve, wait);
+ if (b == -1)
+ goto err;
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 5ff6e9573935..aa112c1adba1 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -519,9 +519,7 @@ struct cache_set {
+
+ struct cache_sb sb;
+
+- struct cache *cache[MAX_CACHES_PER_SET];
+- struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
+- int caches_loaded;
++ struct cache *cache;
+
+ struct bcache_device **devices;
+ unsigned int devices_max_used;
+@@ -808,7 +806,7 @@ static inline struct cache *PTR_CACHE(struct cache_set *c,
+ const struct bkey *k,
+ unsigned int ptr)
+ {
+- return c->cache[PTR_DEV(k, ptr)];
++ return c->cache;
+ }
+
+ static inline size_t PTR_BUCKET_NR(struct cache_set *c,
+@@ -890,7 +888,7 @@ do { \
+ /* Looping macros */
+
+ #define for_each_cache(ca, cs, iter) \
+- for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
++ for (iter = 0; ca = cs->cache, iter < 1; iter++)
+
+ #define for_each_bucket(b, ca) \
+ for (b = (ca)->buckets + (ca)->sb.first_bucket; \
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 7057ec48f3d1..e9ccfa17beb8 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1675,7 +1675,7 @@ static void cache_set_free(struct closure *cl)
+ for_each_cache(ca, c, i)
+ if (ca) {
+ ca->set = NULL;
+- c->cache[ca->sb.nr_this_dev] = NULL;
++ c->cache = NULL;
+ kobject_put(&ca->kobj);
+ }
+
+@@ -2166,7 +2166,7 @@ static const char *register_cache_set(struct cache *ca)
+
+ list_for_each_entry(c, &bch_cache_sets, list)
+ if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
+- if (c->cache[ca->sb.nr_this_dev])
++ if (c->cache)
+ return "duplicate cache set member";
+
+ if (!can_attach_cache(ca, c))
+@@ -2216,14 +2216,11 @@ static const char *register_cache_set(struct cache *ca)
+
+ kobject_get(&ca->kobj);
+ ca->set = c;
+- ca->set->cache[ca->sb.nr_this_dev] = ca;
+- c->cache_by_alloc[c->caches_loaded++] = ca;
++ ca->set->cache = ca;
+
+- if (c->caches_loaded == c->sb.nr_in_set) {
+- err = "failed to run cache set";
+- if (run_cache_set(c) < 0)
+- goto err;
+- }
++ err = "failed to run cache set";
++ if (run_cache_set(c) < 0)
++ goto err;
+
+ return NULL;
+ err:
+@@ -2240,8 +2237,8 @@ void bch_cache_release(struct kobject *kobj)
+ unsigned int i;
+
+ if (ca->set) {
+- BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
+- ca->set->cache[ca->sb.nr_this_dev] = NULL;
++ BUG_ON(ca->set->cache != ca);
++ ca->set->cache = NULL;
+ }
+
+ free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
+--
+2.26.2
+
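For reference, the interim for_each_cache() left behind by this patch
degenerates to a loop that runs exactly once, which is why the next
patch can remove it and let call sites use cs->cache directly. A small
userspace sketch of the equivalence:

#include <stdio.h>

struct cache { int id; };
struct cache_set { struct cache *cache; };

/* interim form from the patch above: always a single iteration */
#define for_each_cache(ca, cs, iter) \
        for (iter = 0; ca = (cs)->cache, iter < 1; iter++)

int main(void)
{
        struct cache only = { .id = 0 };
        struct cache_set cs = { .cache = &only };
        struct cache *ca;
        unsigned int i;

        for_each_cache(ca, &cs, i)
                printf("cache %d\n", ca->id);   /* runs once */

        ca = cs.cache;                          /* direct equivalent */
        printf("cache %d\n", ca->id);
        return 0;
}
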
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0003-bcache-remove-for_each_cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0003-bcache-remove-for_each_cache.patch
new file mode 100644
index 0000000..195c7a4
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0003-bcache-remove-for_each_cache.patch
@@ -0,0 +1,896 @@
+From 50516df3a606a49a170bb14e26ed595aff4c84d0 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 01:26:09 +0800
+Subject: [PATCH v2 03/15] bcache: remove for_each_cache()
+
+Since each cache_set now explicitly has a single cache,
+for_each_cache() is unnecessary. This patch removes the macro, updates
+all locations where it is used, and makes sure the code logic stays
+consistent.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/alloc.c | 17 ++-
+ drivers/md/bcache/bcache.h | 9 +-
+ drivers/md/bcache/btree.c | 103 +++++++---------
+ drivers/md/bcache/journal.c | 229 ++++++++++++++++-------------------
+ drivers/md/bcache/movinggc.c | 58 +++++----
+ drivers/md/bcache/super.c | 115 ++++++++----------
+ 6 files changed, 237 insertions(+), 294 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 3385f6add6df..1b8310992dd0 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -88,7 +88,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
+ struct cache *ca;
+ struct bucket *b;
+ unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
+- unsigned int i;
+ int r;
+
+ atomic_sub(sectors, &c->rescale);
+@@ -104,14 +103,14 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
+
+ c->min_prio = USHRT_MAX;
+
+- for_each_cache(ca, c, i)
+- for_each_bucket(b, ca)
+- if (b->prio &&
+- b->prio != BTREE_PRIO &&
+- !atomic_read(&b->pin)) {
+- b->prio--;
+- c->min_prio = min(c->min_prio, b->prio);
+- }
++ ca = c->cache;
++ for_each_bucket(b, ca)
++ if (b->prio &&
++ b->prio != BTREE_PRIO &&
++ !atomic_read(&b->pin)) {
++ b->prio--;
++ c->min_prio = min(c->min_prio, b->prio);
++ }
+
+ mutex_unlock(&c->bucket_lock);
+ }
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index aa112c1adba1..7ffe6b2d179b 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -887,9 +887,6 @@ do { \
+
+ /* Looping macros */
+
+-#define for_each_cache(ca, cs, iter) \
+- for (iter = 0; ca = cs->cache, iter < 1; iter++)
+-
+ #define for_each_bucket(b, ca) \
+ for (b = (ca)->buckets + (ca)->sb.first_bucket; \
+ b < (ca)->buckets + (ca)->sb.nbuckets; b++)
+@@ -931,11 +928,9 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
+
+ static inline void wake_up_allocators(struct cache_set *c)
+ {
+- struct cache *ca;
+- unsigned int i;
++ struct cache *ca = c->cache;
+
+- for_each_cache(ca, c, i)
+- wake_up_process(ca->alloc_thread);
++ wake_up_process(ca->alloc_thread);
+ }
+
+ static inline void closure_bio_submit(struct cache_set *c,
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index e2a719fed53b..0817ad510d9f 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1167,19 +1167,18 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
+ static int btree_check_reserve(struct btree *b, struct btree_op *op)
+ {
+ struct cache_set *c = b->c;
+- struct cache *ca;
+- unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
++ struct cache *ca = c->cache;
++ unsigned int reserve = (c->root->level - b->level) * 2 + 1;
+
+ mutex_lock(&c->bucket_lock);
+
+- for_each_cache(ca, c, i)
+- if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+- if (op)
+- prepare_to_wait(&c->btree_cache_wait, &op->wait,
+- TASK_UNINTERRUPTIBLE);
+- mutex_unlock(&c->bucket_lock);
+- return -EINTR;
+- }
++ if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
++ if (op)
++ prepare_to_wait(&c->btree_cache_wait, &op->wait,
++ TASK_UNINTERRUPTIBLE);
++ mutex_unlock(&c->bucket_lock);
++ return -EINTR;
++ }
+
+ mutex_unlock(&c->bucket_lock);
+
+@@ -1695,7 +1694,6 @@ static void btree_gc_start(struct cache_set *c)
+ {
+ struct cache *ca;
+ struct bucket *b;
+- unsigned int i;
+
+ if (!c->gc_mark_valid)
+ return;
+@@ -1705,14 +1703,14 @@ static void btree_gc_start(struct cache_set *c)
+ c->gc_mark_valid = 0;
+ c->gc_done = ZERO_KEY;
+
+- for_each_cache(ca, c, i)
+- for_each_bucket(b, ca) {
+- b->last_gc = b->gen;
+- if (!atomic_read(&b->pin)) {
+- SET_GC_MARK(b, 0);
+- SET_GC_SECTORS_USED(b, 0);
+- }
++ ca = c->cache;
++ for_each_bucket(b, ca) {
++ b->last_gc = b->gen;
++ if (!atomic_read(&b->pin)) {
++ SET_GC_MARK(b, 0);
++ SET_GC_SECTORS_USED(b, 0);
+ }
++ }
+
+ mutex_unlock(&c->bucket_lock);
+ }
+@@ -1721,7 +1719,8 @@ static void bch_btree_gc_finish(struct cache_set *c)
+ {
+ struct bucket *b;
+ struct cache *ca;
+- unsigned int i;
++ unsigned int i, j;
++ uint64_t *k;
+
+ mutex_lock(&c->bucket_lock);
+
+@@ -1739,7 +1738,6 @@ static void bch_btree_gc_finish(struct cache_set *c)
+ struct bcache_device *d = c->devices[i];
+ struct cached_dev *dc;
+ struct keybuf_key *w, *n;
+- unsigned int j;
+
+ if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+@@ -1756,29 +1754,27 @@ static void bch_btree_gc_finish(struct cache_set *c)
+ rcu_read_unlock();
+
+ c->avail_nbuckets = 0;
+- for_each_cache(ca, c, i) {
+- uint64_t *i;
+
+- ca->invalidate_needs_gc = 0;
++ ca = c->cache;
++ ca->invalidate_needs_gc = 0;
+
+- for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
+- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
++ for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
++ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
+
+- for (i = ca->prio_buckets;
+- i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
+- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
++ for (k = ca->prio_buckets;
++ k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
++ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
+
+- for_each_bucket(b, ca) {
+- c->need_gc = max(c->need_gc, bucket_gc_gen(b));
++ for_each_bucket(b, ca) {
++ c->need_gc = max(c->need_gc, bucket_gc_gen(b));
+
+- if (atomic_read(&b->pin))
+- continue;
++ if (atomic_read(&b->pin))
++ continue;
+
+- BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
++ BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
+
+- if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
+- c->avail_nbuckets++;
+- }
++ if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
++ c->avail_nbuckets++;
+ }
+
+ mutex_unlock(&c->bucket_lock);
+@@ -1830,12 +1826,10 @@ static void bch_btree_gc(struct cache_set *c)
+
+ static bool gc_should_run(struct cache_set *c)
+ {
+- struct cache *ca;
+- unsigned int i;
++ struct cache *ca = c->cache;
+
+- for_each_cache(ca, c, i)
+- if (ca->invalidate_needs_gc)
+- return true;
++ if (ca->invalidate_needs_gc)
++ return true;
+
+ if (atomic_read(&c->sectors_to_gc) < 0)
+ return true;
+@@ -2081,9 +2075,8 @@ int bch_btree_check(struct cache_set *c)
+
+ void bch_initial_gc_finish(struct cache_set *c)
+ {
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ struct bucket *b;
+- unsigned int i;
+
+ bch_btree_gc_finish(c);
+
+@@ -2098,20 +2091,18 @@ void bch_initial_gc_finish(struct cache_set *c)
+ * This is only safe for buckets that have no live data in them, which
+ * there should always be some of.
+ */
+- for_each_cache(ca, c, i) {
+- for_each_bucket(b, ca) {
+- if (fifo_full(&ca->free[RESERVE_PRIO]) &&
+- fifo_full(&ca->free[RESERVE_BTREE]))
+- break;
++ for_each_bucket(b, ca) {
++ if (fifo_full(&ca->free[RESERVE_PRIO]) &&
++ fifo_full(&ca->free[RESERVE_BTREE]))
++ break;
+
+- if (bch_can_invalidate_bucket(ca, b) &&
+- !GC_MARK(b)) {
+- __bch_invalidate_one_bucket(ca, b);
+- if (!fifo_push(&ca->free[RESERVE_PRIO],
+- b - ca->buckets))
+- fifo_push(&ca->free[RESERVE_BTREE],
+- b - ca->buckets);
+- }
++ if (bch_can_invalidate_bucket(ca, b) &&
++ !GC_MARK(b)) {
++ __bch_invalidate_one_bucket(ca, b);
++ if (!fifo_push(&ca->free[RESERVE_PRIO],
++ b - ca->buckets))
++ fifo_push(&ca->free[RESERVE_BTREE],
++ b - ca->buckets);
+ }
+ }
+
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 77fbfd52edcf..027d0f8c4daf 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -179,112 +179,109 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
+ ret; \
+ })
+
+- struct cache *ca;
+- unsigned int iter;
++ struct cache *ca = c->cache;
+ int ret = 0;
++ struct journal_device *ja = &ca->journal;
++ DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
++ unsigned int i, l, r, m;
++ uint64_t seq;
+
+- for_each_cache(ca, c, iter) {
+- struct journal_device *ja = &ca->journal;
+- DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
+- unsigned int i, l, r, m;
+- uint64_t seq;
+-
+- bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
+- pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
++ bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
++ pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
+
++ /*
++ * Read journal buckets ordered by golden ratio hash to quickly
++ * find a sequence of buckets with valid journal entries
++ */
++ for (i = 0; i < ca->sb.njournal_buckets; i++) {
+ /*
+- * Read journal buckets ordered by golden ratio hash to quickly
+- * find a sequence of buckets with valid journal entries
++ * We must try the index l with ZERO first for
++ * correctness due to the scenario that the journal
++ * bucket is circular buffer which might have wrapped
+ */
+- for (i = 0; i < ca->sb.njournal_buckets; i++) {
+- /*
+- * We must try the index l with ZERO first for
+- * correctness due to the scenario that the journal
+- * bucket is circular buffer which might have wrapped
+- */
+- l = (i * 2654435769U) % ca->sb.njournal_buckets;
++ l = (i * 2654435769U) % ca->sb.njournal_buckets;
+
+- if (test_bit(l, bitmap))
+- break;
++ if (test_bit(l, bitmap))
++ break;
+
+- if (read_bucket(l))
+- goto bsearch;
+- }
++ if (read_bucket(l))
++ goto bsearch;
++ }
+
+- /*
+- * If that fails, check all the buckets we haven't checked
+- * already
+- */
+- pr_debug("falling back to linear search\n");
++ /*
++ * If that fails, check all the buckets we haven't checked
++ * already
++ */
++ pr_debug("falling back to linear search\n");
+
+- for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
+- if (read_bucket(l))
+- goto bsearch;
++ for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
++ if (read_bucket(l))
++ goto bsearch;
+
+- /* no journal entries on this device? */
+- if (l == ca->sb.njournal_buckets)
+- continue;
++ /* no journal entries on this device? */
++ if (l == ca->sb.njournal_buckets)
++ goto out;
+ bsearch:
+- BUG_ON(list_empty(list));
++ BUG_ON(list_empty(list));
+
+- /* Binary search */
+- m = l;
+- r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
+- pr_debug("starting binary search, l %u r %u\n", l, r);
++ /* Binary search */
++ m = l;
++ r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
++ pr_debug("starting binary search, l %u r %u\n", l, r);
+
+- while (l + 1 < r) {
+- seq = list_entry(list->prev, struct journal_replay,
+- list)->j.seq;
++ while (l + 1 < r) {
++ seq = list_entry(list->prev, struct journal_replay,
++ list)->j.seq;
+
+- m = (l + r) >> 1;
+- read_bucket(m);
++ m = (l + r) >> 1;
++ read_bucket(m);
+
+- if (seq != list_entry(list->prev, struct journal_replay,
+- list)->j.seq)
+- l = m;
+- else
+- r = m;
+- }
++ if (seq != list_entry(list->prev, struct journal_replay,
++ list)->j.seq)
++ l = m;
++ else
++ r = m;
++ }
+
+- /*
+- * Read buckets in reverse order until we stop finding more
+- * journal entries
+- */
+- pr_debug("finishing up: m %u njournal_buckets %u\n",
+- m, ca->sb.njournal_buckets);
+- l = m;
++ /*
++ * Read buckets in reverse order until we stop finding more
++ * journal entries
++ */
++ pr_debug("finishing up: m %u njournal_buckets %u\n",
++ m, ca->sb.njournal_buckets);
++ l = m;
+
+- while (1) {
+- if (!l--)
+- l = ca->sb.njournal_buckets - 1;
++ while (1) {
++ if (!l--)
++ l = ca->sb.njournal_buckets - 1;
+
+- if (l == m)
+- break;
++ if (l == m)
++ break;
+
+- if (test_bit(l, bitmap))
+- continue;
++ if (test_bit(l, bitmap))
++ continue;
+
+- if (!read_bucket(l))
+- break;
+- }
++ if (!read_bucket(l))
++ break;
++ }
+
+- seq = 0;
++ seq = 0;
+
+- for (i = 0; i < ca->sb.njournal_buckets; i++)
+- if (ja->seq[i] > seq) {
+- seq = ja->seq[i];
+- /*
+- * When journal_reclaim() goes to allocate for
+- * the first time, it'll use the bucket after
+- * ja->cur_idx
+- */
+- ja->cur_idx = i;
+- ja->last_idx = ja->discard_idx = (i + 1) %
+- ca->sb.njournal_buckets;
++ for (i = 0; i < ca->sb.njournal_buckets; i++)
++ if (ja->seq[i] > seq) {
++ seq = ja->seq[i];
++ /*
++ * When journal_reclaim() goes to allocate for
++ * the first time, it'll use the bucket after
++ * ja->cur_idx
++ */
++ ja->cur_idx = i;
++ ja->last_idx = ja->discard_idx = (i + 1) %
++ ca->sb.njournal_buckets;
+
+- }
+- }
++ }
+
++out:
+ if (!list_empty(list))
+ c->journal.seq = list_entry(list->prev,
+ struct journal_replay,
+@@ -342,12 +339,10 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
+
+ static bool is_discard_enabled(struct cache_set *s)
+ {
+- struct cache *ca;
+- unsigned int i;
++ struct cache *ca = s->cache;
+
+- for_each_cache(ca, s, i)
+- if (ca->discard)
+- return true;
++ if (ca->discard)
++ return true;
+
+ return false;
+ }
+@@ -633,9 +628,10 @@ static void do_journal_discard(struct cache *ca)
+ static void journal_reclaim(struct cache_set *c)
+ {
+ struct bkey *k = &c->journal.key;
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ uint64_t last_seq;
+- unsigned int iter, n = 0;
++ unsigned int next;
++ struct journal_device *ja = &ca->journal;
+ atomic_t p __maybe_unused;
+
+ atomic_long_inc(&c->reclaim);
+@@ -647,46 +643,31 @@ static void journal_reclaim(struct cache_set *c)
+
+ /* Update last_idx */
+
+- for_each_cache(ca, c, iter) {
+- struct journal_device *ja = &ca->journal;
+-
+- while (ja->last_idx != ja->cur_idx &&
+- ja->seq[ja->last_idx] < last_seq)
+- ja->last_idx = (ja->last_idx + 1) %
+- ca->sb.njournal_buckets;
+- }
++ while (ja->last_idx != ja->cur_idx &&
++ ja->seq[ja->last_idx] < last_seq)
++ ja->last_idx = (ja->last_idx + 1) %
++ ca->sb.njournal_buckets;
+
+- for_each_cache(ca, c, iter)
+- do_journal_discard(ca);
++ do_journal_discard(ca);
+
+ if (c->journal.blocks_free)
+ goto out;
+
+- /*
+- * Allocate:
+- * XXX: Sort by free journal space
+- */
+-
+- for_each_cache(ca, c, iter) {
+- struct journal_device *ja = &ca->journal;
+- unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
++ next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
++ /* No space available on this device */
++ if (next == ja->discard_idx)
++ goto out;
+
+- /* No space available on this device */
+- if (next == ja->discard_idx)
+- continue;
++ ja->cur_idx = next;
++ k->ptr[0] = MAKE_PTR(0,
++ bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
++ ca->sb.nr_this_dev);
++ atomic_long_inc(&c->reclaimed_journal_buckets);
+
+- ja->cur_idx = next;
+- k->ptr[n++] = MAKE_PTR(0,
+- bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
+- ca->sb.nr_this_dev);
+- atomic_long_inc(&c->reclaimed_journal_buckets);
+- }
++ bkey_init(k);
++ SET_KEY_PTRS(k, 1);
++ c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+
+- if (n) {
+- bkey_init(k);
+- SET_KEY_PTRS(k, n);
+- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+- }
+ out:
+ if (!journal_full(&c->journal))
+ __closure_wake_up(&c->journal.wait);
+@@ -750,7 +731,7 @@ static void journal_write_unlocked(struct closure *cl)
+ __releases(c->journal.lock)
+ {
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ struct journal_write *w = c->journal.cur;
+ struct bkey *k = &c->journal.key;
+ unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
+@@ -780,9 +761,7 @@ static void journal_write_unlocked(struct closure *cl)
+ bkey_copy(&w->data->btree_root, &c->root->key);
+ bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
+
+- for_each_cache(ca, c, i)
+- w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
+-
++ w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
+ w->data->magic = jset_magic(&c->sb);
+ w->data->version = BCACHE_JSET_VERSION;
+ w->data->last_seq = last_seq(&c->journal);
+diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
+index 5872d6470470..b9c3d27ec093 100644
+--- a/drivers/md/bcache/movinggc.c
++++ b/drivers/md/bcache/movinggc.c
+@@ -196,50 +196,48 @@ static unsigned int bucket_heap_top(struct cache *ca)
+
+ void bch_moving_gc(struct cache_set *c)
+ {
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ struct bucket *b;
+- unsigned int i;
++ unsigned long sectors_to_move, reserve_sectors;
+
+ if (!c->copy_gc_enabled)
+ return;
+
+ mutex_lock(&c->bucket_lock);
+
+- for_each_cache(ca, c, i) {
+- unsigned long sectors_to_move = 0;
+- unsigned long reserve_sectors = ca->sb.bucket_size *
++ sectors_to_move = 0;
++ reserve_sectors = ca->sb.bucket_size *
+ fifo_used(&ca->free[RESERVE_MOVINGGC]);
+
+- ca->heap.used = 0;
+-
+- for_each_bucket(b, ca) {
+- if (GC_MARK(b) == GC_MARK_METADATA ||
+- !GC_SECTORS_USED(b) ||
+- GC_SECTORS_USED(b) == ca->sb.bucket_size ||
+- atomic_read(&b->pin))
+- continue;
+-
+- if (!heap_full(&ca->heap)) {
+- sectors_to_move += GC_SECTORS_USED(b);
+- heap_add(&ca->heap, b, bucket_cmp);
+- } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+- sectors_to_move -= bucket_heap_top(ca);
+- sectors_to_move += GC_SECTORS_USED(b);
+-
+- ca->heap.data[0] = b;
+- heap_sift(&ca->heap, 0, bucket_cmp);
+- }
+- }
++ ca->heap.used = 0;
++
++ for_each_bucket(b, ca) {
++ if (GC_MARK(b) == GC_MARK_METADATA ||
++ !GC_SECTORS_USED(b) ||
++ GC_SECTORS_USED(b) == ca->sb.bucket_size ||
++ atomic_read(&b->pin))
++ continue;
+
+- while (sectors_to_move > reserve_sectors) {
+- heap_pop(&ca->heap, b, bucket_cmp);
+- sectors_to_move -= GC_SECTORS_USED(b);
++ if (!heap_full(&ca->heap)) {
++ sectors_to_move += GC_SECTORS_USED(b);
++ heap_add(&ca->heap, b, bucket_cmp);
++ } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
++ sectors_to_move -= bucket_heap_top(ca);
++ sectors_to_move += GC_SECTORS_USED(b);
++
++ ca->heap.data[0] = b;
++ heap_sift(&ca->heap, 0, bucket_cmp);
+ }
++ }
+
+- while (heap_pop(&ca->heap, b, bucket_cmp))
+- SET_GC_MOVE(b, 1);
++ while (sectors_to_move > reserve_sectors) {
++ heap_pop(&ca->heap, b, bucket_cmp);
++ sectors_to_move -= GC_SECTORS_USED(b);
+ }
+
++ while (heap_pop(&ca->heap, b, bucket_cmp))
++ SET_GC_MOVE(b, 1);
++
+ mutex_unlock(&c->bucket_lock);
+
+ c->moving_gc_keys.last_scanned = ZERO_KEY;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index e9ccfa17beb8..91883d5c4b62 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -343,8 +343,9 @@ static void bcache_write_super_unlock(struct closure *cl)
+ void bcache_write_super(struct cache_set *c)
+ {
+ struct closure *cl = &c->sb_write;
+- struct cache *ca;
+- unsigned int i, version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
++ struct cache *ca = c->cache;
++ struct bio *bio = &ca->sb_bio;
++ unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
+
+ down(&c->sb_write_mutex);
+ closure_init(cl, &c->cl);
+@@ -354,23 +355,19 @@ void bcache_write_super(struct cache_set *c)
+ if (c->sb.version > version)
+ version = c->sb.version;
+
+- for_each_cache(ca, c, i) {
+- struct bio *bio = &ca->sb_bio;
+-
+- ca->sb.version = version;
+- ca->sb.seq = c->sb.seq;
+- ca->sb.last_mount = c->sb.last_mount;
++ ca->sb.version = version;
++ ca->sb.seq = c->sb.seq;
++ ca->sb.last_mount = c->sb.last_mount;
+
+- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
++ SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
+
+- bio_init(bio, ca->sb_bv, 1);
+- bio_set_dev(bio, ca->bdev);
+- bio->bi_end_io = write_super_endio;
+- bio->bi_private = ca;
++ bio_init(bio, ca->sb_bv, 1);
++ bio_set_dev(bio, ca->bdev);
++ bio->bi_end_io = write_super_endio;
++ bio->bi_private = ca;
+
+- closure_get(cl);
+- __write_super(&ca->sb, ca->sb_disk, bio);
+- }
++ closure_get(cl);
++ __write_super(&ca->sb, ca->sb_disk, bio);
+
+ closure_return_with_destructor(cl, bcache_write_super_unlock);
+ }
+@@ -772,26 +769,22 @@ static void bcache_device_unlink(struct bcache_device *d)
+ lockdep_assert_held(&bch_register_lock);
+
+ if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
+- unsigned int i;
+- struct cache *ca;
++ struct cache *ca = d->c->cache;
+
+ sysfs_remove_link(&d->c->kobj, d->name);
+ sysfs_remove_link(&d->kobj, "cache");
+
+- for_each_cache(ca, d->c, i)
+- bd_unlink_disk_holder(ca->bdev, d->disk);
++ bd_unlink_disk_holder(ca->bdev, d->disk);
+ }
+ }
+
+ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
+ const char *name)
+ {
+- unsigned int i;
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ int ret;
+
+- for_each_cache(ca, d->c, i)
+- bd_link_disk_holder(ca->bdev, d->disk);
++ bd_link_disk_holder(ca->bdev, d->disk);
+
+ snprintf(d->name, BCACHEDEVNAME_SIZE,
+ "%s%u", name, d->id);
+@@ -1663,7 +1656,6 @@ static void cache_set_free(struct closure *cl)
+ {
+ struct cache_set *c = container_of(cl, struct cache_set, cl);
+ struct cache *ca;
+- unsigned int i;
+
+ debugfs_remove(c->debug);
+
+@@ -1672,12 +1664,12 @@ static void cache_set_free(struct closure *cl)
+ bch_journal_free(c);
+
+ mutex_lock(&bch_register_lock);
+- for_each_cache(ca, c, i)
+- if (ca) {
+- ca->set = NULL;
+- c->cache = NULL;
+- kobject_put(&ca->kobj);
+- }
++ ca = c->cache;
++ if (ca) {
++ ca->set = NULL;
++ c->cache = NULL;
++ kobject_put(&ca->kobj);
++ }
+
+ bch_bset_sort_state_free(&c->sort);
+ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
+@@ -1703,9 +1695,8 @@ static void cache_set_free(struct closure *cl)
+ static void cache_set_flush(struct closure *cl)
+ {
+ struct cache_set *c = container_of(cl, struct cache_set, caching);
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ struct btree *b;
+- unsigned int i;
+
+ bch_cache_accounting_destroy(&c->accounting);
+
+@@ -1730,9 +1721,8 @@ static void cache_set_flush(struct closure *cl)
+ mutex_unlock(&b->write_lock);
+ }
+
+- for_each_cache(ca, c, i)
+- if (ca->alloc_thread)
+- kthread_stop(ca->alloc_thread);
++ if (ca->alloc_thread)
++ kthread_stop(ca->alloc_thread);
+
+ if (c->journal.cur) {
+ cancel_delayed_work_sync(&c->journal.work);
+@@ -1973,16 +1963,14 @@ static int run_cache_set(struct cache_set *c)
+ {
+ const char *err = "cannot allocate memory";
+ struct cached_dev *dc, *t;
+- struct cache *ca;
++ struct cache *ca = c->cache;
+ struct closure cl;
+- unsigned int i;
+ LIST_HEAD(journal);
+ struct journal_replay *l;
+
+ closure_init_stack(&cl);
+
+- for_each_cache(ca, c, i)
+- c->nbuckets += ca->sb.nbuckets;
++ c->nbuckets = ca->sb.nbuckets;
+ set_gc_sectors(c);
+
+ if (CACHE_SYNC(&c->sb)) {
+@@ -2002,10 +1990,8 @@ static int run_cache_set(struct cache_set *c)
+ j = &list_entry(journal.prev, struct journal_replay, list)->j;
+
+ err = "IO error reading priorities";
+- for_each_cache(ca, c, i) {
+- if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
+- goto err;
+- }
++ if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
++ goto err;
+
+ /*
+ * If prio_read() fails it'll call cache_set_error and we'll
+@@ -2049,9 +2035,8 @@ static int run_cache_set(struct cache_set *c)
+ bch_journal_next(&c->journal);
+
+ err = "error starting allocator thread";
+- for_each_cache(ca, c, i)
+- if (bch_cache_allocator_start(ca))
+- goto err;
++ if (bch_cache_allocator_start(ca))
++ goto err;
+
+ /*
+ * First place it's safe to allocate: btree_check() and
+@@ -2070,28 +2055,23 @@ static int run_cache_set(struct cache_set *c)
+ if (bch_journal_replay(c, &journal))
+ goto err;
+ } else {
+- pr_notice("invalidating existing data\n");
+-
+- for_each_cache(ca, c, i) {
+- unsigned int j;
++ unsigned int j;
+
+- ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
+- 2, SB_JOURNAL_BUCKETS);
++ pr_notice("invalidating existing data\n");
++ ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
++ 2, SB_JOURNAL_BUCKETS);
+
+- for (j = 0; j < ca->sb.keys; j++)
+- ca->sb.d[j] = ca->sb.first_bucket + j;
+- }
++ for (j = 0; j < ca->sb.keys; j++)
++ ca->sb.d[j] = ca->sb.first_bucket + j;
+
+ bch_initial_gc_finish(c);
+
+ err = "error starting allocator thread";
+- for_each_cache(ca, c, i)
+- if (bch_cache_allocator_start(ca))
+- goto err;
++ if (bch_cache_allocator_start(ca))
++ goto err;
+
+ mutex_lock(&c->bucket_lock);
+- for_each_cache(ca, c, i)
+- bch_prio_write(ca, true);
++ bch_prio_write(ca, true);
+ mutex_unlock(&c->bucket_lock);
+
+ err = "cannot allocate new UUID bucket";
+@@ -2467,13 +2447,14 @@ static bool bch_is_open_backing(struct block_device *bdev)
+ static bool bch_is_open_cache(struct block_device *bdev)
+ {
+ struct cache_set *c, *tc;
+- struct cache *ca;
+- unsigned int i;
+
+- list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+- for_each_cache(ca, c, i)
+- if (ca->bdev == bdev)
+- return true;
++ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
++ struct cache *ca = c->cache;
++
++ if (ca->bdev == bdev)
++ return true;
++ }
++
+ return false;
+ }
+
+--
+2.26.2
+
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch
new file mode 100644
index 0000000..61e18a8
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0004-bcache-add-set_uuid-in-struct-cache_set.patch
@@ -0,0 +1,173 @@
+From 5f709f50fb5302b446ab136dd4673a68051b9299 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 20:12:07 +0800
+Subject: [PATCH v2 04/15] bcache: add set_uuid in struct cache_set
+
+This patch adds a separate set_uuid[16] in struct cache_set, to store
+the UUID of the cache set. This is a preparation for removing the
+embedded struct cache_sb from struct cache_set.
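+
+For example (taken from the hunks below), the UUID is copied into the
+new field when the cache set is allocated, and then referenced directly
+instead of going through the embedded super block:
+
+	/* in bch_cache_set_alloc() */
+	memcpy(c->set_uuid, sb->set_uuid, 16);
+
+	/* callers switch from c->sb.set_uuid to c->set_uuid */
+	snprintf(name, 50, "bcache-%pU", c->set_uuid);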
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/bcache.h | 1 +
+ drivers/md/bcache/debug.c | 2 +-
+ drivers/md/bcache/super.c | 24 ++++++++++++------------
+ include/trace/events/bcache.h | 4 ++--
+ 4 files changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 7ffe6b2d179b..94a62acac4fc 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -668,6 +668,7 @@ struct cache_set {
+ struct mutex verify_lock;
+ #endif
+
++ uint8_t set_uuid[16];
+ unsigned int nr_uuids;
+ struct uuid_entry *uuids;
+ BKEY_PADDED(uuid_bucket);
+diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
+index 336f43910383..0ccc1b0baa42 100644
+--- a/drivers/md/bcache/debug.c
++++ b/drivers/md/bcache/debug.c
+@@ -238,7 +238,7 @@ void bch_debug_init_cache_set(struct cache_set *c)
+ if (!IS_ERR_OR_NULL(bcache_debug)) {
+ char name[50];
+
+- snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
++ snprintf(name, 50, "bcache-%pU", c->set_uuid);
+ c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
+ &cache_set_debug_ops);
+ }
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 91883d5c4b62..90a419ad6445 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1189,8 +1189,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ struct cached_dev *exist_dc, *t;
+ int ret = 0;
+
+- if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
+- (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
++ if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
++ (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
+ return -ENOENT;
+
+ if (dc->disk.c) {
+@@ -1262,7 +1262,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ u->first_reg = u->last_reg = rtime;
+ bch_uuid_write(c);
+
+- memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
++ memcpy(dc->sb.set_uuid, c->set_uuid, 16);
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+
+ bch_write_bdev_super(dc, &cl);
+@@ -1324,7 +1324,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ pr_info("Caching %s as %s on set %pU\n",
+ dc->backing_dev_name,
+ dc->disk.disk->disk_name,
+- dc->disk.c->sb.set_uuid);
++ dc->disk.c->set_uuid);
+ return 0;
+ }
+
+@@ -1632,7 +1632,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
+ vaf.va = &args;
+
+ pr_err("error on %pU: %pV, disabling caching\n",
+- c->sb.set_uuid, &vaf);
++ c->set_uuid, &vaf);
+
+ va_end(args);
+
+@@ -1685,7 +1685,7 @@ static void cache_set_free(struct closure *cl)
+ list_del(&c->list);
+ mutex_unlock(&bch_register_lock);
+
+- pr_info("Cache set %pU unregistered\n", c->sb.set_uuid);
++ pr_info("Cache set %pU unregistered\n", c->set_uuid);
+ wake_up(&unregister_wait);
+
+ closure_debug_destroy(&c->cl);
+@@ -1755,7 +1755,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
+ {
+ if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
+ pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
+- d->disk->disk_name, c->sb.set_uuid);
++ d->disk->disk_name, c->set_uuid);
+ bcache_device_stop(d);
+ } else if (atomic_read(&dc->has_dirty)) {
+ /*
+@@ -1862,7 +1862,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+
+ bch_cache_accounting_init(&c->accounting, &c->cl);
+
+- memcpy(c->sb.set_uuid, sb->set_uuid, 16);
++ memcpy(c->set_uuid, sb->set_uuid, 16);
+ c->sb.block_size = sb->block_size;
+ c->sb.bucket_size = sb->bucket_size;
+ c->sb.nr_in_set = sb->nr_in_set;
+@@ -2145,7 +2145,7 @@ static const char *register_cache_set(struct cache *ca)
+ struct cache_set *c;
+
+ list_for_each_entry(c, &bch_cache_sets, list)
+- if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
++ if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
+ if (c->cache)
+ return "duplicate cache set member";
+
+@@ -2163,7 +2163,7 @@ static const char *register_cache_set(struct cache *ca)
+ return err;
+
+ err = "error creating kobject";
+- if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
++ if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
+ kobject_add(&c->internal, &c->kobj, "internal"))
+ goto err;
+
+@@ -2188,7 +2188,7 @@ static const char *register_cache_set(struct cache *ca)
+ */
+ if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
+ c->sb.version = ca->sb.version;
+- memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
++ memcpy(c->set_uuid, ca->sb.set_uuid, 16);
+ c->sb.flags = ca->sb.flags;
+ c->sb.seq = ca->sb.seq;
+ pr_debug("set version = %llu\n", c->sb.version);
+@@ -2698,7 +2698,7 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
+ list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
+ char *pdev_set_uuid = pdev->dc->sb.set_uuid;
+- char *set_uuid = c->sb.uuid;
++ char *set_uuid = c->set_uuid;
+
+ if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
+ list_del(&pdev->list);
+diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
+index 0bddea663b3b..e41c611d6d3b 100644
+--- a/include/trace/events/bcache.h
++++ b/include/trace/events/bcache.h
+@@ -164,7 +164,7 @@ TRACE_EVENT(bcache_write,
+ ),
+
+ TP_fast_assign(
+- memcpy(__entry->uuid, c->sb.set_uuid, 16);
++ memcpy(__entry->uuid, c->set_uuid, 16);
+ __entry->inode = inode;
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->nr_sector = bio->bi_iter.bi_size >> 9;
+@@ -200,7 +200,7 @@ DECLARE_EVENT_CLASS(cache_set,
+ ),
+
+ TP_fast_assign(
+- memcpy(__entry->uuid, c->sb.set_uuid, 16);
++ memcpy(__entry->uuid, c->set_uuid, 16);
+ ),
+
+ TP_printk("%pU", __entry->uuid)
+--
+2.26.2
+
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch
new file mode 100644
index 0000000..1fb42d9
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0005-bcache-only-use-block_bytes-on-struct-cache.patch
@@ -0,0 +1,258 @@
+From 178fa57c56550568bf0d4140d8dc689cc6c11682 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 21:25:58 +0800
+Subject: [PATCH v2 05/15] bcache: only use block_bytes() on struct cache
+
+Because struct cache_set and struct cache both have struct cache_sb,
+macro block_bytes() can be used on both of them. Once the embedded
+struct cache_sb is removed from struct cache_set, this macro can no
+longer be used on struct cache_set.
+
+This patch unifies all block_bytes() usage to struct cache only; this is
+one of the preparations for removing the embedded struct cache_sb from
+struct cache_set.
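+
+The macro body itself is unchanged; only its parameter is now expected
+to be a struct cache, so callers pass the set's single cache, e.g. (from
+the btree.c hunk below):
+
+	#define block_bytes(ca) ((ca)->sb.block_size << 9)
+
+	return ((void *) btree_bset_first(b)) +
+	       b->written * block_bytes(b->c->cache);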
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/bcache.h | 2 +-
+ drivers/md/bcache/btree.c | 24 ++++++++++++------------
+ drivers/md/bcache/debug.c | 8 ++++----
+ drivers/md/bcache/journal.c | 8 ++++----
+ drivers/md/bcache/request.c | 2 +-
+ drivers/md/bcache/super.c | 2 +-
+ drivers/md/bcache/sysfs.c | 2 +-
+ 7 files changed, 24 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 94a62acac4fc..29bec61cafbb 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -759,7 +759,7 @@ struct bbio {
+
+ #define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
+ #define bucket_bytes(c) ((c)->sb.bucket_size << 9)
+-#define block_bytes(c) ((c)->sb.block_size << 9)
++#define block_bytes(ca) ((ca)->sb.block_size << 9)
+
+ static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
+ {
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 0817ad510d9f..c91b4d58a5b3 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -104,7 +104,7 @@
+
+ static inline struct bset *write_block(struct btree *b)
+ {
+- return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
++ return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
+ }
+
+ static void bch_btree_init_next(struct btree *b)
+@@ -173,7 +173,7 @@ void bch_btree_node_read_done(struct btree *b)
+ goto err;
+
+ err = "bad btree header";
+- if (b->written + set_blocks(i, block_bytes(b->c)) >
++ if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
+ btree_blocks(b))
+ goto err;
+
+@@ -199,13 +199,13 @@ void bch_btree_node_read_done(struct btree *b)
+
+ bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
+
+- b->written += set_blocks(i, block_bytes(b->c));
++ b->written += set_blocks(i, block_bytes(b->c->cache));
+ }
+
+ err = "corrupted btree";
+ for (i = write_block(b);
+ bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
+- i = ((void *) i) + block_bytes(b->c))
++ i = ((void *) i) + block_bytes(b->c->cache))
+ if (i->seq == b->keys.set[0].data->seq)
+ goto err;
+
+@@ -347,7 +347,7 @@ static void do_btree_node_write(struct btree *b)
+
+ b->bio->bi_end_io = btree_node_write_endio;
+ b->bio->bi_private = cl;
+- b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
++ b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
+ b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
+ bch_bio_map(b->bio, i);
+
+@@ -423,10 +423,10 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
+
+ do_btree_node_write(b);
+
+- atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
++ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
+ &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
+
+- b->written += set_blocks(i, block_bytes(b->c));
++ b->written += set_blocks(i, block_bytes(b->c->cache));
+ }
+
+ void bch_btree_node_write(struct btree *b, struct closure *parent)
+@@ -1344,7 +1344,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+
+ if (nodes < 2 ||
+ __set_blocks(b->keys.set[0].data, keys,
+- block_bytes(b->c)) > blocks * (nodes - 1))
++ block_bytes(b->c->cache)) > blocks * (nodes - 1))
+ return 0;
+
+ for (i = 0; i < nodes; i++) {
+@@ -1378,7 +1378,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ k = bkey_next(k)) {
+ if (__set_blocks(n1, n1->keys + keys +
+ bkey_u64s(k),
+- block_bytes(b->c)) > blocks)
++ block_bytes(b->c->cache)) > blocks)
+ break;
+
+ last = k;
+@@ -1394,7 +1394,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ * though)
+ */
+ if (__set_blocks(n1, n1->keys + n2->keys,
+- block_bytes(b->c)) >
++ block_bytes(b->c->cache)) >
+ btree_blocks(new_nodes[i]))
+ goto out_unlock_nocoalesce;
+
+@@ -1403,7 +1403,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ last = &r->b->key;
+ }
+
+- BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
++ BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
+ btree_blocks(new_nodes[i]));
+
+ if (last)
+@@ -2210,7 +2210,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
+ goto err;
+
+ split = set_blocks(btree_bset_first(n1),
+- block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
++ block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
+
+ if (split) {
+ unsigned int keys = 0;
+diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
+index 0ccc1b0baa42..b00fd08d696b 100644
+--- a/drivers/md/bcache/debug.c
++++ b/drivers/md/bcache/debug.c
+@@ -25,8 +25,8 @@ struct dentry *bcache_debug;
+ for (i = (start); \
+ (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
+ i->seq == (start)->seq; \
+- i = (void *) i + set_blocks(i, block_bytes(b->c)) * \
+- block_bytes(b->c))
++ i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) * \
++ block_bytes(b->c->cache))
+
+ void bch_btree_verify(struct btree *b)
+ {
+@@ -82,14 +82,14 @@ void bch_btree_verify(struct btree *b)
+
+ for_each_written_bset(b, ondisk, i) {
+ unsigned int block = ((void *) i - (void *) ondisk) /
+- block_bytes(b->c);
++ block_bytes(b->c->cache);
+
+ pr_err("*** on disk block %u:\n", block);
+ bch_dump_bset(&b->keys, i, block);
+ }
+
+ pr_err("*** block %zu not written\n",
+- ((void *) i - (void *) ondisk) / block_bytes(b->c));
++ ((void *) i - (void *) ondisk) / block_bytes(b->c->cache));
+
+ for (j = 0; j < inmemory->keys; j++)
+ if (inmemory->d[j] != sorted->d[j])
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 027d0f8c4daf..ccd5de0ab0fe 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -98,7 +98,7 @@ reread: left = ca->sb.bucket_size - offset;
+ return ret;
+ }
+
+- blocks = set_blocks(j, block_bytes(ca->set));
++ blocks = set_blocks(j, block_bytes(ca));
+
+ /*
+ * Nodes in 'list' are in linear increasing order of
+@@ -734,7 +734,7 @@ static void journal_write_unlocked(struct closure *cl)
+ struct cache *ca = c->cache;
+ struct journal_write *w = c->journal.cur;
+ struct bkey *k = &c->journal.key;
+- unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
++ unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
+ c->sb.block_size;
+
+ struct bio *bio;
+@@ -754,7 +754,7 @@ static void journal_write_unlocked(struct closure *cl)
+ return;
+ }
+
+- c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
++ c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));
+
+ w->data->btree_level = c->root->level;
+
+@@ -847,7 +847,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
+ struct journal_write *w = c->journal.cur;
+
+ sectors = __set_blocks(w->data, w->data->keys + nkeys,
+- block_bytes(c)) * c->sb.block_size;
++ block_bytes(c->cache)) * c->sb.block_size;
+
+ if (sectors <= min_t(size_t,
+ c->journal.blocks_free * c->sb.block_size,
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index c7cadaafa947..02408fdbf5bb 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -99,7 +99,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
+ * bch_data_insert_keys() will insert the keys created so far
+ * and finish the rest when the keylist is empty.
+ */
+- if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
++ if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
+ return -ENOMEM;
+
+ return __bch_keylist_realloc(l, u64s);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 90a419ad6445..36a538c2e960 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1528,7 +1528,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
+
+ kobject_init(&d->kobj, &bch_flash_dev_ktype);
+
+- if (bcache_device_init(d, block_bytes(c), u->sectors,
++ if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
+ NULL, &bcache_flash_ops))
+ goto err;
+
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index ac06c0bc3c0a..b9f524ab5cc8 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -714,7 +714,7 @@ SHOW(__bch_cache_set)
+ sysfs_print(synchronous, CACHE_SYNC(&c->sb));
+ sysfs_print(journal_delay_ms, c->journal_delay_ms);
+ sysfs_hprint(bucket_size, bucket_bytes(c));
+- sysfs_hprint(block_size, block_bytes(c));
++ sysfs_hprint(block_size, block_bytes(c->cache));
+ sysfs_print(tree_depth, c->root->level);
+ sysfs_print(root_usage_percent, bch_root_usage(c));
+
+--
+2.26.2
+
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch
new file mode 100644
index 0000000..2288492
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0006-bcache-remove-useless-alloc_bucket_pages.patch
@@ -0,0 +1,30 @@
+From 811f8198f1d5337729bbd855bf0e381e60eeeca3 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 21:28:23 +0800
+Subject: [PATCH v2 06/15] bcache: remove useless alloc_bucket_pages()
+
+Now no one uses alloc_bucket_pages() anymore, so remove it from super.c.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/super.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 36a538c2e960..28257f11d835 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1832,9 +1832,6 @@ void bch_cache_set_unregister(struct cache_set *c)
+ bch_cache_set_stop(c);
+ }
+
+-#define alloc_bucket_pages(gfp, c) \
+- ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
+-
+ #define alloc_meta_bucket_pages(gfp, sb) \
+ ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
+
+--
+2.26.2
+
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0007-bcache-remove-useless-bucket_pages.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0007-bcache-remove-useless-bucket_pages.patch
new file mode 100644
index 0000000..1957844
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0007-bcache-remove-useless-bucket_pages.patch
@@ -0,0 +1,30 @@
+From a34562e8f936f77d726fcd94746a467db5f2bf04 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 21:15:28 +0800
+Subject: [PATCH v2 07/15] bcache: remove useless bucket_pages()
+
+alloc_bucket_pages() was the only user of bucket_pages(). Now that
+alloc_bucket_pages() has been removed from the bcache code, it is safe
+to remove the useless macro bucket_pages() as well.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/bcache.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 29bec61cafbb..48a2585b6bbb 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -757,7 +757,6 @@ struct bbio {
+ #define btree_default_blocks(c) \
+ ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+
+-#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
+ #define bucket_bytes(c) ((c)->sb.bucket_size << 9)
+ #define block_bytes(ca) ((ca)->sb.block_size << 9)
+
+--
+2.26.2
+
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch
new file mode 100644
index 0000000..057b8d1
--- /dev/null
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0008-bcache-only-use-bucket_bytes-on-struct-cache.patch
@@ -0,0 +1,50 @@
+From 964012dfcb5e4ae91630c5d92b51cfba698dc41d Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 14 Aug 2020 21:20:48 +0800
+Subject: [PATCH v2 08/15] bcache: only use bucket_bytes() on struct cache
+
+Because struct cache_set and struct cache both have struct cache_sb,
+macro bucket_bytes() is currently used on both of them. Once the
+embedded struct cache_sb is removed from struct cache_set, this macro
+can no longer be used on struct cache_set.
+
+This patch unifies all bucket_bytes() usage to struct cache only; this
+is one of the preparations for removing the embedded struct cache_sb
+from struct cache_set.
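+
+As with block_bytes(), the macro body is unchanged and callers pass the
+set's single cache instead, e.g. (from the sysfs.c hunk below):
+
+	#define bucket_bytes(ca) ((ca)->sb.bucket_size << 9)
+
+	sysfs_hprint(bucket_size, bucket_bytes(c->cache));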
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/md/bcache/bcache.h | 2 +-
+ drivers/md/bcache/sysfs.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 48a2585b6bbb..94d4baf4c405 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -757,7 +757,7 @@ struct bbio {
+ #define btree_default_blocks(c) \
+ ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+
+-#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
++#define bucket_bytes(ca) ((ca)->sb.bucket_size << 9)
+ #define block_bytes(ca) ((ca)->sb.block_size << 9)
+
+ static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index b9f524ab5cc8..4bfe98faadcc 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -713,7 +713,7 @@ SHOW(__bch_cache_set)
+
+ sysfs_print(synchronous, CACHE_SYNC(&c->sb));
+ sysfs_print(journal_delay_ms, c->journal_delay_ms);
+- sysfs_hprint(bucket_size, bucket_bytes(c));
++ sysfs_hprint(bucket_size, bucket_bytes(c->cache));
+ sysfs_hprint(block_size, block_bytes(c->cache));
+ sysfs_print(tree_depth, c->root->level);
+ sysfs_print(root_usage_percent, bch_root_usage(c));
+--
+2.26.2
+
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch
index 89899d4..ca8ff92 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0009-bcache-avoid-data-copy-between-cache_set-sb-and-c.patch
@@ -1,7 +1,7 @@
From 78c5a3367fe79f81efa030ef2cb2fc171009fc14 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 23:18:45 +0800
-Subject: [PATCH v2 09/19] bcache: avoid data copy between cache_set->sb and
+Subject: [PATCH v2 09/15] bcache: avoid data copy between cache_set->sb and
cache->sb
struct cache_sb embedded in struct cache_set is only partially used and
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch
index 1d5a20b..e2f8983 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0010-bcache-don-t-check-seq-numbers-in-register_cache_.patch
@@ -1,7 +1,7 @@
From 754956b7956b6c08c1d8e3eab0a2bda29e220115 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 23:28:26 +0800
-Subject: [PATCH v2 10/19] bcache: don't check seq numbers in
+Subject: [PATCH v2 10/15] bcache: don't check seq numbers in
register_cache_set()
In order to update the partial super block of cache set, the seq numbers
@@ -17,6 +17,7 @@ struct cache. This is a preparation patch for removing embedded struct
cache_sb from struct cache_set.
Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/md/bcache/super.c | 15 ---------------
1 file changed, 15 deletions(-)
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0011-bcache-remove-can_attach_cache.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0011-bcache-remove-can_attach_cache.patch
index c01af45..fdbb825 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0011-bcache-remove-can_attach_cache.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0011-bcache-remove-can_attach_cache.patch
@@ -1,7 +1,7 @@
From aeb61b8c57e542123d0082054e6a65f10848a6f1 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 23:36:56 +0800
-Subject: [PATCH v2 11/19] bcache: remove can_attach_cache()
+Subject: [PATCH v2 11/15] bcache: remove can_attach_cache()
After removing the embedded struct cache_sb from struct cache_set, cache
set will directly reference the in-memory super block of struct cache.
@@ -12,6 +12,7 @@ This is a preparation patch for latter removing cache_set->sb from
struct cache_set.
Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/md/bcache/super.c | 10 ----------
1 file changed, 10 deletions(-)
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch
index a9920e2..864c8c4 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0012-bcache-check-and-set-sync-status-on-cache-s-in-me.patch
@@ -1,7 +1,7 @@
From 9cbec8384422a47b76db64bfe880e1224893c193 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Fri, 14 Aug 2020 23:53:52 +0800
-Subject: [PATCH v2 12/19] bcache: check and set sync status on cache's
+Subject: [PATCH v2 12/15] bcache: check and set sync status on cache's
in-memory super block
Currently the cache's sync status is checked and set on cache set's in-
@@ -15,6 +15,7 @@ in-memory super block. This is a preparation for later removing embedded
struct cache_sb from struct cache_set.
Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/md/bcache/alloc.c | 2 +-
drivers/md/bcache/journal.c | 2 +-
diff --git a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch
index 0580cc8..407ffbe 100644
--- a/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch
+++ b/for-test/remove-multiple-cache-devices/single-cache-in-cache-set/v2/v2-0013-bcache-remove-embedded-struct-cache_sb-from-struc.patch
@@ -1,7 +1,7 @@
-From f8c4f864ef0f99ba8d34a3254bc3d03c1bd12897 Mon Sep 17 00:00:00 2001
+From 39296f9bea8a8448b882cbdee9688ddc39e5dd67 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Sat, 15 Aug 2020 00:20:00 +0800
-Subject: [PATCH v2 13/19] bcache: remove embedded struct cache_sb from struct
+Subject: [PATCH v2 13/15] bcache: remove embedded struct cache_sb from struct
cache_set
Since bcache code was merged into mainline kernel, each cache set only
@@ -21,6 +21,7 @@ removed super block by referencing the in-memory super block of struct
cache.
Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/md/bcache/alloc.c | 6 +++---
drivers/md/bcache/bcache.h | 4 +---
@@ -31,9 +32,9 @@ Signed-off-by: Coly Li <colyli@suse.de>
drivers/md/bcache/io.c | 2 +-
drivers/md/bcache/journal.c | 11 ++++++-----
drivers/md/bcache/request.c | 4 ++--
- drivers/md/bcache/super.c | 22 ++++++++++++----------
+ drivers/md/bcache/super.c | 25 ++++++++++++++-----------
drivers/md/bcache/writeback.c | 2 +-
- 11 files changed, 41 insertions(+), 39 deletions(-)
+ 11 files changed, 43 insertions(+), 40 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 65fdbdeb5134..8c371d5eef8e 100644
@@ -312,7 +313,7 @@ index 02408fdbf5bb..37e9cf8dbfc1 100644
goto skip;
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 6b94b396f9e9..ad87859d744a 100644
+index 6b94b396f9e9..d06ea4a3e500 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -471,7 +471,7 @@ static int __uuid_write(struct cache_set *c)
@@ -348,16 +349,26 @@ index 6b94b396f9e9..ad87859d744a 100644
/* Will die */
pr_err("Couldn't attach %s: block size less than set's block size\n",
dc->backing_dev_name);
-@@ -1666,7 +1665,7 @@ static void cache_set_free(struct closure *cl)
+@@ -1658,6 +1657,9 @@ static void cache_set_free(struct closure *cl)
+ bch_journal_free(c);
+
+ mutex_lock(&bch_register_lock);
++ bch_bset_sort_state_free(&c->sort);
++ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
++
+ ca = c->cache;
+ if (ca) {
+ ca->set = NULL;
+@@ -1665,8 +1667,6 @@ static void cache_set_free(struct closure *cl)
+ kobject_put(&ca->kobj);
}
- bch_bset_sort_state_free(&c->sort);
+- bch_bset_sort_state_free(&c->sort);
- free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
-+ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
if (c->moving_gc_wq)
destroy_workqueue(c->moving_gc_wq);
-@@ -1832,6 +1831,7 @@ void bch_cache_set_unregister(struct cache_set *c)
+@@ -1832,6 +1832,7 @@ void bch_cache_set_unregister(struct cache_set *c)
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
int iter_size;
@@ -365,7 +376,7 @@ index 6b94b396f9e9..ad87859d744a 100644
struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
if (!c)
-@@ -1855,12 +1855,14 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+@@ -1855,12 +1856,14 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
memcpy(c->set_uuid, sb->set_uuid, 16);
@@ -382,7 +393,7 @@ index 6b94b396f9e9..ad87859d744a 100644
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
BTREE_MAX_PAGES);
-@@ -1898,7 +1900,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+@@ -1898,7 +1901,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
sizeof(struct bbio) +
@@ -391,7 +402,7 @@ index 6b94b396f9e9..ad87859d744a 100644
goto err;
if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
-@@ -1908,7 +1910,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+@@ -1908,7 +1911,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
goto err;
@@ -400,7 +411,7 @@ index 6b94b396f9e9..ad87859d744a 100644
if (!c->uuids)
goto err;
-@@ -2088,7 +2090,7 @@ static int run_cache_set(struct cache_set *c)
+@@ -2088,7 +2091,7 @@ static int run_cache_set(struct cache_set *c)
goto err;
closure_sync(&cl);