aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorColy Li <colyli@suse.de>2018-07-23 23:15:18 +0800
committerColy Li <colyli@suse.de>2018-07-23 23:15:18 +0800
commiteb3a3beeaae8cadc40025f0d4efb88ad235104f9 (patch)
treee3f649b2af7655647614eb46d4047782db584253
parent7488c0de19a74ad35310900edfa67c6a9d2e4ce9 (diff)
downloadbcache-patches-eb3a3beeaae8cadc40025f0d4efb88ad235104f9.tar.gz
for-review: update v2-0001-bcache-set-max-writeback-rate-when-I-O-request-is-id.patch
-rw-r--r--for-review/v2-0001-bcache-set-max-writeback-rate-when-I-O-request-is-id.patch (renamed from for-review/0001-bcache-set-max-writeback-rate-when-I-O-request-is-id.patch)152
1 files changed, 102 insertions, 50 deletions
diff --git a/for-review/0001-bcache-set-max-writeback-rate-when-I-O-request-is-id.patch b/for-review/v2-0001-bcache-set-max-writeback-rate-when-I-O-request-is-id.patch
index 3ed20ce..adfa504 100644
--- a/for-review/0001-bcache-set-max-writeback-rate-when-I-O-request-is-id.patch
+++ b/for-review/v2-0001-bcache-set-max-writeback-rate-when-I-O-request-is-id.patch
@@ -1,7 +1,7 @@
-From cee7076a469d03e744e0d0469aab2065ae4c67c9 Mon Sep 17 00:00:00 2001
+From 46b3758825568d6017e51c33330d9dfb54b9d23b Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Thu, 19 Jul 2018 18:18:55 +0800
-Subject: [PATCH] bcache: set max writeback rate when I/O request is idle
+Subject: [PATCH v2] bcache: set max writeback rate when I/O request is idle
Commit b1092c9af9ed ("bcache: allow quick writeback when backing idle")
allows the writeback rate to be faster if there is no I/O request on a
@@ -34,16 +34,21 @@ Signed-off-by: Coly Li <colyli@suse.de>
Tested-by: Kai Krakow <kai@kaishome.de>
Cc: Michael Lyle <mlyle@lyle.org>
---
- drivers/md/bcache/bcache.h | 9 +---
- drivers/md/bcache/request.c | 42 ++++++++++++++-
- drivers/md/bcache/sysfs.c | 14 +++--
- drivers/md/bcache/util.c | 2 +-
- drivers/md/bcache/util.h | 2 +-
- drivers/md/bcache/writeback.c | 98 +++++++++++++++++++++++++----------
- 6 files changed, 126 insertions(+), 41 deletions(-)
+Changelog:
+v2, Fix a deadlock reported by Stefan Priebe.
+v1, Initial version.
+
+ drivers/md/bcache/bcache.h | 11 ++--
+ drivers/md/bcache/request.c | 51 ++++++++++++++-
+ drivers/md/bcache/super.c | 1 +
+ drivers/md/bcache/sysfs.c | 14 +++--
+ drivers/md/bcache/util.c | 2 +-
+ drivers/md/bcache/util.h | 2 +-
+ drivers/md/bcache/writeback.c | 115 ++++++++++++++++++++++++++--------
+ 7 files changed, 155 insertions(+), 41 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
-index d6bf294f3907..f7451e8be03c 100644
+index d6bf294f3907..469ab1a955e0 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -328,13 +328,6 @@ struct cached_dev {
@@ -69,46 +74,64 @@ index d6bf294f3907..f7451e8be03c 100644
struct cache_sb sb;
+@@ -523,6 +518,8 @@ struct cache_set {
+
+ struct bcache_device **devices;
+ unsigned devices_max_used;
++ /* See set_at_max_writeback_rate() for how it is used */
++ unsigned previous_dirty_dc_nr;
+ struct list_head cached_devs;
+ uint64_t cached_dev_sectors;
+ struct closure caching;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
-index ae67f5fa8047..fe45f561a054 100644
+index ae67f5fa8047..1af3d96abfa5 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
-@@ -1104,6 +1104,34 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
+@@ -1104,6 +1104,43 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
/* Cached devices - read & write stuff */
-+static void quit_max_writeback_rate(struct cache_set *c)
++static void quit_max_writeback_rate(struct cache_set *c,
++ struct cached_dev *this_dc)
+{
+ int i;
+ struct bcache_device *d;
+ struct cached_dev *dc;
+
-+ mutex_lock(&bch_register_lock);
-+
-+ for (i = 0; i < c->devices_max_used; i++) {
-+ if (!c->devices[i])
-+ continue;
++ /*
++ * If bch_register_lock is acquired by other attach/detach operations,
++ * waiting here will increase I/O request latency for seconds or more.
++ * To avoid such a situation, only the writeback rate of the current
++ * cached device is set to 1, and __update_writeback_rate() will decide
++ * the writeback rate of other cached devices (c->idle_counter is 0 now).
++ */
++ if (mutex_trylock(&bch_register_lock)){
++ for (i = 0; i < c->devices_max_used; i++) {
++ if (!c->devices[i])
++ continue;
+
-+ if (UUID_FLASH_ONLY(&c->uuids[i]))
-+ continue;
++ if (UUID_FLASH_ONLY(&c->uuids[i]))
++ continue;
+
-+ d = c->devices[i];
-+ dc = container_of(d, struct cached_dev, disk);
-+ /*
-+ * set writeback rate to default minimum value,
-+ * then let update_writeback_rate() to decide the
-+ * upcoming rate.
-+ */
-+ atomic64_set(&dc->writeback_rate.rate, 1);
-+ }
++ d = c->devices[i];
++ dc = container_of(d, struct cached_dev, disk);
++ /*
++ * set writeback rate to default minimum value,
++ * then let update_writeback_rate() to decide the
++ * upcoming rate.
++ */
++ atomic64_set(&dc->writeback_rate.rate, 1);
++ }
+
-+ mutex_unlock(&bch_register_lock);
++ mutex_unlock(&bch_register_lock);
++ } else
++ atomic64_set(&this_dc->writeback_rate.rate, 1);
+}
+
static blk_qc_t cached_dev_make_request(struct request_queue *q,
struct bio *bio)
{
-@@ -1119,7 +1147,19 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
+@@ -1119,7 +1156,19 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
return BLK_QC_T_NONE;
}
@@ -123,12 +146,24 @@ index ae67f5fa8047..fe45f561a054 100644
+ */
+ if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
+ atomic_set(&d->c->at_max_writeback_rate, 0);
-+ quit_max_writeback_rate(d->c);
++ quit_max_writeback_rate(d->c, dc);
+ }
+ }
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
bio_set_dev(bio, dc->bdev);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index fa4058e43202..fa532d9f9353 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1687,6 +1687,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ c->block_bits = ilog2(sb->block_size);
+ c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
+ c->devices_max_used = 0;
++ c->previous_dirty_dc_nr = 0;
+ c->btree_pages = bucket_pages(c);
+ if (c->btree_pages > BTREE_MAX_PAGES)
+ c->btree_pages = max_t(int, c->btree_pages / 4,
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 225b15aa0340..d719021bff81 100644
--- a/drivers/md/bcache/sysfs.c
@@ -195,10 +230,10 @@ index cced87f8eb27..7e17f32ab563 100644
static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index ad45ebe1a74b..72059f910230 100644
+index ad45ebe1a74b..11ffadc3cf8f 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
-@@ -49,6 +49,63 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
+@@ -49,6 +49,80 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}
@@ -208,18 +243,35 @@ index ad45ebe1a74b..72059f910230 100644
+ int i, dirty_dc_nr = 0;
+ struct bcache_device *d;
+
-+ mutex_lock(&bch_register_lock);
-+ for (i = 0; i < c->devices_max_used; i++) {
-+ if (!c->devices[i])
-+ continue;
-+ if (UUID_FLASH_ONLY(&c->uuids[i]))
-+ continue;
-+ d = c->devices[i];
-+ dc = container_of(d, struct cached_dev, disk);
-+ if (atomic_read(&dc->has_dirty))
-+ dirty_dc_nr++;
-+ }
-+ mutex_unlock(&bch_register_lock);
++ /*
++ * bch_register_lock is acquired in cached_dev_detach_finish() before
++ * calling cancel_writeback_rate_update_dwork() to stop the delayed
++ * kworker writeback_rate_update (which is the context we are in now).
++ * Therefore calling mutex_lock() here may introduce a deadlock when
++ * shutting down the bcache device.
++ * c->previous_dirty_dc_nr is used to record the previously calculated
++ * dirty_dc_nr from the last time mutex_trylock() succeeded. Then if
++ * mutex_trylock() fails here, use c->previous_dirty_dc_nr as the dirty
++ * cached device number. Of course it might be inaccurate, but a few more
++ * or fewer loops before setting c->at_max_writeback_rate is much better
++ * than a deadlock here.
++ */
++ if (mutex_trylock(&bch_register_lock)) {
++ for (i = 0; i < c->devices_max_used; i++) {
++ if (!c->devices[i])
++ continue;
++ if (UUID_FLASH_ONLY(&c->uuids[i]))
++ continue;
++ d = c->devices[i];
++ dc = container_of(d, struct cached_dev, disk);
++ if (atomic_read(&dc->has_dirty))
++ dirty_dc_nr++;
++ }
++ c->previous_dirty_dc_nr = dirty_dc_nr;
++
++ mutex_unlock(&bch_register_lock);
++ } else
++ dirty_dc_nr = c->previous_dirty_dc_nr;
+
+ /*
+ * Idle_counter is increased every time update_writeback_rate()
@@ -262,7 +314,7 @@ index ad45ebe1a74b..72059f910230 100644
static void __update_writeback_rate(struct cached_dev *dc)
{
/*
-@@ -104,8 +161,9 @@ static void __update_writeback_rate(struct cached_dev *dc)
+@@ -104,8 +178,9 @@ static void __update_writeback_rate(struct cached_dev *dc)
dc->writeback_rate_proportional = proportional_scaled;
dc->writeback_rate_integral_scaled = integral_scaled;
@@ -274,7 +326,7 @@ index ad45ebe1a74b..72059f910230 100644
dc->writeback_rate_target = target;
}
-@@ -138,9 +196,16 @@ static void update_writeback_rate(struct work_struct *work)
+@@ -138,9 +213,16 @@ static void update_writeback_rate(struct work_struct *work)
down_read(&dc->writeback_lock);
@@ -294,7 +346,7 @@ index ad45ebe1a74b..72059f910230 100644
up_read(&dc->writeback_lock);
-@@ -422,27 +487,6 @@ static void read_dirty(struct cached_dev *dc)
+@@ -422,27 +504,6 @@ static void read_dirty(struct cached_dev *dc)
delay = writeback_delay(dc, size);
@@ -322,7 +374,7 @@ index ad45ebe1a74b..72059f910230 100644
while (!kthread_should_stop() &&
!test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
delay) {
-@@ -715,7 +759,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+@@ -715,7 +776,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_running = true;
dc->writeback_percent = 10;
dc->writeback_delay = 30;