author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-30 09:39:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-30 09:39:52 -0700
commit     fb45f493c18b5bf0818394662f908d648060310c (patch)
tree       7d701e299aeb1248251e18890c58cf4341f6fe11
parent     9c4249c8e0221e5cfae758d35b768aee84abf6c0 (diff)
parent     aa6df8dd28c01d9a3d2cfcfe9dd0a4a334d1cd81 (diff)
download   kvm-fb45f493c18b5bf0818394662f908d648060310c.tar.gz
Merge tag 'dm-4.1-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper bugfixes from Mike Snitzer:
 "Fix two bugs in the request-based DM blk-mq support that was added
  during the 4.1 merge"

* tag 'dm-4.1-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: fix free_rq_clone() NULL pointer when requeueing unmapped request
  dm: only initialize the request_queue once
-rw-r--r--  drivers/md/dm-ioctl.c  17
-rw-r--r--  drivers/md/dm.c        19
2 files changed, 21 insertions(+), 15 deletions(-)
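
Context for the first fix: free_rq_clone() could dereference a NULL pointer because, when a request is requeued before it was ever mapped, the clone's queue pointer is still unset, yet the pre-fix code decided how to free the clone by dereferencing clone->q. The sketch below condenses that pre-fix shape from the removed lines and surrounding context in the drivers/md/dm.c hunk; it is an illustration, not a verbatim copy of the old source.

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;

	blk_rq_unprep_clone(clone);

	/*
	 * clone->q is NULL for a clone that was never mapped, so this
	 * check itself faults on the requeue path.
	 */
	if (clone->q->mq_ops)
		/* stacked on blk-mq queue(s) */
		tio->ti->type->release_clone_rq(clone);
	else if (!md->queue->mq_ops)
		/* request_fn queue stacked on request_fn queue(s) */
		free_clone_request(md, clone);

	if (!md->queue->mq_ops)
		free_rq_tio(tio);
}

The fix replaces the clone->q test with a check of md->type, which never touches the clone's queue, and adds a WARN_ON_ONCE() for the completion path where a mapped clone is expected.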
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c8a18e4ee9dce2..720ceeb7fa9b29 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
goto err_unlock_md_type;
}
- if (dm_get_md_type(md) == DM_TYPE_NONE)
+ if (dm_get_md_type(md) == DM_TYPE_NONE) {
/* Initial table load: acquire type of table. */
dm_set_md_type(md, dm_table_get_type(t));
- else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+
+ /* setup md->queue to reflect md's type (may block) */
+ r = dm_setup_md_queue(md);
+ if (r) {
+ DMWARN("unable to set up device queue for new table.");
+ goto err_unlock_md_type;
+ }
+ } else if (dm_get_md_type(md) != dm_table_get_type(t)) {
DMWARN("can't change device type after initial table load.");
r = -EINVAL;
goto err_unlock_md_type;
}
- /* setup md->queue to reflect md's type (may block) */
- r = dm_setup_md_queue(md);
- if (r) {
- DMWARN("unable to set up device queue for new table.");
- goto err_unlock_md_type;
- }
dm_unlock_md_type(md);
/* stage inactive table */
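
Assembled from the hunk above (illustration only; error labels and the rest of table_load() are omitted), the resulting flow sets the device type and initializes the queue only on the very first table load:

	if (dm_get_md_type(md) == DM_TYPE_NONE) {
		/* Initial table load: acquire type of table. */
		dm_set_md_type(md, dm_table_get_type(t));

		/* setup md->queue to reflect md's type (may block) */
		r = dm_setup_md_queue(md);
		if (r) {
			DMWARN("unable to set up device queue for new table.");
			goto err_unlock_md_type;
		}
	} else if (dm_get_md_type(md) != dm_table_get_type(t)) {
		DMWARN("can't change device type after initial table load.");
		r = -EINVAL;
		goto err_unlock_md_type;
	}

	dm_unlock_md_type(md);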
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f8c7ca3e894737..a930b72314ac98 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
dm_put(md);
}
-static void free_rq_clone(struct request *clone)
+static void free_rq_clone(struct request *clone, bool must_be_mapped)
{
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
+ WARN_ON_ONCE(must_be_mapped && !clone->q);
+
blk_rq_unprep_clone(clone);
- if (clone->q->mq_ops)
+ if (md->type == DM_TYPE_MQ_REQUEST_BASED)
+ /* stacked on blk-mq queue(s) */
tio->ti->type->release_clone_rq(clone);
else if (!md->queue->mq_ops)
/* request_fn queue stacked on request_fn queue(s) */
free_clone_request(md, clone);
+ /*
+ * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
+ * no need to call free_clone_request() because we leverage blk-mq by
+ * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
+ */
if (!md->queue->mq_ops)
free_rq_tio(tio);
@@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error)
rq->sense_len = clone->sense_len;
}
- free_rq_clone(clone);
+ free_rq_clone(clone, true);
if (!rq->q->mq_ops)
blk_end_request_all(rq, error);
else
@@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq)
}
if (clone)
- free_rq_clone(clone);
+ free_rq_clone(clone, false);
}
/*
@@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
{
struct request_queue *q = NULL;
- if (md->queue->elevator)
- return 0;
-
/* Fully initialize the queue */
q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
if (!q)
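
Taken together: dm_end_request() passes must_be_mapped = true, since a completed request must have been mapped and a NULL clone->q there is worth a warning, while dm_unprep_request() passes false for the requeue path where the clone may never have been mapped. A condensed view of the two call sites after the patch (surrounding code elided):

	/* completion path, in dm_end_request(): clone must have been mapped */
	free_rq_clone(clone, true);

	/* requeue path, in dm_unprep_request(): clone may be unmapped */
	if (clone)
		free_rq_clone(clone, false);

And because table_load() now calls dm_setup_md_queue() only on the initial load, the early "if (md->queue->elevator) return 0;" guard in dm_init_request_based_queue(), which presumably existed to make repeat calls a no-op, is no longer needed, so blk_init_allocated_queue() runs exactly once per device.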