author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-10-25 09:06:58 +0200
committer  Jens Axboe <axboe@kernel.dk>  2021-11-29 06:38:51 -0700
commit     639d353143fa3bfa81bbe7af263260d93d23d822 (patch)
tree       9d3fff6edcd784f68a8df584a8c585facd431239
parent     e8dc17e2893b4107366004810ca2a4acf1fc8563 (diff)
mmc: core: Use blk_mq_complete_request_direct().
The completion callback for the sdhci-pci device is invoked from a kworker. I
couldn't identify in which context mmc_blk_mq_req_done() is invoked, but the
remaining callers are invoked from preemptible context. Here it would make
sense to complete the request directly instead of scheduling ksoftirqd for its
completion.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20211025070658.1565848-3-bigeasy@linutronix.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 drivers/mmc/core/block.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)
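For reference, blk_mq_complete_request() defers completion and may bounce it
through a softirq (hence ksoftirqd) or an IPI to the submitting CPU, while
blk_mq_complete_request_direct() invokes the given completion callback inline
and is therefore only safe from preemptible context. Below is a minimal
sketch of the pattern the patch applies; my_post_req() and my_complete() are
hypothetical names, whereas blk_mq_complete_request(),
blk_mq_complete_request_direct(), blk_should_fake_timeout() and
blk_mq_end_request() are the real block-layer interfaces:

#include <linux/blk-mq.h>

/* Hypothetical completion callback handed to the direct path. */
static void my_complete(struct request *req)
{
        /* Per-request post-processing would go here. */
        blk_mq_end_request(req, BLK_STS_OK);
}

/* Hypothetical helper mirroring what mmc_blk_mq_post_req() does below. */
static void my_post_req(struct request *req, bool can_sleep)
{
        if (likely(!blk_should_fake_timeout(req->q))) {
                if (can_sleep)
                        /* Caller is known to be preemptible: run the
                         * completion inline, skipping the softirq/IPI hop. */
                        blk_mq_complete_request_direct(req, my_complete);
                else
                        /* Caller context unknown: let blk-mq defer. */
                        blk_mq_complete_request(req);
        }
}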
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 5e0960560eab76..635b79899b9fe1 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2051,7 +2051,8 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
mmc_put_card(mq->card, &mq->ctx);
}
-static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
+ bool can_sleep)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
@@ -2063,10 +2064,14 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
* Block layer timeouts race with completions which means the normal
* completion path cannot be used during recovery.
*/
- if (mq->in_recovery)
+ if (mq->in_recovery) {
mmc_blk_mq_complete_rq(mq, req);
- else if (likely(!blk_should_fake_timeout(req->q)))
- blk_mq_complete_request(req);
+ } else if (likely(!blk_should_fake_timeout(req->q))) {
+ if (can_sleep)
+ blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
+ else
+ blk_mq_complete_request(req);
+ }
mmc_blk_mq_dec_in_flight(mq, req);
}
@@ -2087,7 +2092,7 @@ void mmc_blk_mq_recovery(struct mmc_queue *mq)
mmc_blk_urgent_bkops(mq, mqrq);
- mmc_blk_mq_post_req(mq, req);
+ mmc_blk_mq_post_req(mq, req, true);
}
static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
@@ -2106,7 +2111,7 @@ static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
if (prev_req)
*prev_req = mq->complete_req;
else
- mmc_blk_mq_post_req(mq, mq->complete_req);
+ mmc_blk_mq_post_req(mq, mq->complete_req, true);
mq->complete_req = NULL;
@@ -2178,7 +2183,8 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
mq->rw_wait = false;
wake_up(&mq->wait);
- mmc_blk_mq_post_req(mq, req);
+ /* context unknown */
+ mmc_blk_mq_post_req(mq, req, false);
}
static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
@@ -2238,7 +2244,7 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
err = mmc_start_request(host, &mqrq->brq.mrq);
if (prev_req)
- mmc_blk_mq_post_req(mq, prev_req);
+ mmc_blk_mq_post_req(mq, prev_req, true);
if (err)
mq->rw_wait = false;
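Taken together, the call sites follow one rule: paths known to run in
preemptible context (recovery, and completing the previous request while
issuing or waiting for the next one) pass can_sleep=true and complete inline,
while mmc_blk_mq_req_done() keeps can_sleep=false because the context it runs
in depends on the host driver. The helper used here was introduced by the
parent commit (e8dc17e2893b); from memory it is approximately the static
inline below, though the parent commit is authoritative:

static inline void blk_mq_complete_request_direct(struct request *rq,
                void (*complete)(struct request *rq))
{
        /* Mark the request complete and run the callback inline instead
         * of raising BLOCK_SOFTIRQ or sending an IPI to another CPU. */
        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
        complete(rq);
}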