author    Christoph Hellwig <hch@lst.de>  2024-02-21 13:58:44 +0100
committer Jens Axboe <axboe@kernel.dk>    2024-02-27 09:33:08 -0700
commit    4f81b87d91be2a00195f85847d040c2276cac2ae (patch)
tree      b32b80c123285e1778e93d951af6edbc9a424448 /drivers/block
parent    738be136327a56e5a67e1942a2c318fb91914a3f (diff)
download  linux-4f81b87d91be2a00195f85847d040c2276cac2ae.tar.gz
xen-blkfront: don't redundantly set max_segments in blkif_recover
blkif_set_queue_limits already sets the max_segments limit, so don't do it a second time. Also remove a comment about a long-fixed bug in blk_mq_update_nr_hw_queues.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Link: https://lore.kernel.org/r/20240221125845.3610668-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
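[Editorial note] For context, a minimal sketch of the redundancy being removed, reconstructed from the lines this patch deletes in blkif_recover(); it is not a verbatim copy of blkif_set_queue_limits():

	/*
	 * Illustrative sketch only (taken from the removed lines below):
	 * the per-request segment limit is derived from the backend's
	 * indirect-descriptor support, falling back to
	 * BLKIF_MAX_SEGMENTS_PER_REQUEST when max_indirect_segments is 0.
	 * blkif_set_queue_limits() already applies this limit, which is why
	 * repeating the calculation in blkif_recover() is redundant.
	 */
	unsigned int segs = info->max_indirect_segments ? :
			    BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);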
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/xen-blkfront.c | 8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 1258f24b28550..7664638a0abbf 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2008,14 +2008,10 @@ static int blkif_recover(struct blkfront_info *info)
 	struct request *req, *n;
 	int rc;
 	struct bio *bio;
-	unsigned int segs;
 	struct blkfront_ring_info *rinfo;
 
 	blkfront_gather_backend_features(info);
-	/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
 	blkif_set_queue_limits(info);
-	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
-	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
 
 	for_each_rinfo(info, rinfo, r_index) {
 		rc = blkfront_setup_indirect(rinfo);
@@ -2035,7 +2031,9 @@ static int blkif_recover(struct blkfront_info *info)
 	list_for_each_entry_safe(req, n, &info->requests, queuelist) {
 		/* Requeue pending requests (flush or discard) */
 		list_del_init(&req->queuelist);
-		BUG_ON(req->nr_phys_segments > segs);
+		BUG_ON(req->nr_phys_segments >
+		       (info->max_indirect_segments ? :
+				BLKIF_MAX_SEGMENTS_PER_REQUEST));
 		blk_mq_requeue_request(req, false);
 	}
 	blk_mq_start_stopped_hw_queues(info->rq, true);
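[Editorial note] The replacement BUG_ON uses the GNU C conditional with an omitted middle operand: `a ?: b` evaluates to `a` when `a` is non-zero and to `b` otherwise, so the check falls back to BLKIF_MAX_SEGMENTS_PER_REQUEST when the backend advertises no indirect segments. A minimal open-coded equivalent, with the purely illustrative local name max_segs:

	/* Open-coded equivalent of the a ?: b fallback used in the new BUG_ON. */
	unsigned int max_segs = info->max_indirect_segments ?
			info->max_indirect_segments :
			BLKIF_MAX_SEGMENTS_PER_REQUEST;

	BUG_ON(req->nr_phys_segments > max_segs);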