author		Christoph Hellwig <hch@lst.de>	2022-04-06 08:12:27 +0200
committer	Jens Axboe <axboe@kernel.dk>	2022-04-17 19:30:41 -0600
commit		066ff571011d8416e903d3d4f1f41e0b5eb91e1d (patch)
tree		749f3f28b12204c02e223ec27318d5a231eede0f /block
parent		7655db80932d95f501a0811544d9520ec720e38d (diff)
download	linux-066ff571011d8416e903d3d4f1f41e0b5eb91e1d.tar.gz
block: turn bio_kmalloc into a simple kmalloc wrapper
Remove the magic autofree semantics and require the callers to explicitly
call bio_init to initialize the bio. This allows bio_free to catch
accidental bio_put calls on bio_init()ed bios as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Coly Li <colyli@suse.de>
Acked-by: Mike Snitzer <snitzer@kernel.org>
Link: https://lore.kernel.org/r/20220406061228.410163-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
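In practical terms the calling convention changes as follows (a minimal
sketch based on the conversions in this patch; nr_vecs, the op value, and
the error handling are illustrative, not taken verbatim from any caller):

	/* Before: bio_kmalloc() also initialized the bio; bio_put() freed it. */
	bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
	if (!bio)
		return -ENOMEM;
	/* ... use the bio ... */
	bio_put(bio);

	/* After: the caller initializes and tears down the bio explicitly. */
	bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
	/* ... use the bio ... */
	bio_uninit(bio);
	kfree(bio);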
Diffstat (limited to 'block')
-rw-r--r--	block/bio.c			47
-rw-r--r--	block/blk-crypto-fallback.c	14
-rw-r--r--	block/blk-map.c			42
3 files changed, 53 insertions(+), 50 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 643a3a052c7e38..212ccd5b5212d0 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -224,24 +224,13 @@ EXPORT_SYMBOL(bio_uninit);
static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
-	void *p;
-
-	bio_uninit(bio);
+	void *p = bio;
-	if (bs) {
-		bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
+	WARN_ON_ONCE(!bs);
-		/*
-		 * If we have front padding, adjust the bio pointer before freeing
-		 */
-		p = bio;
-		p -= bs->front_pad;
-
-		mempool_free(p, &bs->bio_pool);
-	} else {
-		/* Bio was allocated by bio_kmalloc() */
-		kfree(bio);
-	}
+	bio_uninit(bio);
+	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
+	mempool_free(p - bs->front_pad, &bs->bio_pool);
}
/*
@@ -568,28 +557,28 @@ err_free:
EXPORT_SYMBOL(bio_alloc_bioset);
/**
- * bio_kmalloc - kmalloc a bio for I/O
+ * bio_kmalloc - kmalloc a bio
+ * @nr_vecs: number of bio_vecs to allocate
  * @gfp_mask: the GFP_* mask given to the slab allocator
- * @nr_iovecs: number of iovecs to pre-allocate
  *
- * Use kmalloc to allocate and initialize a bio.
+ * Use kmalloc to allocate a bio (including bvecs). The bio must be initialized
+ * using bio_init() before use. To free a bio returned from this function use
+ * kfree() after calling bio_uninit(). A bio returned from this function can
+ * be reused by calling bio_uninit() before calling bio_init() again.
+ *
+ * Note that unlike bio_alloc() or bio_alloc_bioset(), allocations from this
+ * function are not backed by a mempool and can fail. Do not use this function
+ * for allocations in the file system I/O path.
  *
  * Returns: Pointer to new bio on success, NULL on failure.
 */
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
+struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

-	if (nr_iovecs > UIO_MAXIOV)
+	if (nr_vecs > UIO_MAXIOV)
		return NULL;
-
-	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
-	if (unlikely(!bio))
-		return NULL;
-	bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs,
-		 0);
-	bio->bi_pool = NULL;
-	return bio;
+	return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
}
EXPORT_SYMBOL(bio_kmalloc);
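The reuse rule spelled out in the new kerneldoc amounts to the following
pattern (a sketch; bdev and the request ops are placeholders, not taken
from this patch):

	bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;

	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
	/* ... submit and complete the first I/O ... */
	bio_uninit(bio);

	/* The same allocation can be initialized again for another I/O. */
	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_WRITE);
	/* ... second I/O ... */
	bio_uninit(bio);
	kfree(bio);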
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 7c854584b52b50..5d1aa5b1d30a12 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -152,23 +152,25 @@ static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
	src_bio->bi_status = enc_bio->bi_status;
-	bio_put(enc_bio);
+	bio_uninit(enc_bio);
+	kfree(enc_bio);
	bio_endio(src_bio);
}

static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
{
+	unsigned int nr_segs = bio_segments(bio_src);
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

-	bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src));
+	bio = bio_kmalloc(nr_segs, GFP_NOIO);
	if (!bio)
		return NULL;
-	bio->bi_bdev = bio_src->bi_bdev;
+	bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,
+		 bio_src->bi_opf);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
-	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
@@ -363,8 +365,8 @@ out_release_keyslot:
	blk_crypto_put_keyslot(slot);
out_put_enc_bio:
	if (enc_bio)
-		bio_put(enc_bio);
-
+		bio_uninit(enc_bio);
+	kfree(enc_bio);
	return ret;
}
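The completion-side pattern used above, and again by the endio handlers in
blk-map.c below, looks like this in isolation (a sketch; the function name
is illustrative):

	static void example_encrypt_endio(struct bio *enc_bio)
	{
		struct bio *src_bio = enc_bio->bi_private;

		src_bio->bi_status = enc_bio->bi_status;
		/* A kmalloc'ed bio must not be freed with bio_put(). */
		bio_uninit(enc_bio);
		kfree(enc_bio);
		bio_endio(src_bio);
	}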
diff --git a/block/blk-map.c b/block/blk-map.c
index c7f71d83eff189..7ffde64f90195c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -152,10 +152,10 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
	ret = -ENOMEM;
-	bio = bio_kmalloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
-	bio->bi_opf |= req_op(rq);
+	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
@@ -224,7 +224,8 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
cleanup:
	if (!map_data)
		bio_free_pages(bio);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
@@ -234,6 +235,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
+	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;
	int j;
@@ -241,10 +243,10 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
	if (!iov_iter_count(iter))
		return -EINVAL;
-	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS));
+	bio = bio_kmalloc(nr_vecs, gfp_mask);
	if (!bio)
		return -ENOMEM;
-	bio->bi_opf |= req_op(rq);
+	bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));

	while (iov_iter_count(iter)) {
		struct page **pages;
@@ -303,7 +305,8 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
out_unmap:
	bio_release_pages(bio, false);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
	return ret;
}
@@ -323,7 +326,8 @@ static void bio_invalidate_vmalloc_pages(struct bio *bio)
static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
}
/**
@@ -348,9 +352,10 @@ static struct bio *bio_map_kern(struct request_queue *q, void *data,
	int offset, i;
	struct bio *bio;

-	bio = bio_kmalloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
+	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
@@ -374,7 +379,8 @@ static struct bio *bio_map_kern(struct request_queue *q, void *data,
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
-			bio_put(bio);
+			bio_uninit(bio);
+			kfree(bio);
			return ERR_PTR(-EINVAL);
		}
@@ -390,7 +396,8 @@ static struct bio *bio_map_kern(struct request_queue *q, void *data,
static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
@@ -435,9 +442,10 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
-	bio = bio_kmalloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
+	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
@@ -471,7 +479,8 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
cleanup:
	bio_free_pages(bio);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
	return ERR_PTR(-ENOMEM);
}
@@ -602,7 +611,8 @@ int blk_rq_unmap_user(struct bio *bio)
		next_bio = bio;
		bio = bio->bi_next;
-		bio_put(next_bio);
+		bio_uninit(next_bio);
+		kfree(next_bio);
	}

	return ret;
@@ -648,8 +658,10 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
-	if (unlikely(ret))
-		bio_put(bio);
+	if (unlikely(ret)) {
+		bio_uninit(bio);
+		kfree(bio);
+	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);