Message ID | 20220905134833.6387-4-joshi.k@samsung.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | [for-next,v4,1/4] io_uring: introduce io_uring_cmd_import_fixed | expand |
On Mon, Sep 05, 2022 at 07:18:32PM +0530, Kanchan Joshi wrote: > +static struct bio *bio_map_get(struct request *rq, unsigned int nr_vecs, > gfp_t gfp_mask) > { > @@ -259,13 +252,31 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, > bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask, > &fs_bio_set); > if (!bio) > - return -ENOMEM; > + return NULL; This context looks weird? That bio_alloc_bioset should not be there, as biosets are only used for file system I/O, which this is not.
On Tue, Sep 06, 2022 at 08:25:22AM +0200, Christoph Hellwig wrote: >On Mon, Sep 05, 2022 at 07:18:32PM +0530, Kanchan Joshi wrote: >> +static struct bio *bio_map_get(struct request *rq, unsigned int nr_vecs, >> gfp_t gfp_mask) >> { >> @@ -259,13 +252,31 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, >> bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask, >> &fs_bio_set); >> if (!bio) >> - return -ENOMEM; >> + return NULL; > >This context looks weird? That bio_alloc_bioset should not be there, >as biosets are only used for file system I/O, which this is not. if you think it's a deal-breaker, maybe I can add a new bioset in nvme and pass that as argument to this helper. Would you prefer that over the current approach.
On Tue, Sep 06, 2022 at 12:03:29PM +0530, Kanchan Joshi wrote: >On Tue, Sep 06, 2022 at 08:25:22AM +0200, Christoph Hellwig wrote: >>On Mon, Sep 05, 2022 at 07:18:32PM +0530, Kanchan Joshi wrote: >>>+static struct bio *bio_map_get(struct request *rq, unsigned int nr_vecs, >>> gfp_t gfp_mask) >>> { >>>@@ -259,13 +252,31 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, >>> bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask, >>> &fs_bio_set); >>> if (!bio) >>>- return -ENOMEM; >>>+ return NULL; >> >>This context looks weird? That bio_alloc_bioset should not be there, >>as biosets are only used for file system I/O, which this is not. > >if you think it's a deal-breaker, maybe I can add a new bioset in nvme and >pass that as argument to this helper. Would you prefer that over the >current approach. seems I responded without looking carefully. The bioset addition is not part of this series. It got added earlier [1] [2], as part of optimizing polled-io on file/passthru path. [1] https://lore.kernel.org/linux-block/20220806152004.382170-3-axboe@kernel.dk/ [2] https://lore.kernel.org/linux-block/f2863702-e54c-cd74-efcf-8cb238be1a7c@kernel.dk/
On Tue, Sep 06, 2022 at 12:03:29PM +0530, Kanchan Joshi wrote: >> This context looks weird? That bio_alloc_bioset should not be there, >> as biosets are only used for file system I/O, which this is not. > > if you think it's a deal-breaker, maybe I can add a new bioset in nvme and > pass that as argument to this helper. Would you prefer that over the > current approach. The whole point is that biosets exist to allow for forward progress guarantees required for file system I/O. For passthrough I/O bio_kmalloc is perfectly fine and much simpler. Adding yet another bio_set just makes things even worse.
On 9/6/22 12:51 AM, Christoph Hellwig wrote: > On Tue, Sep 06, 2022 at 12:03:29PM +0530, Kanchan Joshi wrote: >>> This context looks weird? That bio_alloc_bioset should not be there, >>> as biosets are only used for file system I/O, which this is not. >> >> if you think it's a deal-breaker, maybe I can add a new bioset in nvme and >> pass that as argument to this helper. Would you prefer that over the >> current approach. > > The whole point is that biosets exist to allow for forward progress > guarantees required for file system I/O. For passthrough I/O > bio_kmalloc is perfectly fine and much simpler. Adding yet another > bio_set just makes things even worse. It's a performance concern too, efficiency is much worse by using kmalloc+kfree for passthrough. You don't get bio caching that way.
diff --git a/block/blk-map.c b/block/blk-map.c index f3768876d618..e2f268167342 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -241,17 +241,10 @@ static void bio_map_put(struct bio *bio) } } -static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, +static struct bio *bio_map_get(struct request *rq, unsigned int nr_vecs, gfp_t gfp_mask) { - unsigned int max_sectors = queue_max_hw_sectors(rq->q); - unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS); struct bio *bio; - int ret; - int j; - - if (!iov_iter_count(iter)) - return -EINVAL; if (rq->cmd_flags & REQ_POLLED) { blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE; @@ -259,13 +252,31 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask, &fs_bio_set); if (!bio) - return -ENOMEM; + return NULL; } else { bio = bio_kmalloc(nr_vecs, gfp_mask); if (!bio) - return -ENOMEM; + return NULL; bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq)); } + return bio; +} + +static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, + gfp_t gfp_mask) +{ + unsigned int max_sectors = queue_max_hw_sectors(rq->q); + unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS); + struct bio *bio; + int ret; + int j; + + if (!iov_iter_count(iter)) + return -EINVAL; + + bio = bio_map_get(rq, nr_vecs, gfp_mask); + if (bio == NULL) + return -ENOMEM; while (iov_iter_count(iter)) { struct page **pages, *stack_pages[UIO_FASTIOV]; @@ -612,6 +623,69 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, } EXPORT_SYMBOL(blk_rq_map_user); +/* Prepare bio for passthrough IO given an existing bvec iter */ +int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter) +{ + struct request_queue *q = rq->q; + size_t iter_count, nr_segs; + struct bio *bio; + struct bio_vec *bv, *bvec_arr, *bvprvp = NULL; + struct queue_limits *lim = &q->limits; + unsigned int nsegs = 0, bytes = 0; + int ret, i; + + iter_count = iov_iter_count(iter); + nr_segs = iter->nr_segs; + + if (!iter_count || (iter_count >> 9) > queue_max_hw_sectors(q)) + return -EINVAL; + if (nr_segs > queue_max_segments(q)) + return -EINVAL; + + /* no iovecs to alloc, as we already have a BVEC iterator */ + bio = bio_map_get(rq, 0, GFP_KERNEL); + if (bio == NULL) + return -ENOMEM; + + bio_iov_bvec_set(bio, iter); + blk_rq_bio_prep(rq, bio, nr_segs); + + /* loop to perform a bunch of sanity checks */ + bvec_arr = (struct bio_vec *)iter->bvec; + for (i = 0; i < nr_segs; i++) { + bv = &bvec_arr[i]; + /* + * If the queue doesn't support SG gaps and adding this + * offset would create a gap, disallow it. + */ + if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) { + ret = -EINVAL; + goto out_free; + } + + /* check full condition */ + if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len) { + ret = -EINVAL; + goto out_free; + } + + if (bytes + bv->bv_len <= iter_count && + bv->bv_offset + bv->bv_len <= PAGE_SIZE) { + nsegs++; + bytes += bv->bv_len; + } else { + ret = -EINVAL; + goto out_free; + } + bvprvp = bv; + } + return 0; +out_free: + bio_map_put(bio); + return ret; +} +EXPORT_SYMBOL(blk_rq_map_user_bvec); + /** * blk_rq_unmap_user - unmap a request with user data * @bio: start of bio list diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index b43c81d91892..83bef362f0f9 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -970,6 +970,7 @@ struct rq_map_data { bool from_user; }; +int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter); int blk_rq_map_user(struct request_queue *, struct request *, struct rq_map_data *, void __user *, unsigned long, gfp_t); int blk_rq_map_user_iov(struct request_queue *, struct request *,