Message ID | 20220906062721.62630-5-joshi.k@samsung.com (mailing list archive)
---|---
State | New |
Series | fixed-buffer for uring-cmd/passthru
>	req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
> -			meta_len, meta_seed, &meta, timeout, vec, 0, 0);
> +			meta_len, meta_seed, &meta, timeout, vec, 0, 0, NULL, 0);
>	if (IS_ERR(req))
>		return PTR_ERR(req);

14 arguments to the function!

Kanchan, I'm not pointing at this patch alone; it has happened over the
years. I think it is high time we find a way to trim this down.

The least we can do is pass a structure rather than 14 different
arguments. Is everyone okay with that?

-ck
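For illustration only, here is one shape such a container could take. This is a hedged sketch, not part of any posted patch: the struct name and the field grouping are invented here, collecting exactly the parameters nvme_alloc_user_request() takes in this series.

/*
 * Hypothetical sketch only: one way to fold the 14 parameters of
 * nvme_alloc_user_request() into a single container. The struct name
 * and grouping are invented for illustration.
 */
struct nvme_user_req_args {
	struct nvme_command *cmd;	/* command to issue */
	void __user *ubuffer;		/* data buffer and its length */
	unsigned bufflen;
	void __user *meta_buffer;	/* metadata buffer, length, seed */
	unsigned meta_len;
	u32 meta_seed;
	void **metap;			/* returns the kernel metadata copy */
	unsigned timeout;
	bool vec;			/* ubuffer is an iovec array */
	blk_opf_t rq_flags;
	blk_mq_req_flags_t blk_flags;
	struct io_uring_cmd *ioucmd;	/* fixed-buffer plumbing added by this series */
	bool fixedbufs;
};

static struct request *nvme_alloc_user_request(struct request_queue *q,
		struct nvme_user_req_args *args);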
On Wed, Sep 07, 2022 at 02:51:31PM +0000, Chaitanya Kulkarni wrote:
>
>>	req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
>> -			meta_len, meta_seed, &meta, timeout, vec, 0, 0);
>> +			meta_len, meta_seed, &meta, timeout, vec, 0, 0, NULL, 0);
>>	if (IS_ERR(req))
>>		return PTR_ERR(req);
>
> 14 arguments to the function!
>
> Kanchan, I'm not pointing at this patch alone; it has happened over the
> years. I think it is high time we find a way to trim this down.
>
> The least we can do is pass a structure rather than 14 different
> arguments. Is everyone okay with that?
>

Maybe it's just me, but there is something (an unrelatedness) about
these fields that makes packing them all into a single container feel a
bit unnatural (or do you already have a good name for such a struct?).

So how about we split nvme_alloc_user_request into two? That will also
serve the purpose. Here is a patch that creates:
- a new nvme_alloc_user_request with 5 parameters
- a new nvme_map_user_request with 8 parameters

diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 548aca8b5b9f..cb2fa4db50dd 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -65,18 +65,10 @@ static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
 }
 
 static struct request *nvme_alloc_user_request(struct request_queue *q,
-		struct nvme_command *cmd, void __user *ubuffer,
-		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-		u32 meta_seed, void **metap, unsigned timeout, bool vec,
+		struct nvme_command *cmd, unsigned timeout,
 		blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
 {
-	bool write = nvme_is_write(cmd);
-	struct nvme_ns *ns = q->queuedata;
-	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
 	struct request *req;
-	struct bio *bio = NULL;
-	void *meta = NULL;
-	int ret;
 
 	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
 	if (IS_ERR(req))
@@ -86,49 +78,61 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 	if (timeout)
 		req->timeout = timeout;
 	nvme_req(req)->flags |= NVME_REQ_USERCMD;
+	return req;
+}
 
-	if (ubuffer && bufflen) {
-		if (!vec)
-			ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
-				GFP_KERNEL);
-		else {
-			struct iovec fast_iov[UIO_FASTIOV];
-			struct iovec *iov = fast_iov;
-			struct iov_iter iter;
-
-			ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
-					UIO_FASTIOV, &iov, &iter);
-			if (ret < 0)
-				goto out;
-			ret = blk_rq_map_user_iov(q, req, NULL, &iter,
-					GFP_KERNEL);
-			kfree(iov);
-		}
-		if (ret)
+static int nvme_map_user_request(struct request *req, void __user *ubuffer,
+		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+		u32 meta_seed, void **metap, bool vec)
+{
+	struct request_queue *q = req->q;
+	struct nvme_ns *ns = q->queuedata;
+	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+	struct bio *bio = NULL;
+	void *meta = NULL;
+	int ret;
+
+	if (!ubuffer || !bufflen)
+		return 0;
+
+	if (!vec)
+		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
+			GFP_KERNEL);
+	else {
+		struct iovec fast_iov[UIO_FASTIOV];
+		struct iovec *iov = fast_iov;
+		struct iov_iter iter;
+
+		ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
+				UIO_FASTIOV, &iov, &iter);
+		if (ret < 0)
 			goto out;
-		bio = req->bio;
-		if (bdev)
-			bio_set_dev(bio, bdev);
-		if (bdev && meta_buffer && meta_len) {
-			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
-					meta_seed, write);
-			if (IS_ERR(meta)) {
-				ret = PTR_ERR(meta);
-				goto out_unmap;
-			}
-			req->cmd_flags |= REQ_INTEGRITY;
-			*metap = meta;
+		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
+		kfree(iov);
+	}
+	bio = req->bio;
+	if (ret)
+		goto out_unmap;
+	if (bdev)
+		bio_set_dev(bio, bdev);
+	if (bdev && meta_buffer && meta_len) {
+		meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
+				meta_seed, req_op(req) == REQ_OP_DRV_OUT);
+		if (IS_ERR(meta)) {
+			ret = PTR_ERR(meta);
+			goto out_unmap;
 		}
+		req->cmd_flags |= REQ_INTEGRITY;
+		*metap = meta;
 	}
 
-	return req;
+	return ret;
 
 out_unmap:
 	if (bio)
 		blk_rq_unmap_user(bio);
 out:
-	blk_mq_free_request(req);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
@@ -141,13 +145,16 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	struct bio *bio;
 	int ret;
 
-	req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
-			meta_len, meta_seed, &meta, timeout, vec, 0, 0);
+	req = nvme_alloc_user_request(q, cmd, timeout, 0, 0);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	bio = req->bio;
+	ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
+			meta_len, meta_seed, &meta, vec);
+	if (ret)
+		goto out;
 
+	bio = req->bio;
 	ret = nvme_execute_passthru_rq(req);
 	if (result)
@@ -157,6 +164,7 @@
 			meta_len, ret);
 	if (bio)
 		blk_rq_unmap_user(bio);
+out:
 	blk_mq_free_request(req);
 	return ret;
 }
@@ -418,6 +426,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	blk_opf_t rq_flags = 0;
 	blk_mq_req_flags_t blk_flags = 0;
 	void *meta = NULL;
+	int ret;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
@@ -457,13 +466,17 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		rq_flags |= REQ_POLLED;
 
 retry:
-	req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
-			d.data_len, nvme_to_user_ptr(d.metadata),
-			d.metadata_len, 0, &meta, d.timeout_ms ?
-			msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags,
-			blk_flags);
+	req = nvme_alloc_user_request(q, &c,
+			d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0,
+			rq_flags, blk_flags);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+
+	ret = nvme_map_user_request(req, nvme_to_user_ptr(d.addr),
+			d.data_len, nvme_to_user_ptr(d.metadata),
+			d.metadata_len, 0, &meta, vec);
+	if (ret)
+		goto out_err;
 
 	req->end_io = nvme_uring_cmd_end_io;
 	req->end_io_data = ioucmd;
@@ -486,6 +499,9 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 	blk_execute_rq_nowait(req, false);
 	return -EIOCBQUEUED;
+
+out_err:
+	blk_mq_free_request(req);
+	return ret;
 }
 
 static bool is_ctrl_ioctl(unsigned int cmd)
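One consequence of the split worth spelling out: before, nvme_alloc_user_request() freed the request itself when mapping failed and returned ERR_PTR(ret); after the split, nvme_map_user_request() only returns an error and the caller owns the cleanup, hence the new out:/out_err: labels above. Distilled, the caller-side contract becomes the following sketch (locals such as q, cmd, timeout and ubuffer are assumed from nvme_submit_user_cmd(); error handling abbreviated):

	/* Sketch only: allocation and mapping are now two steps, and a
	 * mapping failure leaves the caller holding a request to free. */
	req = nvme_alloc_user_request(q, cmd, timeout, 0, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);		/* nothing to free yet */

	ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
			meta_len, meta_seed, &meta, vec);
	if (ret) {
		blk_mq_free_request(req);	/* caller now owns this cleanup */
		return ret;
	}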
On 9/8/22 4:47 AM, Kanchan Joshi wrote:
> On Wed, Sep 07, 2022 at 02:51:31PM +0000, Chaitanya Kulkarni wrote:
>>
>>>	req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
>>> -			meta_len, meta_seed, &meta, timeout, vec, 0, 0);
>>> +			meta_len, meta_seed, &meta, timeout, vec, 0, 0, NULL, 0);
>>>	if (IS_ERR(req))
>>>		return PTR_ERR(req);
>>
>> 14 arguments to the function!
>>
>> Kanchan, I'm not pointing at this patch alone; it has happened over the
>> years. I think it is high time we find a way to trim this down.
>>
>> The least we can do is pass a structure rather than 14 different
>> arguments. Is everyone okay with that?
>>
> Maybe it's just me, but there is something (an unrelatedness) about
> these fields that makes packing them all into a single container feel a
> bit unnatural (or do you already have a good name for such a struct?).

I think the bigger question here would be "does it generate better
code?", because packing the arguments into a struct doesn't make the
code any better at all; it just potentially makes it more fragile. It is
a work-around for the interface being pretty horrible, and it'd be a
much better idea to separate it out into distinct functions rather than
have this behemoth of a function that does it all.

In any case, I think that's a separate cleanup that should be done; it
should not gate this change. It's already horrible.

> So how about we split nvme_alloc_user_request into two? That will also
> serve the purpose. Here is a patch that creates:
> - a new nvme_alloc_user_request with 5 parameters
> - a new nvme_map_user_request with 8 parameters

This is a good start, though.
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 548aca8b5b9f..4341d758d6b9 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -65,10 +65,11 @@ static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
 }
 
 static struct request *nvme_alloc_user_request(struct request_queue *q,
-		struct nvme_command *cmd, void __user *ubuffer,
+		struct nvme_command *cmd, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
 		u32 meta_seed, void **metap, unsigned timeout, bool vec,
-		blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
+		blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags,
+		struct io_uring_cmd *ioucmd, bool fixedbufs)
 {
 	bool write = nvme_is_write(cmd);
 	struct nvme_ns *ns = q->queuedata;
@@ -89,14 +90,27 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 
 	if (ubuffer && bufflen) {
 		if (!vec)
-			ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
-				GFP_KERNEL);
+			if (fixedbufs) {
+				struct iov_iter iter;
+
+				ret = io_uring_cmd_import_fixed(ubuffer,
+						bufflen, rq_data_dir(req),
+						&iter, ioucmd);
+				if (ret < 0)
+					goto out;
+				ret = blk_rq_map_user_bvec(req, &iter);
+			} else {
+				ret = blk_rq_map_user(q, req, NULL,
+						nvme_to_user_ptr(ubuffer),
+						bufflen, GFP_KERNEL);
+			}
 		else {
 			struct iovec fast_iov[UIO_FASTIOV];
 			struct iovec *iov = fast_iov;
 			struct iov_iter iter;
 
-			ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
+			ret = import_iovec(rq_data_dir(req),
+					nvme_to_user_ptr(ubuffer), bufflen,
 					UIO_FASTIOV, &iov, &iter);
 			if (ret < 0)
 				goto out;
@@ -132,7 +146,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
-		struct nvme_command *cmd, void __user *ubuffer,
+		struct nvme_command *cmd, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
 		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
 {
@@ -142,7 +156,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	int ret;
 
 	req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
-			meta_len, meta_seed, &meta, timeout, vec, 0, 0);
+			meta_len, meta_seed, &meta, timeout, vec, 0, 0, NULL, 0);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -220,7 +234,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.appmask = cpu_to_le16(io.appmask);
 
 	return nvme_submit_user_cmd(ns->queue, &c,
-			nvme_to_user_ptr(io.addr), length,
+			io.addr, length,
 			metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
 			false);
 }
@@ -274,7 +288,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		timeout = msecs_to_jiffies(cmd.timeout_ms);
 
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			nvme_to_user_ptr(cmd.addr), cmd.data_len,
+			cmd.addr, cmd.data_len,
 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
 			0, &result, timeout, false);
 
@@ -320,7 +334,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		timeout = msecs_to_jiffies(cmd.timeout_ms);
 
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			nvme_to_user_ptr(cmd.addr), cmd.data_len,
+			cmd.addr, cmd.data_len,
 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
 			0, &cmd.result, timeout, vec);
 
@@ -457,11 +471,11 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		rq_flags |= REQ_POLLED;
 
 retry:
-	req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
+	req = nvme_alloc_user_request(q, &c, d.addr,
 			d.data_len, nvme_to_user_ptr(d.metadata),
 			d.metadata_len, 0, &meta, d.timeout_ms ?
 			msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags,
-			blk_flags);
+			blk_flags, ioucmd, issue_flags & IO_URING_F_FIXEDBUFS);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 	req->end_io = nvme_uring_cmd_end_io;
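To make the intended use concrete, here is a rough userspace sketch of driving this path with liburing. It is an assumption-laden illustration, not part of the series: it uses the fixed-buffer SQE interface as it was eventually merged (IORING_URING_CMD_FIXED in sqe->uring_cmd_flags plus sqe->buf_index), which differs from the IO_URING_F_FIXEDBUFS plumbing in this revision; it also assumes /dev/ng0n1 as the passthrough char device, a 512-byte LBA format, and omits error handling.

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov = { .iov_len = 4096 };
	int fd = open("/dev/ng0n1", O_RDWR);	/* NVMe passthrough char device */

	posix_memalign(&iov.iov_base, 4096, iov.iov_len);

	/* uring_cmd needs the big-SQE/big-CQE ring format */
	io_uring_queue_init(8, &ring, IORING_SETUP_SQE128 | IORING_SETUP_CQE32);

	/* Register (pin) the buffer once; the per-I/O map/pin cost goes away */
	io_uring_register_buffers(&ring, &iov, 1);

	sqe = io_uring_get_sqe(&ring);
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;
	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;	/* use a fixed buffer */
	sqe->buf_index = 0;				/* index registered above */

	/* The passthrough command itself lives in the SQE's cmd area */
	struct nvme_uring_cmd *cmd = (struct nvme_uring_cmd *)sqe->cmd;
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = 0x02;				/* NVMe Read */
	cmd->nsid = 1;
	cmd->addr = (uintptr_t)iov.iov_base;
	cmd->data_len = iov.iov_len;
	cmd->cdw10 = 0;					/* SLBA, low 32 bits */
	cmd->cdw11 = 0;					/* SLBA, high 32 bits */
	cmd->cdw12 = 7;					/* NLB - 1: 8 x 512B = 4096 */

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);
	return 0;
}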