@@ -633,11 +633,13 @@ static inline void nvme_init_request(struct request *req,
}

struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags)
+ struct nvme_command *cmd, blk_mq_req_flags_t flags,
+ unsigned int rq_flags)
{
+ unsigned int cmd_flags = nvme_req_op(cmd) | rq_flags;
struct request *req;

- req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+ req = blk_mq_alloc_request(q, cmd_flags, flags);
if (!IS_ERR(req))
nvme_init_request(req, cmd);
return req;
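
The allocation helper grows a second flags word. The existing blk_mq_req_flags_t argument still steers tag allocation (BLK_MQ_REQ_RESERVED, BLK_MQ_REQ_NOWAIT); the new rq_flags is ORed with nvme_req_op(cmd) into the operation word handed to blk_mq_alloc_request(), so a caller can attach request flags such as REQ_POLLED to a passthrough command. The in-kernel callers updated below (sync commands, keep-alive, abort, queue deletion, nvmet passthru) all pass 0, leaving their behaviour unchanged; only the user passthrough paths opt in. A minimal sketch of a hypothetical polled caller, assuming ns is a live struct nvme_ns and eliding the rest of the command setup:

    struct nvme_command cmd = { };
    struct request *req;

    cmd.rw.opcode = nvme_cmd_read;
    /* nvme_req_op() maps this non-write opcode to REQ_OP_DRV_IN;
     * REQ_POLLED marks the request for polled completion. */
    req = nvme_alloc_request(ns->queue, &cmd, 0, REQ_POLLED);
    if (IS_ERR(req))
            return PTR_ERR(req);

Note that a REQ_POLLED request only completes if someone actually polls the completion queue; the block-layer change that teaches the synchronous execution path to do that is separate and not part of this diff.
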
@@ -1081,7 +1083,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
int ret;

if (qid == NVME_QID_ANY)
- req = nvme_alloc_request(q, cmd, flags);
+ req = nvme_alloc_request(q, cmd, flags, 0);
else
req = nvme_alloc_request_qid(q, cmd, flags, qid);
if (IS_ERR(req))
@@ -1277,7 +1279,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
}

rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, 0);
if (IS_ERR(rq)) {
/* allocation failure, reset the controller */
dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
@@ -142,7 +142,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, u64 *result, unsigned timeout,
- struct io_uring_cmd *ioucmd)
+ struct io_uring_cmd *ioucmd, unsigned int rq_flags)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
@@ -152,7 +152,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
void *meta = NULL;
int ret;

- req = nvme_alloc_request(q, cmd, 0);
+ req = nvme_alloc_request(q, cmd, 0, rq_flags);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -212,11 +212,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
struct nvme_command c;
unsigned length, meta_len;
void __user *metadata;
+ unsigned int rq_flags = 0;

if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
- if (io.flags)
- return -EINVAL;
+
+ if (io.flags & NVME_HIPRI)
+ rq_flags |= REQ_POLLED;

switch (io.opcode) {
case nvme_cmd_write:
@@ -254,7 +256,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)

memset(&c, 0, sizeof(c));
c.rw.opcode = io.opcode;
- c.rw.flags = io.flags;
+ c.rw.flags = 0;
c.rw.nsid = cpu_to_le32(ns->head->ns_id);
c.rw.slba = cpu_to_le64(io.slba);
c.rw.length = cpu_to_le16(io.nblocks);
@@ -266,7 +268,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)

return nvme_submit_user_cmd(ns->queue, &c,
io.addr, length, metadata, meta_len,
- lower_32_bits(io.slba), NULL, 0, NULL);
+ lower_32_bits(io.slba), NULL, 0, NULL, rq_flags);
}

static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
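
Two things happen in nvme_submit_io(). First, the previously must-be-zero io.flags byte becomes meaningful: NVME_HIPRI maps to REQ_POLLED, which should steer the request onto a poll queue (HCTX_TYPE_POLL) when the driver has allocated any; undefined bits are now silently ignored rather than rejected. Second, the byte is no longer copied into c.rw.flags but zeroed, because it now carries a kernel-directed hint and must not leak into the on-wire command dword 0 flags, which the NVMe spec reserves for PRP/SGL selection and fused operations. The two passthrough handlers below repeat the same pattern. A hedged userspace sketch of the new knob; fd (an open namespace block device such as /dev/nvme0n1), buf, the 512-byte LBA format, and the helper name are assumptions of the example, not part of the patch:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/nvme_ioctl.h>

    /* Submit one polled 4 KiB read through NVME_IOCTL_SUBMIT_IO. */
    static int read_polled(int fd, void *buf)
    {
            struct nvme_user_io io;

            memset(&io, 0, sizeof(io));
            io.opcode  = 0x02;           /* nvme_cmd_read */
            io.flags   = NVME_HIPRI;     /* complete by polling, not IRQ */
            io.slba    = 0;
            io.nblocks = 7;              /* zero-based: 8 x 512 B blocks */
            io.addr    = (uintptr_t)buf;

            return ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io);
    }

On kernels without this patch the same call fails with -EINVAL, since any nonzero flags byte was rejected.
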
@@ -288,6 +290,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
{
struct nvme_passthru_cmd cmd;
struct nvme_command c;
+ unsigned int rq_flags = 0;
unsigned timeout = 0;
u64 result;
int status;
@@ -296,14 +299,14 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return -EACCES;
if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
- if (cmd.flags)
- return -EINVAL;
+ if (cmd.flags & NVME_HIPRI)
+ rq_flags |= REQ_POLLED;
if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
return -EINVAL;

memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
- c.common.flags = cmd.flags;
+ c.common.flags = 0;
c.common.nsid = cpu_to_le32(cmd.nsid);
c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
@@ -319,7 +322,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,

status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
- cmd.metadata_len, 0, &result, timeout, NULL);
+ cmd.metadata_len, 0, &result, timeout, NULL, rq_flags);
if (status >= 0) {
if (put_user(result, &ucmd->result))
return -EFAULT;
@@ -335,6 +338,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
{
struct nvme_passthru_cmd64 cmd;
struct nvme_command c;
+ unsigned int rq_flags = 0;
unsigned timeout = 0;
int status;

@@ -342,14 +346,15 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return -EACCES;
if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
- if (cmd.flags)
- return -EINVAL;
+ if (cmd.flags & NVME_HIPRI)
+ rq_flags |= REQ_POLLED;
+
if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
return -EINVAL;

memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
- c.common.flags = cmd.flags;
+ c.common.flags = 0;
c.common.nsid = cpu_to_le32(cmd.nsid);
c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
@@ -365,7 +370,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,

status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
- cmd.metadata_len, 0, &cmd.result, timeout, ioucmd);
+ cmd.metadata_len, 0, &cmd.result, timeout, ioucmd, rq_flags);
if (!ioucmd && status >= 0) {
if (put_user(cmd.result, &ucmd->result))
return -EFAULT;
@@ -696,7 +696,8 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags);
+ struct nvme_command *cmd, blk_mq_req_flags_t flags,
+ unsigned int rq_flags);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
@@ -1429,7 +1429,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
req->tag, nvmeq->qid);

abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
- BLK_MQ_REQ_NOWAIT);
+ BLK_MQ_REQ_NOWAIT, 0);
if (IS_ERR(abort_req)) {
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
@@ -2475,7 +2475,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)

cmd.delete_queue.opcode = opcode;
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

- req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+ req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, 0);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -253,7 +253,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
timeout = nvmet_req_subsys(req)->admin_timeout;
}

- rq = nvme_alloc_request(q, req->cmd, 0);
+ rq = nvme_alloc_request(q, req->cmd, 0, 0);
if (IS_ERR(rq)) {
status = NVME_SC_INTERNAL;
goto out_put_ns;
@@ -9,6 +9,10 @@

#include <linux/types.h>

+enum nvme_io_flags {
+ NVME_HIPRI = 1 << 0, /* use polling queue if available */
+};
+
struct nvme_user_io {
__u8 opcode;
__u8 flags;
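
The uapi side claims bit 0 of the flags byte that struct nvme_user_io and both passthrough command structs carry, and which the kernel until now required to be zero; only this bit is interpreted. A hedged sketch of the same hint through the 64-bit passthrough ioctl; fd, buf, the namespace id, the LBA geometry, and the helper name are assumptions of the example:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/nvme_ioctl.h>

    /* One polled 4 KiB read via NVME_IOCTL_IO64_CMD; returns the NVMe
     * status (nonzero on device error) or a negative errno. */
    static int read_polled64(int fd, void *buf)
    {
            struct nvme_passthru_cmd64 cmd;

            memset(&cmd, 0, sizeof(cmd));
            cmd.opcode   = 0x02;          /* nvme_cmd_read */
            cmd.flags    = NVME_HIPRI;    /* poll instead of waiting for an IRQ */
            cmd.nsid     = 1;             /* assumed namespace id */
            cmd.addr     = (uintptr_t)buf;
            cmd.data_len = 4096;
            cmd.cdw10    = 0;             /* starting LBA, low 32 bits */
            cmd.cdw12    = 7;             /* NLB, zero-based: 8 x 512 B blocks */

            return ioctl(fd, NVME_IOCTL_IO64_CMD, &cmd);
    }
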