Message ID | 20200930220414.562527-2-kbusch@kernel.org (mailing list archive) |
---|---|
State | New, archived |
Series | nvme qemu cleanups and fixes |
On Sep 30 15:04, Keith Busch wrote:
> The code switches on the opcode to invoke a function specific to that
> opcode. There's no point in consolidating back to a common function that
> just switches on that same opcode without any actual common code.
> Restore the opcode specific behavior without going back through another
> level of switches.
>
> Signed-off-by: Keith Busch <kbusch@kernel.org>

Reviewed-by: Klaus Jensen <k.jensen@samsung.com>

Point taken. I could've sworn I had a better reason for this.

> ---
>  hw/block/nvme.c | 91 ++++++++++++++++---------------------------------
>  1 file changed, 29 insertions(+), 62 deletions(-)
>
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index da8344f196..db52ea0db9 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -927,68 +927,12 @@ static void nvme_rw_cb(void *opaque, int ret)
>      nvme_enqueue_req_completion(nvme_cq(req), req);
>  }
>
> -static uint16_t nvme_do_aio(BlockBackend *blk, int64_t offset, size_t len,
> -                            NvmeRequest *req)
> -{
> -    BlockAcctCookie *acct = &req->acct;
> -    BlockAcctStats *stats = blk_get_stats(blk);
> -
> -    bool is_write = false;
> -
> -    trace_pci_nvme_do_aio(nvme_cid(req), req->cmd.opcode,
> -                          nvme_io_opc_str(req->cmd.opcode), blk_name(blk),
> -                          offset, len);
> -
> -    switch (req->cmd.opcode) {
> -    case NVME_CMD_FLUSH:
> -        block_acct_start(stats, acct, 0, BLOCK_ACCT_FLUSH);
> -        req->aiocb = blk_aio_flush(blk, nvme_rw_cb, req);
> -        break;
> -
> -    case NVME_CMD_WRITE_ZEROES:
> -        block_acct_start(stats, acct, len, BLOCK_ACCT_WRITE);
> -        req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len,
> -                                           BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
> -                                           req);
> -        break;
> -
> -    case NVME_CMD_WRITE:
> -        is_write = true;
> -
> -        /* fallthrough */
> -
> -    case NVME_CMD_READ:
> -        block_acct_start(stats, acct, len,
> -                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
> -
> -        if (req->qsg.sg) {
> -            if (is_write) {
> -                req->aiocb = dma_blk_write(blk, &req->qsg, offset,
> -                                           BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> -            } else {
> -                req->aiocb = dma_blk_read(blk, &req->qsg, offset,
> -                                          BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> -            }
> -        } else {
> -            if (is_write) {
> -                req->aiocb = blk_aio_pwritev(blk, offset, &req->iov, 0,
> -                                             nvme_rw_cb, req);
> -            } else {
> -                req->aiocb = blk_aio_preadv(blk, offset, &req->iov, 0,
> -                                            nvme_rw_cb, req);
> -            }
> -        }
> -
> -        break;
> -    }
> -
> -    return NVME_NO_COMPLETE;
> -}
> -
>  static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
>  {
> -    NvmeNamespace *ns = req->ns;
> -    return nvme_do_aio(ns->blkconf.blk, 0, 0, req);
> +    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
> +                     BLOCK_ACCT_FLUSH);
> +    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);
> +    return NVME_NO_COMPLETE;
>  }
>
>  static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
> @@ -1009,7 +953,11 @@ static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
>          return status;
>      }
>
> -    return nvme_do_aio(ns->blkconf.blk, offset, count, req);
> +    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
> +                     BLOCK_ACCT_WRITE);
> +    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
> +                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
> +    return NVME_NO_COMPLETE;
>  }
>
>  static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
> @@ -1023,6 +971,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
>      uint64_t data_offset = nvme_l2b(ns, slba);
>      enum BlockAcctType acct = req->cmd.opcode == NVME_CMD_WRITE ?
>          BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
> +    BlockBackend *blk = ns->blkconf.blk;
>      uint16_t status;
>
>      trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode),
> @@ -1045,7 +994,25 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
>          goto invalid;
>      }
>
> -    return nvme_do_aio(ns->blkconf.blk, data_offset, data_size, req);
> +    block_acct_start(blk_get_stats(blk), &req->acct, data_size, acct);
> +    if (req->qsg.sg) {
> +        if (acct == BLOCK_ACCT_WRITE) {
> +            req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
> +                                       BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> +        } else {
> +            req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
> +                                      BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> +        }
> +    } else {
> +        if (acct == BLOCK_ACCT_WRITE) {
> +            req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
> +                                         nvme_rw_cb, req);
> +        } else {
> +            req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
> +                                        nvme_rw_cb, req);
> +        }
> +    }
> +    return NVME_NO_COMPLETE;
>
>  invalid:
>      block_acct_invalid(blk_get_stats(ns->blkconf.blk), acct);
> --
> 2.24.1
On Oct 1 06:05, Klaus Jensen wrote:
> On Sep 30 15:04, Keith Busch wrote:
> > The code switches on the opcode to invoke a function specific to that
> > opcode. There's no point in consolidating back to a common function that
> > just switches on that same opcode without any actual common code.
> > Restore the opcode specific behavior without going back through another
> > level of switches.
> >
> > Signed-off-by: Keith Busch <kbusch@kernel.org>
>
> Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
>
> Point taken. I could've sworn I had a better reason for this.
>

Can you also remove the nvme_do_aio trace event?
On Thu, Oct 01, 2020 at 10:48:03AM +0200, Klaus Jensen wrote:
> On Oct 1 06:05, Klaus Jensen wrote:
> > On Sep 30 15:04, Keith Busch wrote:
> > > The code switches on the opcode to invoke a function specific to that
> > > opcode. There's no point in consolidating back to a common function that
> > > just switches on that same opcode without any actual common code.
> > > Restore the opcode specific behavior without going back through another
> > > level of switches.
> > >
> > > Signed-off-by: Keith Busch <kbusch@kernel.org>
> >
> > Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
> >
> > Point taken. I could've sworn I had a better reason for this.
> >
>
> Can you also remove the nvme_do_aio trace event?

Ah, thanks for pointing that out. I'll fix up the trace. I think you may
still have a case in the making where this makes sense? If so, we can
reevaluate when it's ready.
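For context, the now-orphaned trace event lives in hw/block/trace-events, so the fix-up amounts to a one-line deletion along these lines. The argument list follows the trace_pci_nvme_do_aio() call site in the removed code above; the exact format string is a guess, not a quote from the tree:

--- a/hw/block/trace-events
+++ b/hw/block/trace-events
-pci_nvme_do_aio(uint16_t cid, uint8_t opc, const char *opname, const char *blkname, int64_t offset, size_t len) "cid %"PRIu16" opc 0x%"PRIx8" opname '%s' blk '%s' offset %"PRId64" len %zu"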
On Oct 1 06:05, Klaus Jensen wrote:
> On Sep 30 15:04, Keith Busch wrote:
> > The code switches on the opcode to invoke a function specific to that
> > opcode. There's no point in consolidating back to a common function that
> > just switches on that same opcode without any actual common code.
> > Restore the opcode specific behavior without going back through another
> > level of switches.
> >
> > Signed-off-by: Keith Busch <kbusch@kernel.org>
>
> Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
>
> Point taken. I could've sworn I had a better reason for this.
>
> > ---
> >  hw/block/nvme.c | 91 ++++++++++++++++---------------------------------
> >  1 file changed, 29 insertions(+), 62 deletions(-)
> >
> > diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> > index da8344f196..db52ea0db9 100644
> > --- a/hw/block/nvme.c
> > +++ b/hw/block/nvme.c
> > @@ -927,68 +927,12 @@ static void nvme_rw_cb(void *opaque, int ret)
> >      nvme_enqueue_req_completion(nvme_cq(req), req);
> >  }
> >
> > -static uint16_t nvme_do_aio(BlockBackend *blk, int64_t offset, size_t len,
> > -                            NvmeRequest *req)
> > -{
> > -    BlockAcctCookie *acct = &req->acct;
> > -    BlockAcctStats *stats = blk_get_stats(blk);
> > -
> > -    bool is_write = false;
> > -
> > -    trace_pci_nvme_do_aio(nvme_cid(req), req->cmd.opcode,
> > -                          nvme_io_opc_str(req->cmd.opcode), blk_name(blk),
> > -                          offset, len);
> > -
> > -    switch (req->cmd.opcode) {
> > -    case NVME_CMD_FLUSH:
> > -        block_acct_start(stats, acct, 0, BLOCK_ACCT_FLUSH);
> > -        req->aiocb = blk_aio_flush(blk, nvme_rw_cb, req);
> > -        break;
> > -
> > -    case NVME_CMD_WRITE_ZEROES:
> > -        block_acct_start(stats, acct, len, BLOCK_ACCT_WRITE);
> > -        req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len,
> > -                                           BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
> > -                                           req);
> > -        break;
> > -
> > -    case NVME_CMD_WRITE:
> > -        is_write = true;
> > -
> > -        /* fallthrough */
> > -
> > -    case NVME_CMD_READ:
> > -        block_acct_start(stats, acct, len,
> > -                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
> > -
> > -        if (req->qsg.sg) {
> > -            if (is_write) {
> > -                req->aiocb = dma_blk_write(blk, &req->qsg, offset,
> > -                                           BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> > -            } else {
> > -                req->aiocb = dma_blk_read(blk, &req->qsg, offset,
> > -                                          BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> > -            }
> > -        } else {
> > -            if (is_write) {
> > -                req->aiocb = blk_aio_pwritev(blk, offset, &req->iov, 0,
> > -                                             nvme_rw_cb, req);
> > -            } else {
> > -                req->aiocb = blk_aio_preadv(blk, offset, &req->iov, 0,
> > -                                            nvme_rw_cb, req);
> > -            }
> > -        }
> > -
> > -        break;
> > -    }
> > -
> > -    return NVME_NO_COMPLETE;
> > -}
> > -
> >  static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
> >  {
> > -    NvmeNamespace *ns = req->ns;
> > -    return nvme_do_aio(ns->blkconf.blk, 0, 0, req);
> > +    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,

Uh, ouch!

This and the rest need to be changed to ns->blkconf.blk, not n->conf.blk.

> > +                     BLOCK_ACCT_FLUSH);
> > +    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);
> > +    return NVME_NO_COMPLETE;
> >  }
> >
> >  static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
> > @@ -1009,7 +953,11 @@ static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
> >          return status;
> >      }
> >
> > -    return nvme_do_aio(ns->blkconf.blk, offset, count, req);
> > +    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
> > +                     BLOCK_ACCT_WRITE);
> > +    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
> > +                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
> > +    return NVME_NO_COMPLETE;
> >  }
> >
> >  static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
> > @@ -1023,6 +971,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
> >      uint64_t data_offset = nvme_l2b(ns, slba);
> >      enum BlockAcctType acct = req->cmd.opcode == NVME_CMD_WRITE ?
> >          BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
> > +    BlockBackend *blk = ns->blkconf.blk;
> >      uint16_t status;
> >
> >      trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode),
> > @@ -1045,7 +994,25 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
> >          goto invalid;
> >      }
> >
> > -    return nvme_do_aio(ns->blkconf.blk, data_offset, data_size, req);
> > +    block_acct_start(blk_get_stats(blk), &req->acct, data_size, acct);
> > +    if (req->qsg.sg) {
> > +        if (acct == BLOCK_ACCT_WRITE) {
> > +            req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
> > +                                       BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> > +        } else {
> > +            req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
> > +                                      BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> > +        }
> > +    } else {
> > +        if (acct == BLOCK_ACCT_WRITE) {
> > +            req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
> > +                                         nvme_rw_cb, req);
> > +        } else {
> > +            req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
> > +                                        nvme_rw_cb, req);
> > +        }
> > +    }
> > +    return NVME_NO_COMPLETE;
> >
> >  invalid:
> >      block_acct_invalid(blk_get_stats(ns->blkconf.blk), acct);
> > --
> > 2.24.1
>
> --
> One of us - No more doubt, silence or taboo about mental illness.
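Folding that correction in, nvme_flush() would come out roughly as follows. This is a sketch assuming the fixed-up version reinstates the req->ns lookup that the removed nvme_do_aio() path used; it is not the committed code:

static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns = req->ns;
    /* the per-namespace backend; n->conf.blk is the controller-level one */
    BlockBackend *blk = ns->blkconf.blk;

    block_acct_start(blk_get_stats(blk), &req->acct, 0, BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(blk, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}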
> -----Original Message-----
> From: Keith Busch <kbusch@kernel.org>
> Sent: Wednesday, September 30, 2020 6:04 PM
> To: qemu-block@nongnu.org; qemu-devel@nongnu.org; Klaus Jensen
> <k.jensen@samsung.com>
> Cc: Niklas Cassel <Niklas.Cassel@wdc.com>; Dmitry Fomichev
> <Dmitry.Fomichev@wdc.com>; Kevin Wolf <kwolf@redhat.com>; Philippe
> Mathieu-Daudé <philmd@redhat.com>; Keith Busch <kbusch@kernel.org>
> Subject: [PATCH 1/9] hw/block/nvme: remove pointless rw indirection
>
> The code switches on the opcode to invoke a function specific to that
> opcode. There's no point in consolidating back to a common function that
> just switches on that same opcode without any actual common code.
> Restore the opcode specific behavior without going back through another
> level of switches.
>
> Signed-off-by: Keith Busch <kbusch@kernel.org>
> ---
>  hw/block/nvme.c | 91 ++++++++++++++++---------------------------------
>  1 file changed, 29 insertions(+), 62 deletions(-)
>
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index da8344f196..db52ea0db9 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -927,68 +927,12 @@ static void nvme_rw_cb(void *opaque, int ret)
>      nvme_enqueue_req_completion(nvme_cq(req), req);
>  }
>
> -static uint16_t nvme_do_aio(BlockBackend *blk, int64_t offset, size_t len,
> -                            NvmeRequest *req)
> -{
> -    BlockAcctCookie *acct = &req->acct;
> -    BlockAcctStats *stats = blk_get_stats(blk);
> -
> -    bool is_write = false;
> -
> -    trace_pci_nvme_do_aio(nvme_cid(req), req->cmd.opcode,
> -                          nvme_io_opc_str(req->cmd.opcode), blk_name(blk),
> -                          offset, len);
> -
> -    switch (req->cmd.opcode) {
> -    case NVME_CMD_FLUSH:
> -        block_acct_start(stats, acct, 0, BLOCK_ACCT_FLUSH);
> -        req->aiocb = blk_aio_flush(blk, nvme_rw_cb, req);
> -        break;
> -
> -    case NVME_CMD_WRITE_ZEROES:
> -        block_acct_start(stats, acct, len, BLOCK_ACCT_WRITE);
> -        req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len,
> -                                           BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
> -                                           req);
> -        break;
> -
> -    case NVME_CMD_WRITE:
> -        is_write = true;
> -
> -        /* fallthrough */
> -
> -    case NVME_CMD_READ:
> -        block_acct_start(stats, acct, len,
> -                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
> -
> -        if (req->qsg.sg) {
> -            if (is_write) {
> -                req->aiocb = dma_blk_write(blk, &req->qsg, offset,
> -                                           BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> -            } else {
> -                req->aiocb = dma_blk_read(blk, &req->qsg, offset,
> -                                          BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> -            }
> -        } else {
> -            if (is_write) {
> -                req->aiocb = blk_aio_pwritev(blk, offset, &req->iov, 0,
> -                                             nvme_rw_cb, req);
> -            } else {
> -                req->aiocb = blk_aio_preadv(blk, offset, &req->iov, 0,
> -                                            nvme_rw_cb, req);
> -            }
> -        }
> -
> -        break;
> -    }
> -
> -    return NVME_NO_COMPLETE;
> -}
> -
>  static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
>  {
> -    NvmeNamespace *ns = req->ns;
> -    return nvme_do_aio(ns->blkconf.blk, 0, 0, req);
> +    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,

This should be

    block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,

> +                     BLOCK_ACCT_FLUSH);
> +    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);

and here

    req->aiocb = blk_aio_flush(req->ns->blkconf.blk, nvme_rw_cb, req);

> +    return NVME_NO_COMPLETE;
>  }
>
>  static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
> @@ -1009,7 +953,11 @@ static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
>          return status;
>      }
>
> -    return nvme_do_aio(ns->blkconf.blk, offset, count, req);
> +    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,

    block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,

> +                     BLOCK_ACCT_WRITE);
> +    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,

Here as well,

    req->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, offset, count,

> +                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
> +    return NVME_NO_COMPLETE;
>  }
>
>  static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
> @@ -1023,6 +971,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
>      uint64_t data_offset = nvme_l2b(ns, slba);
>      enum BlockAcctType acct = req->cmd.opcode == NVME_CMD_WRITE ?
>          BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
> +    BlockBackend *blk = ns->blkconf.blk;
>      uint16_t status;
>
>      trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode),
> @@ -1045,7 +994,25 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
>          goto invalid;
>      }
>
> -    return nvme_do_aio(ns->blkconf.blk, data_offset, data_size, req);
> +    block_acct_start(blk_get_stats(blk), &req->acct, data_size, acct);
> +    if (req->qsg.sg) {
> +        if (acct == BLOCK_ACCT_WRITE) {
> +            req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
> +                                       BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> +        } else {
> +            req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
> +                                      BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> +        }
> +    } else {
> +        if (acct == BLOCK_ACCT_WRITE) {
> +            req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
> +                                         nvme_rw_cb, req);
> +        } else {
> +            req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
> +                                        nvme_rw_cb, req);
> +        }
> +    }
> +    return NVME_NO_COMPLETE;
>
>  invalid:
>      block_acct_invalid(blk_get_stats(ns->blkconf.blk), acct);
> --
> 2.24.1
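With those corrections applied, the tail of nvme_write_zeroes() would read roughly as below. This is a sketch of the fixed-up hunk using the ns local already in scope in that function, not the committed code:

    block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, offset, count,
                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;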
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index da8344f196..db52ea0db9 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -927,68 +927,12 @@ static void nvme_rw_cb(void *opaque, int ret)
     nvme_enqueue_req_completion(nvme_cq(req), req);
 }
 
-static uint16_t nvme_do_aio(BlockBackend *blk, int64_t offset, size_t len,
-                            NvmeRequest *req)
-{
-    BlockAcctCookie *acct = &req->acct;
-    BlockAcctStats *stats = blk_get_stats(blk);
-
-    bool is_write = false;
-
-    trace_pci_nvme_do_aio(nvme_cid(req), req->cmd.opcode,
-                          nvme_io_opc_str(req->cmd.opcode), blk_name(blk),
-                          offset, len);
-
-    switch (req->cmd.opcode) {
-    case NVME_CMD_FLUSH:
-        block_acct_start(stats, acct, 0, BLOCK_ACCT_FLUSH);
-        req->aiocb = blk_aio_flush(blk, nvme_rw_cb, req);
-        break;
-
-    case NVME_CMD_WRITE_ZEROES:
-        block_acct_start(stats, acct, len, BLOCK_ACCT_WRITE);
-        req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len,
-                                           BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
-                                           req);
-        break;
-
-    case NVME_CMD_WRITE:
-        is_write = true;
-
-        /* fallthrough */
-
-    case NVME_CMD_READ:
-        block_acct_start(stats, acct, len,
-                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
-
-        if (req->qsg.sg) {
-            if (is_write) {
-                req->aiocb = dma_blk_write(blk, &req->qsg, offset,
-                                           BDRV_SECTOR_SIZE, nvme_rw_cb, req);
-            } else {
-                req->aiocb = dma_blk_read(blk, &req->qsg, offset,
-                                          BDRV_SECTOR_SIZE, nvme_rw_cb, req);
-            }
-        } else {
-            if (is_write) {
-                req->aiocb = blk_aio_pwritev(blk, offset, &req->iov, 0,
-                                             nvme_rw_cb, req);
-            } else {
-                req->aiocb = blk_aio_preadv(blk, offset, &req->iov, 0,
-                                            nvme_rw_cb, req);
-            }
-        }
-
-        break;
-    }
-
-    return NVME_NO_COMPLETE;
-}
-
 static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
 {
-    NvmeNamespace *ns = req->ns;
-    return nvme_do_aio(ns->blkconf.blk, 0, 0, req);
+    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
+                     BLOCK_ACCT_FLUSH);
+    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);
+    return NVME_NO_COMPLETE;
 }
 
 static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
@@ -1009,7 +953,11 @@ static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
         return status;
     }
 
-    return nvme_do_aio(ns->blkconf.blk, offset, count, req);
+    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
+                     BLOCK_ACCT_WRITE);
+    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
+                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
+    return NVME_NO_COMPLETE;
 }
 
 static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
@@ -1023,6 +971,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
     uint64_t data_offset = nvme_l2b(ns, slba);
     enum BlockAcctType acct = req->cmd.opcode == NVME_CMD_WRITE ?
         BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
+    BlockBackend *blk = ns->blkconf.blk;
     uint16_t status;
 
     trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode),
@@ -1045,7 +994,25 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
         goto invalid;
     }
 
-    return nvme_do_aio(ns->blkconf.blk, data_offset, data_size, req);
+    block_acct_start(blk_get_stats(blk), &req->acct, data_size, acct);
+    if (req->qsg.sg) {
+        if (acct == BLOCK_ACCT_WRITE) {
+            req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
+                                       BDRV_SECTOR_SIZE, nvme_rw_cb, req);
+        } else {
+            req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
+                                      BDRV_SECTOR_SIZE, nvme_rw_cb, req);
+        }
+    } else {
+        if (acct == BLOCK_ACCT_WRITE) {
+            req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
+                                         nvme_rw_cb, req);
+        } else {
+            req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
+                                        nvme_rw_cb, req);
+        }
+    }
+    return NVME_NO_COMPLETE;
 
 invalid:
     block_acct_invalid(blk_get_stats(ns->blkconf.blk), acct);
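For reference, the byte offsets handed to the AIO submissions above come from nvme_l2b(), which scales a logical block address by the namespace's LBA data size. A minimal sketch of what such a helper does; this is the assumed shape, not necessarily the exact definition in the tree:

static inline uint64_t nvme_l2b(NvmeNamespace *ns, uint64_t lba)
{
    /* lbads is log2 of the LBA data size, so this is lba * block_size */
    return lba << nvme_ns_lbads(ns);
}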
The code switches on the opcode to invoke a function specific to that
opcode. There's no point in consolidating back to a common function that
just switches on that same opcode without any actual common code.
Restore the opcode specific behavior without going back through another
level of switches.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 hw/block/nvme.c | 91 ++++++++++++++++---------------------------------
 1 file changed, 29 insertions(+), 62 deletions(-)
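To make the rationale concrete: the I/O entry point already dispatches on the opcode, so funneling every handler back through nvme_do_aio() meant switching on the same opcode twice. In outline, this is an illustrative sketch of the control flow, not the exact code in the tree:

static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    switch (req->cmd.opcode) {            /* first switch: pick a handler */
    case NVME_CMD_FLUSH:
        return nvme_flush(n, req);
    case NVME_CMD_WRITE_ZEROES:
        return nvme_write_zeroes(n, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, req);
    default:
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

/*
 * Before this patch, each handler then called nvme_do_aio(), which switched
 * on req->cmd.opcode a second time to pick the blk_aio_*() submission. The
 * patch inlines that submission into the per-opcode handlers instead.
 */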