Message ID: 20220111161937.56272-2-pankaj.gupta.linux@gmail.com (mailing list archive)
State:      New, archived
Series:     virtio-pmem: Asynchronous flush
On Tue, Jan 11, 2022 at 8:23 AM Pankaj Gupta
<pankaj.gupta.linux@gmail.com> wrote:
>
> Enable asynchronous flush for virtio pmem using work queue. Also,
> coalesce the flush requests when a flush is already in process.
> This functionality is copied from md/RAID code.
>
> When a flush is already in process, new flush requests wait till
> previous flush completes in another context (work queue). For all
> the requests come between ongoing flush and new flush start time, only
> single flush executes, thus adhers to flush coalscing logic. This is

s/adhers/adheres/

s/coalscing/coalescing/

> important for maintaining the flush request order with request coalscing.

s/coalscing/coalescing/

>
> Signed-off-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
> ---
>  drivers/nvdimm/nd_virtio.c   | 74 +++++++++++++++++++++++++++---------
>  drivers/nvdimm/virtio_pmem.c | 10 +++++
>  drivers/nvdimm/virtio_pmem.h | 16 ++++++++
>  3 files changed, 83 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
> index 10351d5b49fa..179ea7a73338 100644
> --- a/drivers/nvdimm/nd_virtio.c
> +++ b/drivers/nvdimm/nd_virtio.c
> @@ -100,26 +100,66 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
>  /* The asynchronous flush callback function */
>  int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
>  {
> -        /*
> -         * Create child bio for asynchronous flush and chain with
> -         * parent bio. Otherwise directly call nd_region flush.
> +        /* queue asynchronous flush and coalesce the flush requests */
> +        struct virtio_device *vdev = nd_region->provider_data;
> +        struct virtio_pmem *vpmem = vdev->priv;
> +        ktime_t req_start = ktime_get_boottime();
> +        int ret = -EINPROGRESS;
> +
> +        spin_lock_irq(&vpmem->lock);

Why a new lock and not continue to use ->pmem_lock?

Have you tested this with CONFIG_PROVE_LOCKING?

Along those lines do you have a selftest that can be added to the
kernel as well so that 0day or other bots could offer early warnings
on regressions?

> +        /* flush requests wait until ongoing flush completes,
> +         * hence coalescing all the pending requests.
>           */
> -        if (bio && bio->bi_iter.bi_sector != -1) {
> -                struct bio *child = bio_alloc(GFP_ATOMIC, 0);
> -
> -                if (!child)
> -                        return -ENOMEM;
> -                bio_copy_dev(child, bio);
> -                child->bi_opf = REQ_PREFLUSH;
> -                child->bi_iter.bi_sector = -1;
> -                bio_chain(child, bio);
> -                submit_bio(child);
> -                return 0;
> +        wait_event_lock_irq(vpmem->sb_wait,
> +                            !vpmem->flush_bio ||
> +                            ktime_before(req_start, vpmem->prev_flush_start),
> +                            vpmem->lock);
> +        /* new request after previous flush is completed */
> +        if (ktime_after(req_start, vpmem->prev_flush_start)) {
> +                WARN_ON(vpmem->flush_bio);
> +                vpmem->flush_bio = bio;
> +                bio = NULL;
> +        }
> +        spin_unlock_irq(&vpmem->lock);
> +
> +        if (!bio)
> +                queue_work(vpmem->pmem_wq, &vpmem->flush_work);
> +        else {
> +                /* flush completed in other context while we waited */
> +                if (bio && (bio->bi_opf & REQ_PREFLUSH))
> +                        bio->bi_opf &= ~REQ_PREFLUSH;
> +                else if (bio && (bio->bi_opf & REQ_FUA))
> +                        bio->bi_opf &= ~REQ_FUA;
> +
> +                ret = vpmem->prev_flush_err;
>          }
> -        if (virtio_pmem_flush(nd_region))
> -                return -EIO;
>
> -        return 0;
> +        return ret;
>  };
>  EXPORT_SYMBOL_GPL(async_pmem_flush);
> +
> +void submit_async_flush(struct work_struct *ws)

This name is too generic to be exported from drivers/nvdimm/nd_virtio.c

...it strikes me that there is little reason for nd_virtio and
virtio_pmem to be separate modules. They are both enabled by the same
Kconfig, so why not combine them into one module and drop the exports?

> +{
> +        struct virtio_pmem *vpmem = container_of(ws, struct virtio_pmem, flush_work);
> +        struct bio *bio = vpmem->flush_bio;
> +
> +        vpmem->start_flush = ktime_get_boottime();
> +        vpmem->prev_flush_err = virtio_pmem_flush(vpmem->nd_region);
> +        vpmem->prev_flush_start = vpmem->start_flush;
> +        vpmem->flush_bio = NULL;
> +        wake_up(&vpmem->sb_wait);
> +
> +        if (vpmem->prev_flush_err)
> +                bio->bi_status = errno_to_blk_status(-EIO);
> +
> +        /* Submit parent bio only for PREFLUSH */
> +        if (bio && (bio->bi_opf & REQ_PREFLUSH)) {
> +                bio->bi_opf &= ~REQ_PREFLUSH;
> +                submit_bio(bio);
> +        } else if (bio && (bio->bi_opf & REQ_FUA)) {
> +                bio->bi_opf &= ~REQ_FUA;
> +                bio_endio(bio);
> +        }
> +}
> +EXPORT_SYMBOL_GPL(submit_async_flush);
>  MODULE_LICENSE("GPL");
> diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
> index 726c7354d465..75ed9b7ddea1 100644
> --- a/drivers/nvdimm/virtio_pmem.c
> +++ b/drivers/nvdimm/virtio_pmem.c
> @@ -24,6 +24,7 @@ static int init_vq(struct virtio_pmem *vpmem)
>                  return PTR_ERR(vpmem->req_vq);
>
>          spin_lock_init(&vpmem->pmem_lock);
> +        spin_lock_init(&vpmem->lock);
>          INIT_LIST_HEAD(&vpmem->req_list);
>
>          return 0;
> @@ -57,7 +58,14 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
>                  dev_err(&vdev->dev, "failed to initialize virtio pmem vq's\n");
>                  goto out_err;
>          }
> +        vpmem->pmem_wq = alloc_workqueue("vpmem_wq", WQ_MEM_RECLAIM, 0);
> +        if (!vpmem->pmem_wq) {
> +                err = -ENOMEM;
> +                goto out_err;
> +        }
>
> +        INIT_WORK(&vpmem->flush_work, submit_async_flush);
> +        init_waitqueue_head(&vpmem->sb_wait);
>          virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
>                          start, &vpmem->start);
>          virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
> @@ -90,10 +98,12 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
>                  goto out_nd;
>          }
>          nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent);
> +        vpmem->nd_region = nd_region;
>          return 0;
>  out_nd:
>          nvdimm_bus_unregister(vpmem->nvdimm_bus);
>  out_vq:
> +        destroy_workqueue(vpmem->pmem_wq);
>          vdev->config->del_vqs(vdev);
>  out_err:
>          return err;
> diff --git a/drivers/nvdimm/virtio_pmem.h b/drivers/nvdimm/virtio_pmem.h
> index 0dddefe594c4..495dc20e1cdb 100644
> --- a/drivers/nvdimm/virtio_pmem.h
> +++ b/drivers/nvdimm/virtio_pmem.h
> @@ -35,9 +35,24 @@ struct virtio_pmem {
>          /* Virtio pmem request queue */
>          struct virtqueue *req_vq;
>
> +        struct bio *flush_bio;
> +        /* last_flush is when the last completed flush was started */
> +        ktime_t prev_flush_start, start_flush;
> +        int prev_flush_err;
> +
> +        /* work queue for deferred flush */
> +        struct work_struct flush_work;
> +        struct workqueue_struct *pmem_wq;
> +
> +        /* Synchronize flush wait queue data */
> +        spinlock_t lock;
> +        /* for waiting for previous flush to complete */
> +        wait_queue_head_t sb_wait;
> +
>          /* nvdimm bus registers virtio pmem device */
>          struct nvdimm_bus *nvdimm_bus;
>          struct nvdimm_bus_descriptor nd_desc;
> +        struct nd_region *nd_region;
>
>          /* List to store deferred work if virtqueue is full */
>          struct list_head req_list;
> @@ -52,4 +67,5 @@ struct virtio_pmem {
>
>  void virtio_pmem_host_ack(struct virtqueue *vq);
>  int async_pmem_flush(struct nd_region *nd_region, struct bio *bio);
> +void submit_async_flush(struct work_struct *ws);
>  #endif
> --
> 2.25.1
>
>
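Dan's selftest question above concerns a proper kernel selftest, but the flush path is also easy to poke from user space. The sketch below is a hypothetical exerciser, not code from this thread: the /mnt/pmem mount point and file name are assumptions, and it presumes a guest where a virtio-pmem backed /dev/pmem device carries a filesystem mounted with -o dax. On such a file, msync()/fsync() emit a REQ_PREFLUSH bio that the pmem driver routes to the region flush callback, which this patch turns into async_pmem_flush().

/* Hypothetical user-space exerciser for the virtio-pmem flush path.
 * Assumes /mnt/pmem is a DAX mount backed by virtio-pmem in the guest. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/pmem/flush-test", O_CREAT | O_RDWR, 0644);
        char *p;

        if (fd < 0 || ftruncate(fd, 4096)) {
                perror("open/ftruncate");
                return 1;
        }
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memcpy(p, "persist me", 10);

        /* msync()/fsync() reach the nd_region flush callback, i.e.
         * async_pmem_flush() with this series applied. */
        if (msync(p, 4096, MS_SYNC) || fsync(fd)) {
                perror("msync/fsync");
                return 1;
        }
        munmap(p, 4096);
        close(fd);
        return 0;
}

Running several of these in parallel would also exercise the coalescing window the patch introduces, since new flushes then arrive while one is already in flight.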
> >
> > Enable asynchronous flush for virtio pmem using work queue. Also,
> > coalesce the flush requests when a flush is already in process.
> > This functionality is copied from md/RAID code.
> >
> > When a flush is already in process, new flush requests wait till
> > previous flush completes in another context (work queue). For all
> > the requests come between ongoing flush and new flush start time, only
> > single flush executes, thus adhers to flush coalscing logic. This is
>
> s/adhers/adheres/
>
> s/coalscing/coalescing/
>
> > important for maintaining the flush request order with request coalscing.
>
> s/coalscing/coalescing/

o.k. Sorry for the spelling mistakes.

> >
> > Signed-off-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
> > ---
> >  drivers/nvdimm/nd_virtio.c   | 74 +++++++++++++++++++++++++++---------
> >  drivers/nvdimm/virtio_pmem.c | 10 +++++
> >  drivers/nvdimm/virtio_pmem.h | 16 ++++++++
> >  3 files changed, 83 insertions(+), 17 deletions(-)
> >
> > diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
> > index 10351d5b49fa..179ea7a73338 100644
> > --- a/drivers/nvdimm/nd_virtio.c
> > +++ b/drivers/nvdimm/nd_virtio.c
> > @@ -100,26 +100,66 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
> >  /* The asynchronous flush callback function */
> >  int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
> >  {
> > -        /*
> > -         * Create child bio for asynchronous flush and chain with
> > -         * parent bio. Otherwise directly call nd_region flush.
> > +        /* queue asynchronous flush and coalesce the flush requests */
> > +        struct virtio_device *vdev = nd_region->provider_data;
> > +        struct virtio_pmem *vpmem = vdev->priv;
> > +        ktime_t req_start = ktime_get_boottime();
> > +        int ret = -EINPROGRESS;
> > +
> > +        spin_lock_irq(&vpmem->lock);
>
> Why a new lock and not continue to use ->pmem_lock?

This spinlock is to protect entry in 'wait_event_lock_irq'
and the other spinlock is to protect virtio queue data.

>
> Have you tested this with CONFIG_PROVE_LOCKING?

No, I only ran xfs tests and some of my unit test programs.
Will enable and test with CONFIG_PROVE_LOCKING.

>
> Along those lines do you have a selftest that can be added to the
> kernel as well so that 0day or other bots could offer early warnings
> on regressions?

Will try to add one.

Thank you Dan for the feedback!

Best regards,
Pankaj

> > +        /* flush requests wait until ongoing flush completes,
> > +         * hence coalescing all the pending requests.
> >           */
> > -        if (bio && bio->bi_iter.bi_sector != -1) {
> > -                struct bio *child = bio_alloc(GFP_ATOMIC, 0);
> > -
> > -                if (!child)
> > -                        return -ENOMEM;
> > -                bio_copy_dev(child, bio);
> > -                child->bi_opf = REQ_PREFLUSH;
> > -                child->bi_iter.bi_sector = -1;
> > -                bio_chain(child, bio);
> > -                submit_bio(child);
> > -                return 0;
> > +        wait_event_lock_irq(vpmem->sb_wait,
> > +                            !vpmem->flush_bio ||
> > +                            ktime_before(req_start, vpmem->prev_flush_start),
> > +                            vpmem->lock);
> > +        /* new request after previous flush is completed */
> > +        if (ktime_after(req_start, vpmem->prev_flush_start)) {
> > +                WARN_ON(vpmem->flush_bio);
> > +                vpmem->flush_bio = bio;
> > +                bio = NULL;
> > +        }
> > +        spin_unlock_irq(&vpmem->lock);
> > +
> > +        if (!bio)
> > +                queue_work(vpmem->pmem_wq, &vpmem->flush_work);
> > +        else {
> > +                /* flush completed in other context while we waited */
> > +                if (bio && (bio->bi_opf & REQ_PREFLUSH))
> > +                        bio->bi_opf &= ~REQ_PREFLUSH;
> > +                else if (bio && (bio->bi_opf & REQ_FUA))
> > +                        bio->bi_opf &= ~REQ_FUA;
> > +
> > +                ret = vpmem->prev_flush_err;
> >          }
> > -        if (virtio_pmem_flush(nd_region))
> > -                return -EIO;
> >
> > -        return 0;
> > +        return ret;
> >  };
> >  EXPORT_SYMBOL_GPL(async_pmem_flush);
> > +
> > +void submit_async_flush(struct work_struct *ws)
>
> This name is too generic to be exported from drivers/nvdimm/nd_virtio.c
>
> ...it strikes me that there is little reason for nd_virtio and
> virtio_pmem to be separate modules. They are both enabled by the same
> Kconfig, so why not combine them into one module and drop the exports?

makes sense.

> > +{
> > +        struct virtio_pmem *vpmem = container_of(ws, struct virtio_pmem, flush_work);
> > +        struct bio *bio = vpmem->flush_bio;
> > +
> > +        vpmem->start_flush = ktime_get_boottime();
> > +        vpmem->prev_flush_err = virtio_pmem_flush(vpmem->nd_region);
> > +        vpmem->prev_flush_start = vpmem->start_flush;
> > +        vpmem->flush_bio = NULL;
> > +        wake_up(&vpmem->sb_wait);
> > +
> > +        if (vpmem->prev_flush_err)
> > +                bio->bi_status = errno_to_blk_status(-EIO);
> > +
> > +        /* Submit parent bio only for PREFLUSH */
> > +        if (bio && (bio->bi_opf & REQ_PREFLUSH)) {
> > +                bio->bi_opf &= ~REQ_PREFLUSH;
> > +                submit_bio(bio);
> > +        } else if (bio && (bio->bi_opf & REQ_FUA)) {
> > +                bio->bi_opf &= ~REQ_FUA;
> > +                bio_endio(bio);
> > +        }
> > +}
> > +EXPORT_SYMBOL_GPL(submit_async_flush);
> >  MODULE_LICENSE("GPL");
> > diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
> > index 726c7354d465..75ed9b7ddea1 100644
> > --- a/drivers/nvdimm/virtio_pmem.c
> > +++ b/drivers/nvdimm/virtio_pmem.c
> > @@ -24,6 +24,7 @@ static int init_vq(struct virtio_pmem *vpmem)
> >                  return PTR_ERR(vpmem->req_vq);
> >
> >          spin_lock_init(&vpmem->pmem_lock);
> > +        spin_lock_init(&vpmem->lock);
> >          INIT_LIST_HEAD(&vpmem->req_list);
> >
> >          return 0;
> > @@ -57,7 +58,14 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
> >                  dev_err(&vdev->dev, "failed to initialize virtio pmem vq's\n");
> >                  goto out_err;
> >          }
> > +        vpmem->pmem_wq = alloc_workqueue("vpmem_wq", WQ_MEM_RECLAIM, 0);
> > +        if (!vpmem->pmem_wq) {
> > +                err = -ENOMEM;
> > +                goto out_err;
> > +        }
> >
> > +        INIT_WORK(&vpmem->flush_work, submit_async_flush);
> > +        init_waitqueue_head(&vpmem->sb_wait);
> >          virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
> >                          start, &vpmem->start);
> >          virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
> > @@ -90,10 +98,12 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
> >                  goto out_nd;
> >          }
> >          nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent);
> > +        vpmem->nd_region = nd_region;
> >          return 0;
> >  out_nd:
> >          nvdimm_bus_unregister(vpmem->nvdimm_bus);
> >  out_vq:
> > +        destroy_workqueue(vpmem->pmem_wq);
> >          vdev->config->del_vqs(vdev);
> >  out_err:
> >          return err;
> > diff --git a/drivers/nvdimm/virtio_pmem.h b/drivers/nvdimm/virtio_pmem.h
> > index 0dddefe594c4..495dc20e1cdb 100644
> > --- a/drivers/nvdimm/virtio_pmem.h
> > +++ b/drivers/nvdimm/virtio_pmem.h
> > @@ -35,9 +35,24 @@ struct virtio_pmem {
> >          /* Virtio pmem request queue */
> >          struct virtqueue *req_vq;
> >
> > +        struct bio *flush_bio;
> > +        /* last_flush is when the last completed flush was started */
> > +        ktime_t prev_flush_start, start_flush;
> > +        int prev_flush_err;
> > +
> > +        /* work queue for deferred flush */
> > +        struct work_struct flush_work;
> > +        struct workqueue_struct *pmem_wq;
> > +
> > +        /* Synchronize flush wait queue data */
> > +        spinlock_t lock;
> > +        /* for waiting for previous flush to complete */
> > +        wait_queue_head_t sb_wait;
> > +
> >          /* nvdimm bus registers virtio pmem device */
> >          struct nvdimm_bus *nvdimm_bus;
> >          struct nvdimm_bus_descriptor nd_desc;
> > +        struct nd_region *nd_region;
> >
> >          /* List to store deferred work if virtqueue is full */
> >          struct list_head req_list;
> > @@ -52,4 +67,5 @@ struct virtio_pmem {
> >
> >  void virtio_pmem_host_ack(struct virtqueue *vq);
> >  int async_pmem_flush(struct nd_region *nd_region, struct bio *bio);
> > +void submit_async_flush(struct work_struct *ws);
> >  #endif
> > --
> > 2.25.1
> >
> >
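As a reading aid for the two-lock split Pankaj describes, here is a summary of the lock domains as they appear in this patch (the comments are a paraphrase, not text from the series):

/*
 * vpmem->pmem_lock - taken in virtio_pmem_flush() and the host-ack
 *                    completion path; protects virtqueue add/kick and
 *                    the req_list used to defer work when the queue
 *                    is full.
 *
 * vpmem->lock      - taken in async_pmem_flush(); protects the
 *                    coalescing state (flush_bio, prev_flush_start)
 *                    and the sb_wait wait queue entered through
 *                    wait_event_lock_irq().
 */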
On Wed, Feb 16, 2022 at 12:47 AM Pankaj Gupta
<pankaj.gupta.linux@gmail.com> wrote:
>
> > >
> > > Enable asynchronous flush for virtio pmem using work queue. Also,
> > > coalesce the flush requests when a flush is already in process.
> > > This functionality is copied from md/RAID code.
> > >
> > > When a flush is already in process, new flush requests wait till
> > > previous flush completes in another context (work queue). For all
> > > the requests come between ongoing flush and new flush start time, only
> > > single flush executes, thus adhers to flush coalscing logic. This is
> >
> > s/adhers/adheres/
> >
> > s/coalscing/coalescing/
> >
> > > important for maintaining the flush request order with request coalscing.
> >
> > s/coalscing/coalescing/
>
> o.k. Sorry for the spelling mistakes.
>
> > >
> > > Signed-off-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
> > > ---
> > >  drivers/nvdimm/nd_virtio.c   | 74 +++++++++++++++++++++++++++---------
> > >  drivers/nvdimm/virtio_pmem.c | 10 +++++
> > >  drivers/nvdimm/virtio_pmem.h | 16 ++++++++
> > >  3 files changed, 83 insertions(+), 17 deletions(-)
> > >
> > > diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
> > > index 10351d5b49fa..179ea7a73338 100644
> > > --- a/drivers/nvdimm/nd_virtio.c
> > > +++ b/drivers/nvdimm/nd_virtio.c
> > > @@ -100,26 +100,66 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
> > >  /* The asynchronous flush callback function */
> > >  int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
> > >  {
> > > -        /*
> > > -         * Create child bio for asynchronous flush and chain with
> > > -         * parent bio. Otherwise directly call nd_region flush.
> > > +        /* queue asynchronous flush and coalesce the flush requests */
> > > +        struct virtio_device *vdev = nd_region->provider_data;
> > > +        struct virtio_pmem *vpmem = vdev->priv;
> > > +        ktime_t req_start = ktime_get_boottime();
> > > +        int ret = -EINPROGRESS;
> > > +
> > > +        spin_lock_irq(&vpmem->lock);
> >
> > Why a new lock and not continue to use ->pmem_lock?
>
> This spinlock is to protect entry in 'wait_event_lock_irq'
> and the other spinlock is to protect virtio queue data.

Understood, but md shares the mddev->lock for both purposes, so I
would ask that you either document what motivates the locking split,
or just reuse the lock until a strong reason to split them arises.
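For reference, the md code Dan is pointing at: md_flush_request() reuses the mddev->lock that already guards other mddev state, so the coalescing machinery there carries no dedicated spinlock. The sketch below is abridged from drivers/md/md.c of roughly the v5.16 era, with the empty-barrier and error handling elided; other kernel versions may differ:

/* Manages consolidation of flushes and submits any flush needed for
 * a bio with REQ_PREFLUSH -- abridged from drivers/md/md.c */
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
        ktime_t req_start = ktime_get_boottime();

        spin_lock_irq(&mddev->lock);    /* the shared mddev lock */
        wait_event_lock_irq(mddev->sb_wait,
                            !mddev->flush_bio ||
                            ktime_before(req_start, mddev->prev_flush_start),
                            mddev->lock);
        /* new request after previous flush is completed */
        if (ktime_after(req_start, mddev->prev_flush_start)) {
                WARN_ON(mddev->flush_bio);
                mddev->flush_bio = bio;
                bio = NULL;
        }
        spin_unlock_irq(&mddev->lock);

        if (!bio) {
                INIT_WORK(&mddev->flush_work, submit_flushes);
                queue_work(md_wq, &mddev->flush_work);
        } else {
                /* flush was performed for some other bio while we waited */
                bio->bi_opf &= ~REQ_PREFLUSH;
                return false;   /* caller still submits the data part */
        }
        return true;
}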
> > > >
> > > > Enable asynchronous flush for virtio pmem using work queue. Also,
> > > > coalesce the flush requests when a flush is already in process.
> > > > This functionality is copied from md/RAID code.
> > > >
> > > > When a flush is already in process, new flush requests wait till
> > > > previous flush completes in another context (work queue). For all
> > > > the requests come between ongoing flush and new flush start time, only
> > > > single flush executes, thus adhers to flush coalscing logic. This is
> > >
> > > s/adhers/adheres/
> > >
> > > s/coalscing/coalescing/
> > >
> > > > important for maintaining the flush request order with request coalscing.
> > >
> > > s/coalscing/coalescing/
> >
> > o.k. Sorry for the spelling mistakes.
> >
> > > >
> > > > Signed-off-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
> > > > ---
> > > >  drivers/nvdimm/nd_virtio.c   | 74 +++++++++++++++++++++++++++---------
> > > >  drivers/nvdimm/virtio_pmem.c | 10 +++++
> > > >  drivers/nvdimm/virtio_pmem.h | 16 ++++++++
> > > >  3 files changed, 83 insertions(+), 17 deletions(-)
> > > >
> > > > diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
> > > > index 10351d5b49fa..179ea7a73338 100644
> > > > --- a/drivers/nvdimm/nd_virtio.c
> > > > +++ b/drivers/nvdimm/nd_virtio.c
> > > > @@ -100,26 +100,66 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
> > > >  /* The asynchronous flush callback function */
> > > >  int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
> > > >  {
> > > > -        /*
> > > > -         * Create child bio for asynchronous flush and chain with
> > > > -         * parent bio. Otherwise directly call nd_region flush.
> > > > +        /* queue asynchronous flush and coalesce the flush requests */
> > > > +        struct virtio_device *vdev = nd_region->provider_data;
> > > > +        struct virtio_pmem *vpmem = vdev->priv;
> > > > +        ktime_t req_start = ktime_get_boottime();
> > > > +        int ret = -EINPROGRESS;
> > > > +
> > > > +        spin_lock_irq(&vpmem->lock);
> > >
> > > Why a new lock and not continue to use ->pmem_lock?
> >
> > This spinlock is to protect entry in 'wait_event_lock_irq'
> > and the other spinlock is to protect virtio queue data.
>
> Understood, but md shares the mddev->lock for both purposes, so I
> would ask that you either document what motivates the locking split,
> or just reuse the lock until a strong reason to split them arises.

O.k. Will check again if we could use the same lock or document it.

Thanks,
Pankaj
diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index 10351d5b49fa..179ea7a73338 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -100,26 +100,66 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
 /* The asynchronous flush callback function */
 int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
 {
-        /*
-         * Create child bio for asynchronous flush and chain with
-         * parent bio. Otherwise directly call nd_region flush.
+        /* queue asynchronous flush and coalesce the flush requests */
+        struct virtio_device *vdev = nd_region->provider_data;
+        struct virtio_pmem *vpmem = vdev->priv;
+        ktime_t req_start = ktime_get_boottime();
+        int ret = -EINPROGRESS;
+
+        spin_lock_irq(&vpmem->lock);
+        /* flush requests wait until ongoing flush completes,
+         * hence coalescing all the pending requests.
          */
-        if (bio && bio->bi_iter.bi_sector != -1) {
-                struct bio *child = bio_alloc(GFP_ATOMIC, 0);
-
-                if (!child)
-                        return -ENOMEM;
-                bio_copy_dev(child, bio);
-                child->bi_opf = REQ_PREFLUSH;
-                child->bi_iter.bi_sector = -1;
-                bio_chain(child, bio);
-                submit_bio(child);
-                return 0;
+        wait_event_lock_irq(vpmem->sb_wait,
+                            !vpmem->flush_bio ||
+                            ktime_before(req_start, vpmem->prev_flush_start),
+                            vpmem->lock);
+        /* new request after previous flush is completed */
+        if (ktime_after(req_start, vpmem->prev_flush_start)) {
+                WARN_ON(vpmem->flush_bio);
+                vpmem->flush_bio = bio;
+                bio = NULL;
+        }
+        spin_unlock_irq(&vpmem->lock);
+
+        if (!bio)
+                queue_work(vpmem->pmem_wq, &vpmem->flush_work);
+        else {
+                /* flush completed in other context while we waited */
+                if (bio && (bio->bi_opf & REQ_PREFLUSH))
+                        bio->bi_opf &= ~REQ_PREFLUSH;
+                else if (bio && (bio->bi_opf & REQ_FUA))
+                        bio->bi_opf &= ~REQ_FUA;
+
+                ret = vpmem->prev_flush_err;
         }
-        if (virtio_pmem_flush(nd_region))
-                return -EIO;
 
-        return 0;
+        return ret;
 };
 EXPORT_SYMBOL_GPL(async_pmem_flush);
+
+void submit_async_flush(struct work_struct *ws)
+{
+        struct virtio_pmem *vpmem = container_of(ws, struct virtio_pmem, flush_work);
+        struct bio *bio = vpmem->flush_bio;
+
+        vpmem->start_flush = ktime_get_boottime();
+        vpmem->prev_flush_err = virtio_pmem_flush(vpmem->nd_region);
+        vpmem->prev_flush_start = vpmem->start_flush;
+        vpmem->flush_bio = NULL;
+        wake_up(&vpmem->sb_wait);
+
+        if (vpmem->prev_flush_err)
+                bio->bi_status = errno_to_blk_status(-EIO);
+
+        /* Submit parent bio only for PREFLUSH */
+        if (bio && (bio->bi_opf & REQ_PREFLUSH)) {
+                bio->bi_opf &= ~REQ_PREFLUSH;
+                submit_bio(bio);
+        } else if (bio && (bio->bi_opf & REQ_FUA)) {
+                bio->bi_opf &= ~REQ_FUA;
+                bio_endio(bio);
+        }
+}
+EXPORT_SYMBOL_GPL(submit_async_flush);
 MODULE_LICENSE("GPL");
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 726c7354d465..75ed9b7ddea1 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -24,6 +24,7 @@ static int init_vq(struct virtio_pmem *vpmem)
                 return PTR_ERR(vpmem->req_vq);
 
         spin_lock_init(&vpmem->pmem_lock);
+        spin_lock_init(&vpmem->lock);
         INIT_LIST_HEAD(&vpmem->req_list);
 
         return 0;
@@ -57,7 +58,14 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
                 dev_err(&vdev->dev, "failed to initialize virtio pmem vq's\n");
                 goto out_err;
         }
+        vpmem->pmem_wq = alloc_workqueue("vpmem_wq", WQ_MEM_RECLAIM, 0);
+        if (!vpmem->pmem_wq) {
+                err = -ENOMEM;
+                goto out_err;
+        }
 
+        INIT_WORK(&vpmem->flush_work, submit_async_flush);
+        init_waitqueue_head(&vpmem->sb_wait);
         virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
                         start, &vpmem->start);
         virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
@@ -90,10 +98,12 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
                 goto out_nd;
         }
         nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent);
+        vpmem->nd_region = nd_region;
         return 0;
 out_nd:
         nvdimm_bus_unregister(vpmem->nvdimm_bus);
 out_vq:
+        destroy_workqueue(vpmem->pmem_wq);
         vdev->config->del_vqs(vdev);
 out_err:
         return err;
diff --git a/drivers/nvdimm/virtio_pmem.h b/drivers/nvdimm/virtio_pmem.h
index 0dddefe594c4..495dc20e1cdb 100644
--- a/drivers/nvdimm/virtio_pmem.h
+++ b/drivers/nvdimm/virtio_pmem.h
@@ -35,9 +35,24 @@ struct virtio_pmem {
         /* Virtio pmem request queue */
         struct virtqueue *req_vq;
 
+        struct bio *flush_bio;
+        /* last_flush is when the last completed flush was started */
+        ktime_t prev_flush_start, start_flush;
+        int prev_flush_err;
+
+        /* work queue for deferred flush */
+        struct work_struct flush_work;
+        struct workqueue_struct *pmem_wq;
+
+        /* Synchronize flush wait queue data */
+        spinlock_t lock;
+        /* for waiting for previous flush to complete */
+        wait_queue_head_t sb_wait;
+
         /* nvdimm bus registers virtio pmem device */
         struct nvdimm_bus *nvdimm_bus;
         struct nvdimm_bus_descriptor nd_desc;
+        struct nd_region *nd_region;
 
         /* List to store deferred work if virtqueue is full */
         struct list_head req_list;
@@ -52,4 +67,5 @@ struct virtio_pmem {
 
 void virtio_pmem_host_ack(struct virtqueue *vq);
 int async_pmem_flush(struct nd_region *nd_region, struct bio *bio);
+void submit_async_flush(struct work_struct *ws);
 #endif
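For context on where async_pmem_flush() plugs in: the pmem block driver routes REQ_PREFLUSH/REQ_FUA bios to the region's flush hook through nvdimm_flush(). The sketch below paraphrases the mainline code of that era (drivers/nvdimm/region_devs.c); it is not part of this series, and with this series applied the callers would additionally need to understand the new -EINPROGRESS return:

/* Paraphrased, pre-series call path:
 * pmem_submit_bio() -> nvdimm_flush() -> nd_region->flush(),
 * where nd_region->flush == async_pmem_flush for virtio-pmem. */
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
        int rc = 0;

        if (!nd_region->flush)
                rc = generic_nvdimm_flush(nd_region);  /* bare-metal pmem */
        else if (nd_region->flush(nd_region, bio))     /* virtio-pmem hook */
                rc = -EIO;

        return rc;
}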
Enable asynchronous flush for virtio pmem using work queue. Also,
coalesce the flush requests when a flush is already in process.
This functionality is copied from md/RAID code.

When a flush is already in process, new flush requests wait till
previous flush completes in another context (work queue). For all
the requests come between ongoing flush and new flush start time, only
single flush executes, thus adhers to flush coalscing logic. This is
important for maintaining the flush request order with request coalscing.

Signed-off-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
---
 drivers/nvdimm/nd_virtio.c   | 74 +++++++++++++++++++++++++++---------
 drivers/nvdimm/virtio_pmem.c | 10 +++++
 drivers/nvdimm/virtio_pmem.h | 16 ++++++++
 3 files changed, 83 insertions(+), 17 deletions(-)
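A worked timeline makes the coalescing rule above concrete (request names and times are illustrative only):

/*
 * t0  flush A arrives; nothing pending -> A becomes flush_bio and
 *     flush_work is queued
 * t1  the worker starts the host flush for A (start_flush = t1)
 * t2  flush B arrives; flush_bio is set -> B sleeps on sb_wait
 * t3  flush C arrives -> C sleeps on sb_wait
 * t4  the host flush completes; prev_flush_start = t1 and waiters
 *     wake.  B and C both started after t1, so that flush cannot
 *     cover them: one of them (say B) becomes the new flush_bio and
 *     exactly one more host flush is queued -- B and C coalesce.
 * t5  the second flush starts (start_flush = t5) and completes;
 *     prev_flush_start = t5.  C's req_start (t3) is before t5, so C
 *     returns with prev_flush_err instead of issuing a third flush.
 */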