Message ID | 20131114200859.22521.65770.stgit@viggo.jf.intel.com (mailing list archive)
---|---
State | Accepted |
Commit | 6f57fd0578dff23a4bd16118f0cb4201bcec91f1 |
On Thu, Nov 14, 2013 at 12:08:59PM -0800, Dan Williams wrote:
> From: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
>
> Use the generic unmap object to unmap dma buffers.
>
> Cc: Vinod Koul <vinod.koul@intel.com>
> Cc: Tomasz Figa <t.figa@samsung.com>
> Cc: Dave Jiang <dave.jiang@intel.com>
> Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
> Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
> Acked-by: Jon Mason <jon.mason@intel.com>
> [djbw: fix up unmap len, and GFP flags]
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
> ---
>  drivers/ntb/ntb_transport.c | 85 +++++++++++++++++++++++++++++--------------
>  1 files changed, 58 insertions(+), 27 deletions(-)
>
> diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
> index 12a9e83c008b..222c2baa3a4b 100644
> --- a/drivers/ntb/ntb_transport.c
> +++ b/drivers/ntb/ntb_transport.c
> @@ -1034,7 +1034,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
>  	struct dma_chan *chan = qp->dma_chan;
>  	struct dma_device *device;
>  	size_t pay_off, buff_off;
> -	dma_addr_t src, dest;
> +	struct dmaengine_unmap_data *unmap;
>  	dma_cookie_t cookie;
>  	void *buf = entry->buf;
>  	unsigned long flags;
> @@ -1045,35 +1045,50 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
>  		goto err;
>
>  	if (len < copy_bytes)
> -		goto err1;
> +		goto err_wait;

Nit, "err_wait" isn't accurate. I think "err_memcpy" or the like would
be more accurate, since I don't see any waiting.

Aside from that (and my code beauty issues from the previous version),
it works for me.

Thanks,
Jon
On Thu, Nov 14, 2013 at 12:51 PM, Jon Mason <jon.mason@intel.com> wrote:
> On Thu, Nov 14, 2013 at 12:08:59PM -0800, Dan Williams wrote:
>> From: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
>>
>> Use the generic unmap object to unmap dma buffers.
>>
>> @@ -1045,35 +1045,50 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
>>  	if (len < copy_bytes)
>> -		goto err1;
>> +		goto err_wait;
>
> Nit, "err_wait" isn't accurate. I think "err_memcpy" or the like would
> be more accurate, since I don't see any waiting.

Hm, it refers to the dma_sync_wait() used to barrier in-flight async
copies before the ntb_memcpy_rx().

The reason I decided to change from errN to err_<undo_action> is:

err3:
	dmaengine_unmap_put(unmap);
err2:
	dmaengine_unmap_put(unmap);
err1:

looks odd at first glance... "where did the second ref come from?".

It's unfortunate that we even need to worry about errors at
dmaengine_submit() time, as everything should have been pre-cleared by
->device_prep_dma_memcpy(). However, we need some dmaengine subsystem
refactoring to make that a guarantee.
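For context, the wait Dan is referring to lives at the bottom of
ntb_async_rx(): the err_wait label syncs on the last submitted cookie
before falling back to the CPU copy. A paraphrased sketch of that tail,
reconstructed from his description and the hunk above (not quoted
verbatim from the driver):

	err_wait:
		/* If callbacks complete out of order, the "last completed"
		 * index can be written out of order and stall the receive
		 * side forever, so barrier any in-flight async copy first.
		 */
		dma_sync_wait(chan, qp->last_cookie);
	err:
		ntb_memcpy_rx(entry, offset);	/* synchronous CPU fallback */
		qp->rx_memcpy++;

So the label does wait: any copy still in flight is synced before the
memcpy fallback runs.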
On Thu, Nov 14, 2013 at 01:06:59PM -0800, Dan Williams wrote:
> On Thu, Nov 14, 2013 at 12:51 PM, Jon Mason <jon.mason@intel.com> wrote:
> > On Thu, Nov 14, 2013 at 12:08:59PM -0800, Dan Williams wrote:
> >> @@ -1045,35 +1045,50 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
> >>  	if (len < copy_bytes)
> >> -		goto err1;
> >> +		goto err_wait;
> >
> > Nit, "err_wait" isn't accurate. I think "err_memcpy" or the like
> > would be more accurate, since I don't see any waiting.
>
> Hm, it refers to the dma_sync_wait() used to barrier in-flight async
> copies before the ntb_memcpy_rx().

Oops, you are correct. Disregard my previous comment. Looks good to me
now.

> The reason I decided to change from errN to err_<undo_action> is:
>
> err3:
> 	dmaengine_unmap_put(unmap);
> err2:
> 	dmaengine_unmap_put(unmap);
> err1:
>
> looks odd at first glance... "where did the second ref come from?".
>
> It's unfortunate that we even need to worry about errors at
> dmaengine_submit() time, as everything should have been pre-cleared by
> ->device_prep_dma_memcpy(). However, we need some dmaengine subsystem
> refactoring to make that a guarantee.
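To spell out where the second ref comes from: dmaengine_get_unmap_data()
returns the object holding one reference, and dma_set_unmap() takes
another on behalf of the descriptor (a kref_get() in the 3.13-era API
this patch targets). A minimal annotated sketch of the resulting
lifecycle, assuming that API:

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); /* ref = 1 */
	...
	dma_set_unmap(txd, unmap);	/* descriptor takes a ref: ref = 2 */

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);	/* drop the driver's ref; the
					 * descriptor's ref is dropped (and
					 * the buffers unmapped) at
					 * completion */
	return;

	err_set_unmap:
		dmaengine_unmap_put(unmap);	/* undo the ref dma_set_unmap() took */
	err_get_unmap:
		dmaengine_unmap_put(unmap);	/* undo the get_unmap_data() ref */

The fall-through from err_set_unmap into err_get_unmap is what puts the
object twice when submission fails after dma_set_unmap(), which is the
"second ref" Dan mentions.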
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 12a9e83c008b..222c2baa3a4b 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -1034,7 +1034,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
 	size_t pay_off, buff_off;
-	dma_addr_t src, dest;
+	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
 	unsigned long flags;
@@ -1045,35 +1045,50 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 		goto err;
 
 	if (len < copy_bytes)
-		goto err1;
+		goto err_wait;
 
 	device = chan->device;
 	pay_off = (size_t) offset & ~PAGE_MASK;
 	buff_off = (size_t) buf & ~PAGE_MASK;
 
 	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
-		goto err1;
+		goto err_wait;
 
-	dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
-	if (dma_mapping_error(device->dev, dest))
-		goto err1;
+	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
+	if (!unmap)
+		goto err_wait;
 
-	src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(device->dev, src))
-		goto err2;
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
+				      pay_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
 
-	flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
+	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[1]))
+		goto err_get_unmap;
+
+	unmap->from_cnt = 1;
+
+	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
 		DMA_PREP_INTERRUPT;
-	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+					     unmap->addr[0], len, flags);
 	if (!txd)
-		goto err3;
+		goto err_get_unmap;
 
 	txd->callback = ntb_rx_copy_callback;
 	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
 
 	cookie = dmaengine_submit(txd);
 	if (dma_submit_error(cookie))
-		goto err3;
+		goto err_set_unmap;
+
+	dmaengine_unmap_put(unmap);
 
 	qp->last_cookie = cookie;
 
@@ -1081,11 +1096,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 
 	return;
 
-err3:
-	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
-err2:
-	dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
-err1:
+err_set_unmap:
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
+err_wait:
 	/* If the callbacks come out of order, the writing of the index to the
 	 * last completed will be out of order. This may result in the
 	 * receive stalling forever.
@@ -1245,7 +1260,8 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
 	size_t dest_off, buff_off;
-	dma_addr_t src, dest;
+	struct dmaengine_unmap_data *unmap;
+	dma_addr_t dest;
 	dma_cookie_t cookie;
 	void __iomem *offset;
 	size_t len = entry->len;
@@ -1273,28 +1289,43 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
 		goto err;
 
-	src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(device->dev, src))
+	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
+	if (!unmap)
 		goto err;
 
-	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
-	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
+
+	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
+		DMA_PREP_INTERRUPT;
+	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+					     flags);
 	if (!txd)
-		goto err1;
+		goto err_get_unmap;
 
 	txd->callback = ntb_tx_copy_callback;
 	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
 
 	cookie = dmaengine_submit(txd);
 	if (dma_submit_error(cookie))
-		goto err1;
+		goto err_set_unmap;
+
+	dmaengine_unmap_put(unmap);
 
 	dma_async_issue_pending(chan);
 	qp->tx_async++;
 
 	return;
-err1:
-	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
+err_set_unmap:
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
 err:
 	ntb_memcpy_tx(entry, offset);
 	qp->tx_memcpy++;
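Distilled from the two hunks above, the conversion amounts to the
following pattern. This is a hypothetical, self-contained sketch (the
function name, parameters, and error codes are invented for
illustration; the dmaengine and DMA-mapping calls mirror the 3.13-era
API the patch uses), not code from the driver:

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>
	#include <linux/mm.h>		/* virt_to_page(), offset_in_page() */

	/* Hypothetical example: async memcpy using the generic unmap object. */
	static int example_async_memcpy(struct dma_chan *chan, void *dst,
					void *src, size_t len,
					dma_async_tx_callback cb, void *cb_param)
	{
		struct dma_device *device = chan->device;
		struct dmaengine_unmap_data *unmap;
		struct dma_async_tx_descriptor *txd;
		dma_cookie_t cookie;
		unsigned long flags;

		/* Two slots: addr[0] = source (to device), addr[1] = dest */
		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
		if (!unmap)
			return -ENOMEM;

		unmap->len = len;
		unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
					      offset_in_page(src), len,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(device->dev, unmap->addr[0]))
			goto err_put;
		unmap->to_cnt = 1;	/* release will unmap this slot */

		unmap->addr[1] = dma_map_page(device->dev, virt_to_page(dst),
					      offset_in_page(dst), len,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(device->dev, unmap->addr[1]))
			goto err_put;
		unmap->from_cnt = 1;

		/* The SKIP flags tell the legacy completion path not to
		 * unmap; the unmap object now owns that job. */
		flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
			DMA_PREP_INTERRUPT;
		txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
						     unmap->addr[0], len, flags);
		if (!txd)
			goto err_put;

		txd->callback = cb;
		txd->callback_param = cb_param;
		dma_set_unmap(txd, unmap);	/* descriptor takes its own ref */

		cookie = dmaengine_submit(txd);
		if (dma_submit_error(cookie)) {
			dmaengine_unmap_put(unmap);	/* the descriptor's ref */
			goto err_put;
		}

		dmaengine_unmap_put(unmap);	/* our ref; the unmap runs when
						 * the last ref drops at
						 * completion */
		dma_async_issue_pending(chan);
		return 0;

	err_put:
		dmaengine_unmap_put(unmap);	/* unmaps whatever was recorded */
		return -EIO;
	}

Note the tx path in the patch allocates only one slot: its destination
is an already-translated device address behind the NTB window (kept as a
plain dma_addr_t), so only the source buffer goes into the unmap object.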