--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -266,7 +266,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		if (rq->cmd_flags & REQ_WRITE)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
-		sg->page_link &= ~0x02;
+		sg_unmark_end(sg);
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
 			    q->dma_drain_size,
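
For reference, the helper this hunk switches to lives in include/linux/scatterlist.h; quoted approximately from the era this patch targets (debug-only checks trimmed), it clears exactly the termination bit the open-coded version masked off:

	static inline void sg_unmark_end(struct scatterlist *sg)
	{
		/* bit 0x02 of page_link marks the list terminator */
		sg->page_link &= ~0x02;
	}
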
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -582,7 +582,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 	 * the dmaengine may try to DMA the incorrect amount of data.
 	 */
 	sg_init_table(&ctx->sgl, 1);
-	ctx->sgl.page_link = ctx->sg->page_link;
+	sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
 	ctx->sgl.offset = ctx->sg->offset;
 	sg_dma_len(&ctx->sgl) = len32;
 	sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
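
Note the omap-sham conversion is not bit-for-bit: the raw assignment also copied ctx->sg's low flag bits (chain/end markers) into ctx->sgl, whereas sg_assign_page() preserves the destination's own flags, here the end marker that sg_init_table(&ctx->sgl, 1) just set, which is the state a one-entry list wants. Approximately, from include/linux/scatterlist.h (debug checks trimmed):

	static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
	{
		unsigned long page_link = sg->page_link & 0x3;

		/* the low two bits are stolen for the chain/end flags,
		 * so the page pointer must be at least 4-byte aligned */
		BUG_ON((unsigned long) page & 0x03);
		sg->page_link = page_link | (unsigned long) page;
	}
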
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -886,18 +886,14 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
 	sg_init_table(imxdmac->sg_list, periods);
 
 	for (i = 0; i < periods; i++) {
-		imxdmac->sg_list[i].page_link = 0;
-		imxdmac->sg_list[i].offset = 0;
+		sg_set_page(&imxdmac->sg_list[i], NULL, period_len, 0);
 		imxdmac->sg_list[i].dma_address = dma_addr;
 		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
 		dma_addr += period_len;
 	}
 
 	/* close the loop */
-	imxdmac->sg_list[periods].offset = 0;
-	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
-	imxdmac->sg_list[periods].page_link =
-		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
+	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);
 
 	desc->type = IMXDMA_DESC_CYCLIC;
 	desc->sg = imxdmac->sg_list;
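
This hunk and the ste_dma40 one below lean on sg_chain() spelling out exactly what the removed lines did, with one small delta: the helper clears ->length where the driver cleared sg_dma_len() (distinct fields only under CONFIG_NEED_SG_DMA_LENGTH, and neither is read from a link entry). Approximately, from include/linux/scatterlist.h:

	static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
				    struct scatterlist *sgl)
	{
		/* offset and length are unused for a chain entry */
		prv[prv_nents - 1].offset = 0;
		prv[prv_nents - 1].length = 0;

		/* set the link bit (0x01), clear a stale end bit (0x02) */
		prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
	}
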
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2562,10 +2562,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 		dma_addr += period_len;
 	}
 
-	sg[periods].offset = 0;
-	sg_dma_len(&sg[periods]) = 0;
-	sg[periods].page_link =
-		((unsigned long)sg | 0x01) & ~0x02;
+	sg_chain(sg, periods + 1, sg);
 
 	txd = d40_prep_sg(chan, sg, sg, periods, direction,
 			  DMA_PREP_INTERRUPT);
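
Both cyclic preparations chain the list back to its own head, so a walk never terminates, which is precisely what a cyclic transfer wants. sg_next() (approximately as in lib/scatterlist.c) shows why: the link entry redirects the cursor to sg[0], and because sg_chain() cleared the end bit, sg_is_last() never fires:

	struct scatterlist *sg_next(struct scatterlist *sg)
	{
		if (sg_is_last(sg))		/* end bit set: stop */
			return NULL;

		sg++;
		if (unlikely(sg_is_chain(sg)))	/* link entry: follow it */
			sg = sg_chain_ptr(sg);

		return sg;
	}
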
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -469,7 +469,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
 			sg_set_buf(__sg, buf + offset, len);
 			offset += len;
 			remain -= len;
-			(__sg++)->page_link &= ~0x02;
+			sg_unmark_end(__sg++);
 			sg_len++;
 		} while (remain);
 	}
@@ -477,7 +477,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
 	list_for_each_entry(req, &packed->list, queuelist) {
 		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
 		__sg = sg + (sg_len - 1);
-		(__sg++)->page_link &= ~0x02;
+		sg_unmark_end(__sg++);
 	}
 	sg_mark_end(sg + (sg_len - 1));
 	return sg_len;
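
The mmc hunks show the concatenation pattern: blk_rq_map_sg() terminates each per-request list it fills, so packing several requests into one list means clearing every intermediate terminator with sg_unmark_end() and placing a single one on the final entry with its counterpart, approximately:

	static inline void sg_mark_end(struct scatterlist *sg)
	{
		/* set the termination bit, clear a potential chain bit */
		sg->page_link |= 0x02;
		sg->page_link &= ~0x01;
	}
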
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -25,13 +25,8 @@
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 
-static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
-					struct scatterlist *sg2)
-{
-	sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
-	sg1[num - 1].page_link &= ~0x02;
-	sg1[num - 1].page_link |= 0x01;
-}
+#define scatterwalk_sg_chain(prv, num, sgl)	sg_chain(prv, num, sgl)
+#define scatterwalk_sg_next(sgl)	sg_next(sgl)
 
 static inline void scatterwalk_crypto_chain(struct scatterlist *head,
 					    struct scatterlist *sg,
Replace open coded sg_chain() and sg_unmark_end() instances with the
aforementioned helpers.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 block/blk-merge.c            |    2 +-
 drivers/crypto/omap-sham.c   |    2 +-
 drivers/dma/imx-dma.c        |    8 ++------
 drivers/dma/ste_dma40.c      |    5 +----
 drivers/mmc/card/queue.c     |    4 ++--
 include/crypto/scatterwalk.h |    9 ++-------
 6 files changed, 9 insertions(+), 21 deletions(-)
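
As a closing sketch of the helper the scatterwalk macro now wraps, here is a minimal, hypothetical example (buffer and length names are placeholders, not taken from this patch) of chaining two tables so that sg_next() walks them as one list:

	struct scatterlist head[3];	/* two data entries plus one slot for the link */
	struct scatterlist tail[2];

	sg_init_table(head, 3);
	sg_init_table(tail, 2);

	sg_set_buf(&head[0], buf_a, len_a);	/* buf_*, len_*: placeholders */
	sg_set_buf(&head[1], buf_b, len_b);
	sg_set_buf(&tail[0], buf_c, len_c);
	sg_set_buf(&tail[1], buf_d, len_d);

	/* head[2] becomes a link entry pointing at tail[0]; a walk now visits
	 * head[0], head[1], tail[0], tail[1] and stops at tail[1]'s terminator */
	sg_chain(head, 3, tail);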