@@ -262,6 +262,7 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents,
const size_t *split_sizes,
struct scatterlist **out, int *out_mapped_nents,
gfp_t gfp_mask);
+unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents);
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
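For illustration only (this snippet is not part of the patch): a caller that has already DMA mapped a scatterlist could use sg_dma_count_chunks() to size per-chunk rather than per-entry bookkeeping. struct my_hw_desc and my_alloc_descs() below are hypothetical names used for the sketch.

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical hardware descriptor, one per contiguous DMA chunk. */
struct my_hw_desc {
	u64 addr;
	u64 len;
};

/*
 * Sketch only: @sgl/@nents must already have been DMA mapped (e.g. with
 * dma_map_sg()) so that sg_dma_address()/sg_dma_len() are valid.
 */
static struct my_hw_desc *my_alloc_descs(struct scatterlist *sgl,
					 unsigned int nents, gfp_t gfp)
{
	unsigned int nchunks = sg_dma_count_chunks(sgl, nents);

	if (!nchunks)
		return NULL;

	return kcalloc(nchunks, sizeof(struct my_hw_desc), gfp);
}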
@@ -306,6 +307,16 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
size_t buflen, off_t skip);
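+/**
+ * sgt_dma_count_chunks - return number of contiguous DMA chunks in an SG table
+ * @sgt: DMA mapped SG table
+ */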
+static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
+{
+ return sg_dma_count_chunks(sgt->sgl, sgt->nents);
+}
+
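+/**
+ * sgt_dma_contiguous - check if the DMA mapping of an SG table is contiguous
+ * @sgt: DMA mapped SG table
+ */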
+static inline bool sgt_dma_contiguous(struct sg_table *sgt)
+{
+ return sgt_dma_count_chunks(sgt) == 1;
+}
+
/*
* Maximum number of entries that will be allocated in one piece, if
* a list larger than this is required then chaining will be utilized.
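Similarly, a hedged sketch (not part of the patch) of how a driver whose hardware cannot do scatter-gather might use the sgt_dma_contiguous() helper right after mapping an sg_table; my_map_buffer() and its -EINVAL policy are made up for the example.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/*
 * Sketch only: map @sgt for DMA and require that the mapping collapses
 * into a single contiguous IOVA range (e.g. behind an IOMMU).
 */
static int my_map_buffer(struct device *dev, struct sg_table *sgt,
			 dma_addr_t *iova)
{
	int err;

	err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (err)
		return err;

	if (!sgt_dma_contiguous(sgt)) {
		dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		return -EINVAL;
	}

	/* A contiguous mapping starts at the first entry's DMA address. */
	*iova = sg_dma_address(sgt->sgl);
	return 0;
}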
@@ -142,6 +142,35 @@ void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
}
EXPORT_SYMBOL(sg_init_one);
+/**
+ * sg_dma_count_chunks - return number of contiguous DMA chunks in scatterlist
+ * @sgl: DMA mapped scatterlist
+ * @nents: number of DMA mapped entries in @sgl
+ *
+ * Return: Number of contiguous DMA address ranges covered by the mapped
+ * entries of @sgl.
+ */
+unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
+{
+ dma_addr_t next = ~(dma_addr_t)0;
+ unsigned int count = 0, i;
+ struct scatterlist *s;
+
+ for_each_sg(sgl, s, nents, i) {
+ /*
+ * sg_dma_address(s) is only valid for entries that have
+ * sg_dma_len(s) != 0.
+ */
+ if (!sg_dma_len(s))
+ continue;
+
+		if (sg_dma_address(s) != next)
+			count++;
+
+		/* remember where a contiguous successor would have to start */
+		next = sg_dma_address(s) + sg_dma_len(s);
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(sg_dma_count_chunks);
+
/*
* The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
* helpers.