@@ -266,6 +266,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
unsigned long offset, unsigned long size,
gfp_t gfp_mask);
+void sg_trim_table(struct sg_table *sgt);
+
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);
@@ -243,6 +243,59 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
EXPORT_SYMBOL(__sg_free_table);
/**
+ * sg_trim_table - Free unused sg table entries at the end of the table
+ * @table: The sg table header to use
+ *
+ * Description:
+ *   When the minimum number of entries a table must hold is not known
+ *   at sg_alloc_table() time, the table may end up with unused sg
+ *   entries at its end.  This function frees the fully unused trailing
+ *   sg entry blocks and adjusts table->orig_nents down accordingly.
+ *
+ **/
+void sg_trim_table(struct sg_table *table)
+{
+	struct scatterlist *sgl, *next, *prev = NULL;
+	unsigned int seen_nents = 0;
+	unsigned int left = table->orig_nents;
+	const unsigned int max_ents = SG_MAX_SINGLE_ALLOC;
+
+	sgl = table->sgl;
+	while (sgl) {
+		unsigned int alloc_size = left;
+		unsigned int sg_size;
+
+		/*
+		 * A chained chunk holds alloc_size - 1 data entries
+		 * plus a trailing chain entry pointing at 'next'.
+		 */
+		if (alloc_size > max_ents) {
+			next = sg_chain_ptr(&sgl[max_ents - 1]);
+			alloc_size = max_ents;
+			sg_size = alloc_size - 1;
+		} else {
+			sg_size = alloc_size;
+			next = NULL;
+		}
+
+		if (seen_nents >= table->nents) {
+			if (prev)
+				sg_mark_end(prev);
+			prev = NULL;
+			table->orig_nents -= sg_size;
+			sg_kfree(sgl, alloc_size);
+		} else {
+			prev = next ? &sgl[max_ents - 1] : NULL; /* chain ent */
+		}
+
+		left -= sg_size;
+		seen_nents += sg_size;
+		sgl = next;
+	}
+}
+EXPORT_SYMBOL(sg_trim_table);
+
+/**
* sg_free_table - Free a previously allocated sg table
* @table: The mapped sg table header
*