@@ -183,6 +183,15 @@ enum {
SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
};
+/* The callback notifies userspace to release buffers when skb DMA is done in
+ * the lower device; desc is used to track the userspace buffer index.
+ */
+struct skb_ubuf_info {
+ /* supports buffer allocation from userspace */
+ void (*callback)(struct sk_buff *);
+ size_t desc;
+};
+
/* This data is invariant across clones and lives at
* the end of the header data, ie. at skb->end.
*/
@@ -205,6 +214,10 @@ struct skb_shared_info {
/* Intermediate layers must ensure that destructor_arg
* remains valid until skb destructor */
void * destructor_arg;
+
+ /* DMA mapping from userspace buffers */
+ struct skb_ubuf_info ubuf;
+
/* must be last field, see pskb_expand_head() */
skb_frag_t frags[MAX_SKB_FRAGS];
};
@@ -210,6 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
+ shinfo->ubuf.callback = NULL;
if (fclone) {
struct sk_buff *child = skb + 1;
@@ -329,6 +330,15 @@ static void skb_release_data(struct sk_buff *skb)
if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
+
+ /*
+ * If the skb buffer is from userspace, we need to notify the caller
+ * that the lower device's DMA has completed.
+ */
+ if (skb_shinfo(skb)->ubuf.callback) {
+ skb_shinfo(skb)->ubuf.callback(skb);
+ skb_shinfo(skb)->ubuf.callback = NULL;
+ }
kfree(skb->head);
}
@@ -492,6 +502,7 @@ bool skb_recycle_check(struct sk_buff *skb, int skb_size)
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
+ shinfo->ubuf.callback = NULL;
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->data = skb->head + NET_SKB_PAD;