@@ -518,6 +518,9 @@ enum {
* use frags only up until ubuf_info is released
*/
SKBFL_MANAGED_FRAG_REFS = BIT(4),
+
+ /* the fragment must stay in place: never move or copy it (tested via skb_fixed()) */
+ SKBFL_FIXED_FRAG = BIT(5),
};
#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
@@ -1674,6 +1677,12 @@ static inline bool skb_zcopy_managed(const struct sk_buff *skb)
return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS;
}
+/* Return true when the skb's frags are marked SKBFL_FIXED_FRAG (must not be moved or copied). */
+static inline bool skb_fixed(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->flags & SKBFL_FIXED_FRAG;
+}
+
static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
const struct sk_buff *skb2)
{
@@ -3135,7 +3143,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
- if (likely(!skb_zcopy(skb)))
+ if (likely(!skb_zcopy(skb) || skb_fixed(skb))) /* fixed frags must never be copied; NOTE(review): this skips skb_copy_ubufs() — confirm the ubuf_info lifetime covers the rx loop */
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}