@@ -319,19 +319,24 @@ struct udp_dev_scratch {
bool csum_unnecessary;
};
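+/* accessor for the scratch area overlaying skb fields that are unused
+ * while the skb sits on the UDP receive queue
+ */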
+static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
+{
+ return (struct udp_dev_scratch *)&skb->dev_scratch;
+}
+
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
- return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
+ return udp_skb_scratch(skb)->len;
}
static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
- return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
+ return udp_skb_scratch(skb)->csum_unnecessary;
}
static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
- return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
+ return udp_skb_scratch(skb)->is_linear;
}
#else
@@ -1163,32 +1163,47 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
return ret;
}
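+/* the cached truesize stays well below 2^31, so the top bit of the scratch
+ * word is free to flag skbs with no head state left to release at consume time
+ */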
+#define UDP_SKB_IS_STATELESS 0x80000000
+
#if BITS_PER_LONG == 64
static void udp_set_dev_scratch(struct sk_buff *skb)
{
- struct udp_dev_scratch *scratch;
+ struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
- scratch = (struct udp_dev_scratch *)&skb->dev_scratch;
scratch->truesize = skb->truesize;
scratch->len = skb->len;
scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
scratch->is_linear = !skb_is_nonlinear(skb);
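+ /* a missing dst means the head states were already dropped at enqueue
+ * time, so the skb can be consumed without touching them again
+ */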
+ if (likely(!skb->_skb_refdst))
+ scratch->truesize |= UDP_SKB_IS_STATELESS;
}
static int udp_skb_truesize(struct sk_buff *skb)
{
- return ((struct udp_dev_scratch *)&skb->dev_scratch)->truesize;
+ return udp_skb_scratch(skb)->truesize & ~UDP_SKB_IS_STATELESS;
+}
+
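+/* true when the skb still carries head states to be released later */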
+static bool udp_skb_has_head_state(struct sk_buff *skb)
+{
+ return !(udp_skb_scratch(skb)->truesize & UDP_SKB_IS_STATELESS);
}
#else
static void udp_set_dev_scratch(struct sk_buff *skb)
{
skb->dev_scratch = skb->truesize;
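+ /* only the truesize is cached on 32 bit; reuse its top bit as above */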
+ if (likely(!skb->_skb_refdst))
+ skb->dev_scratch |= UDP_SKB_IS_STATELESS;
}
static int udp_skb_truesize(struct sk_buff *skb)
{
- return skb->dev_scratch;
+ return skb->dev_scratch & ~UDP_SKB_IS_STATELESS;
+}
+
+static bool udp_skb_has_head_state(struct sk_buff *skb)
+{
+ return !(skb->dev_scratch & UDP_SKB_IS_STATELESS);
}
#endif
@@ -1388,10 +1403,10 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
unlock_sock_fast(sk, slow);
}
- /* we cleared the head states previously only if the skb lacks any IP
- * options, see __udp_queue_rcv_skb().
+ /* In the common case the head states were already cleared at enqueue
+ * time, see __udp_queue_rcv_skb().
*/
- if (unlikely(IPCB(skb)->opt.optlen > 0))
+ if (unlikely(udp_skb_has_head_state(skb)))
skb_release_head_state(skb);
consume_stateless_skb(skb);
}
@@ -1784,11 +1799,12 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk_mark_napi_id_once(sk, skb);
}
- /* At recvmsg() time we need skb->dst to process IP options-related
- * cmsg, elsewhere can we clear all pending head states while they are
- * hot in the cache
+ /* At recvmsg() time we may access skb->dst or skb->sp depending on
+ * the IP options and the cmsg flags; otherwise we can clear all
+ * pending head states while they are hot in the cache
*/
- if (likely(IPCB(skb)->opt.optlen == 0))
+ if (likely(IPCB(skb)->opt.optlen == 0 &&
+ !(inet_sk(sk)->cmsg_flags & IP_CMSG_PASSSEC)))
skb_release_head_state(skb);
rc = __udp_enqueue_schedule_skb(sk, skb);