@@ -87,6 +87,7 @@ struct virtnet_stat_desc {
struct virtnet_sq_free_stats {
u64 packets;
u64 bytes;
+ u64 xsk;
};
struct virtnet_sq_stats {
@@ -530,6 +531,7 @@ struct virtio_net_common_hdr {
};
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+static void virtnet_xsk_completed(struct send_queue *sq, int num);
enum virtnet_xmit_type {
VIRTNET_XMIT_TYPE_SKB,
@@ -733,6 +735,11 @@ static int virtnet_sq_set_premapped(struct send_queue *sq, bool premapped)
return 0;
}
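+/* Recover the byte count encoded in the upper bits of a
+ * VIRTNET_XMIT_TYPE_XSK tagged pointer.
+ */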
+static u32 virtnet_ptr_to_xsk(void *ptr)
+{
+ return ((unsigned long)ptr) >> VIRTIO_XSK_FLAG_OFFSET;
+}
+
static void __free_old_xmit(struct send_queue *sq, bool in_napi,
struct virtnet_sq_free_stats *stats)
{
@@ -773,12 +780,22 @@ static void __free_old_xmit(struct send_queue *sq, bool in_napi,
goto retry;
case VIRTNET_XMIT_TYPE_XSK:
- /* Make gcc happy. DONE in subsequent commit */
+ stats->bytes += virtnet_ptr_to_xsk(ptr);
+ stats->xsk++;
break;
}
}
}
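+/* Like __free_old_xmit(), but also report completed xsk descriptors
+ * back to the xsk pool.
+ */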
+static void virtnet_free_old_xmit(struct send_queue *sq, bool in_napi,
+ struct virtnet_sq_free_stats *stats)
+{
+ __free_old_xmit(sq, in_napi, stats);
+
+ if (stats->xsk)
+ virtnet_xsk_completed(sq, stats->xsk);
+}
+
/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
@@ -1207,7 +1224,7 @@ static void free_old_xmit(struct send_queue *sq, bool in_napi)
{
struct virtnet_sq_free_stats stats = {0};
- __free_old_xmit(sq, in_napi, &stats);
+ virtnet_free_old_xmit(sq, in_napi, &stats);
/* Avoid overhead when no packets have been processed
* happens when called speculatively from start_xmit.
@@ -1348,8 +1365,12 @@ static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
u64 kicks = 0;
int sent;
+ /* We are already in the tx napi here, so avoid waking it up again:
+ * call __free_old_xmit() and complete the xsk descriptors directly.
+ */
__free_old_xmit(sq, true, &stats);
+ if (stats.xsk)
+ xsk_tx_completed(sq->xsk.pool, stats.xsk);
+
sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
@@ -1368,6 +1389,16 @@ static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
return sent == budget;
}
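+/* Schedule the tx napi to run; if it is already scheduled, just mark
+ * it missed so that it polls again.
+ */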
+static void xsk_wakeup(struct send_queue *sq)
+{
+ if (napi_if_scheduled_mark_missed(&sq->napi))
+ return;
+
+ local_bh_disable();
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+ local_bh_enable();
+}
+
static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -1381,14 +1412,19 @@ static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
sq = &vi->sq[qid];
- if (napi_if_scheduled_mark_missed(&sq->napi))
- return 0;
+ xsk_wakeup(sq);
+ return 0;
+}
- local_bh_disable();
- virtqueue_napi_schedule(&sq->napi, sq->vq);
- local_bh_enable();
+static void virtnet_xsk_completed(struct send_queue *sq, int num)
+{
+ xsk_tx_completed(sq->xsk.pool, num);
- return 0;
+ /* If this is called from rx poll, start_xmit or xdp xmit, we should
+ * wake up the tx napi to consume the xsk tx queue, because the tx
+ * interrupt may not be triggered.
+ */
+ xsk_wakeup(sq);
}
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
@@ -1504,7 +1540,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
/* Free up any pending old buffers before queueing new ones. */
- __free_old_xmit(sq, false, &stats);
+ virtnet_free_old_xmit(sq, false, &stats);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -5854,7 +5890,7 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
goto retry;
case VIRTNET_XMIT_TYPE_XSK:
- /* Make gcc happy. DONE in subsequent commit */
+ xsk_tx_completed(sq->xsk.pool, 1);
break;
}
}
virtnet_free_old_xmit() distinguishes the three pointer types (skb,
xdp frame, xsk buffer) by the low bits of the pointer.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 56 +++++++++++++++++++++++++++++++++-------
 1 file changed, 46 insertions(+), 10 deletions(-)
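Illustrative note (not part of the patch): a minimal sketch of the
pointer-tagging scheme described above, assuming the type value occupies
the low two bits of the pointer. The mask, the flag-offset value and the
pack/unpack helper names below are assumptions for illustration; only the
VIRTNET_XMIT_TYPE_* and VIRTIO_XSK_FLAG_OFFSET names appear in the patch
itself.

    /* Sketch: the xmit type rides in the low bits of the queued pointer. */
    enum virtnet_xmit_type {
            VIRTNET_XMIT_TYPE_SKB,  /* 0x0 */
            VIRTNET_XMIT_TYPE_XDP,  /* 0x1, assumed */
            VIRTNET_XMIT_TYPE_XSK,  /* 0x2, assumed */
    };

    #define VIRTNET_XMIT_TYPE_MASK  0x3UL  /* assumed: two low flag bits */
    #define VIRTIO_XSK_FLAG_OFFSET  2      /* assumed value */

    /* Hypothetical helpers showing how a pointer is tagged and untagged. */
    static void *virtnet_xmit_ptr_pack(void *ptr, enum virtnet_xmit_type type)
    {
            return (void *)((unsigned long)ptr | type);
    }

    static enum virtnet_xmit_type virtnet_xmit_ptr_unpack(void **ptr)
    {
            unsigned long p = (unsigned long)*ptr;

            *ptr = (void *)(p & ~VIRTNET_XMIT_TYPE_MASK);
            return p & VIRTNET_XMIT_TYPE_MASK;
    }

    /* An xsk tx buffer has no object to point at, so the frame length is
     * stored above the flag bits instead; virtnet_ptr_to_xsk() in this
     * patch is the inverse of this encoding.
     */
    static void *virtnet_xsk_to_ptr(u32 len)
    {
            unsigned long p = (unsigned long)len << VIRTIO_XSK_FLAG_OFFSET;

            return virtnet_xmit_ptr_pack((void *)p, VIRTNET_XMIT_TYPE_XSK);
    }

With an encoding along these lines, __free_old_xmit() can switch on the
unpacked type and, for VIRTNET_XMIT_TYPE_XSK, recover the byte count via
virtnet_ptr_to_xsk() as the hunk above shows.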