diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -581,14 +581,16 @@ struct mlx5e_mpw_info {
#define MLX5E_MAX_RX_FRAGS 4

struct mlx5e_rq;
+struct mlx5e_xdp_buff;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 cqe_bcnt,
- u32 head_offset, u32 page_idx);
+ u32 head_offset, u32 page_idx,
+ struct mlx5e_xdp_buff *mxbuf);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- struct mlx5_cqe64 *cqe, u32 cqe_bcnt);
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct mlx5e_xdp_buff *mxbuf);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
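The typedef change above is the crux of the series: the scratch mlx5e_xdp_buff that each SKB constructor used to keep on its own stack frame now lives in the caller and is threaded through the mlx5e_fp_skb_from_cqe* indirect calls. A minimal, self-contained C sketch of the ownership change (every name below is a simplified stand-in for illustration, not the real driver API):

#include <stdio.h>

/* Simplified stand-ins for the driver types -- illustration only. */
struct xdp_buff { void *data; void *data_end; unsigned int flags; };
struct mxbuf    { struct xdp_buff xdp; };

/* After the patch: the constructor borrows a caller-owned scratch buff
 * instead of declaring its own, so the buff outlives the call. */
static const char *skb_from_cqe(unsigned int cqe_bcnt, struct mxbuf *mxbuf)
{
	mxbuf->xdp.flags = 0;		/* stands in for mlx5e_fill_mxbuf() */
	return cqe_bcnt ? "skb" : NULL;
}

static void handle_rx_cqe(unsigned int cqe_bcnt)
{
	struct mxbuf mxbuf;		/* hoisted out of skb_from_cqe() */
	const char *skb = skb_from_cqe(cqe_bcnt, &mxbuf);

	if (!skb)
		return;			/* consumed, e.g. by XDP */
	printf("built %s, flags=%u\n", skb, mxbuf.xdp.flags);
}

int main(void)
{
	handle_rx_cqe(64);
	return 0;
}

Because the caller now owns the buff, its contents stay inspectable after the constructor returns; that is also what makes the defensive mxbuf.xdp.flags = 0 in the SHAMPO handler below meaningful.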
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -249,7 +249,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u16 cqe_bcnt,
u32 head_offset,
- u32 page_idx)
+ u32 page_idx,
+ struct mlx5e_xdp_buff *mxbuf_)
{
struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units.xsk_buffs[page_idx]);
struct bpf_prog *prog;
@@ -304,7 +305,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
struct mlx5_cqe64 *cqe,
- u32 cqe_bcnt)
+ u32 cqe_bcnt,
+ struct mlx5e_xdp_buff *mxbuf_)
{
struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(*wi->xskp);
struct bpf_prog *prog;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -16,10 +16,12 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u16 cqe_bcnt,
u32 head_offset,
- u32 page_idx);
+ u32 page_idx,
+ struct mlx5e_xdp_buff *mxbuf_);
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
struct mlx5_cqe64 *cqe,
- u32 cqe_bcnt);
+ u32 cqe_bcnt,
+ struct mlx5e_xdp_buff *mxbuf_);

#endif /* __MLX5_EN_XSK_RX_H__ */
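Note that the XSK variants take the new argument only to keep the mlx5e_fp_skb_from_cqe* signatures uniform for indirect-call dispatch; internally they keep using the mlx5e_xdp_buff embedded in the XSK frame via xsk_buff_to_mxbuf(), which is presumably why the parameter is spelled mxbuf_ and left unused. A small self-contained sketch of that dispatch pattern (stand-in names for illustration, not the driver's):

#include <stdio.h>

/* Simplified stand-ins -- illustration only. */
struct xdp_buff { unsigned int flags; };
struct mxbuf    { struct xdp_buff xdp; };

/* One signature for every constructor, as with mlx5e_fp_skb_from_cqe. */
typedef const char *(*skb_from_cqe_fn)(unsigned int cqe_bcnt,
				       struct mxbuf *mxbuf);

static const char *skb_from_cqe_linear(unsigned int cqe_bcnt,
				       struct mxbuf *mxbuf)
{
	mxbuf->xdp.flags = 0;	/* regular path fills the caller's buff */
	return "linear skb";
}

static const char *xsk_skb_from_cqe_linear(unsigned int cqe_bcnt,
					   struct mxbuf *mxbuf_)
{
	(void)mxbuf_;		/* XSK frames carry their own buff */
	return "xsk skb";
}

int main(void)
{
	skb_from_cqe_fn fns[] = { skb_from_cqe_linear, xsk_skb_from_cqe_linear };
	struct mxbuf mxbuf;

	for (int i = 0; i < 2; i++)
		printf("%s\n", fns[i](64, &mxbuf));
	return 0;
}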
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -63,11 +63,11 @@
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
- u32 page_idx);
+ u32 page_idx, struct mlx5e_xdp_buff *mxbuf);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
- u32 page_idx);
+ u32 page_idx, struct mlx5e_xdp_buff *mxbuf);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -1662,7 +1662,8 @@ static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,

static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt,
+ struct mlx5e_xdp_buff *mxbuf)
{
struct mlx5e_frag_page *frag_page = wi->frag_page;
u16 rx_headroom = rq->buff.headroom;
@@ -1684,17 +1685,15 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,

prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
-
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf))
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf))
return NULL; /* page/packet was consumed by XDP */

- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1710,14 +1709,14 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,

static struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt,
+ struct mlx5e_xdp_buff *mxbuf)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
u16 rx_headroom = rq->buff.headroom;
struct mlx5e_frag_page *frag_page;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
@@ -1737,8 +1736,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
net_prefetch(va + rx_headroom);

mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- frag_consumed_bytes, &mxbuf);
- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ frag_consumed_bytes, mxbuf);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
truesize = 0;

cqe_bcnt -= frag_consumed_bytes;
@@ -1750,7 +1749,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi

frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);

- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp, frag_page,
wi->offset, frag_consumed_bytes);
truesize += frag_info->frag_stride;

@@ -1760,7 +1759,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}

prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_wqe_frag_info *pwi;

@@ -1770,21 +1769,21 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
return NULL; /* page/packet was consumed by XDP */
}

- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
- mxbuf.xdp.data_end - mxbuf.xdp.data,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ skb = mlx5e_build_linear_skb(rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
+ mxbuf->xdp.data_end - mxbuf->xdp.data,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb))
return NULL;

skb_mark_for_recycle(skb);
head_wi->frag_page->frags++;

- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, wi - head_wi - 1,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(&mxbuf->xdp));

for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
pwi->frag_page->frags++;
@@ -1815,6 +1814,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
+ struct mlx5e_xdp_buff mxbuf;
struct sk_buff *skb;
u32 cqe_bcnt;
u16 ci;
@@ -1832,7 +1832,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
mlx5e_xsk_skb_from_cqe_linear,
- rq, wi, cqe, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt, &mxbuf);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
@@ -1863,6 +1863,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
+ struct mlx5e_xdp_buff mxbuf;
struct sk_buff *skb;
u32 cqe_bcnt;
u16 ci;
@@ -1879,7 +1880,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, wi, cqe, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt, &mxbuf);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
@@ -1907,6 +1908,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
+ struct mlx5e_xdp_buff mxbuf;
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5_wq_ll *wq;
struct sk_buff *skb;
@@ -1932,7 +1934,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
- rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
+ rq, wi, cqe, cqe_bcnt, head_offset, page_idx, &mxbuf);
if (!skb)
goto mpwrq_cqe_out;

@@ -1979,7 +1981,7 @@ mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
- u32 page_idx)
+ u32 page_idx, struct mlx5e_xdp_buff *mxbuf)
{
struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
@@ -1987,7 +1989,6 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
u32 frag_offset = head_offset;
u32 byte_cnt = cqe_bcnt;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
unsigned int truesize = 0;
struct bpf_prog *prog;
struct sk_buff *skb;
@@ -2033,9 +2034,9 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
}

- mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
+ mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, mxbuf);

- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);

while (byte_cnt) {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
@@ -2046,7 +2047,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
else
truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));

- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp, frag_page, frag_offset,
pg_consumed_bytes);
byte_cnt -= pg_consumed_bytes;
frag_offset = 0;
@@ -2054,7 +2055,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}

if (prog) {
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_frag_page *pfp;

@@ -2067,10 +2068,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return NULL; /* page/packet was consumed by XDP */
}

- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
+ skb = mlx5e_build_linear_skb(rq, mxbuf->xdp.data_hard_start,
linear_frame_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
mlx5e_page_release_fragmented(rq, &wi->linear_page);
return NULL;
@@ -2080,13 +2081,13 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
wi->linear_page.frags++;
mlx5e_page_release_fragmented(rq, &wi->linear_page);

- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;

/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, frag_page - head_page,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(&mxbuf->xdp));

pagep = head_page;
do
@@ -2097,12 +2098,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
} else {
dma_addr_t addr;

- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;

xdp_update_skb_shared_info(skb, sinfo->nr_frags,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(&mxbuf->xdp));

pagep = frag_page - sinfo->nr_frags;
do
@@ -2124,7 +2125,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
- u32 page_idx)
+ u32 page_idx, struct mlx5e_xdp_buff *mxbuf)
{
struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
u16 rx_headroom = rq->buff.headroom;
@@ -2152,20 +2153,19 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,

prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
frag_page->frags++;
return NULL; /* page/packet was consumed by XDP */
}

- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}

frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -2288,12 +2288,14 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
bool flush = cqe->shampo.flush;
bool match = cqe->shampo.match;
struct mlx5e_rq_stats *stats = rq->stats;
+ struct mlx5e_xdp_buff mxbuf;
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5e_mpw_info *wi;
struct mlx5_wq_ll *wq;

wi = mlx5e_get_mpw_info(rq, wqe_id);
wi->consumed_strides += cstrides;
+ mxbuf.xdp.flags = 0;

if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
mlx5e_handle_rx_err_cqe(rq, cqe);
@@ -2316,7 +2318,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
else
*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
- data_offset, page_idx);
+ data_offset, page_idx, &mxbuf);
if (unlikely(!*skb))
goto free_hd_entry;

@@ -2377,6 +2379,7 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
+ struct mlx5e_xdp_buff mxbuf;
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5_wq_ll *wq;
struct sk_buff *skb;
@@ -2404,7 +2407,7 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
mlx5e_skb_from_cqe_mpwrq_nonlinear,
mlx5e_xsk_skb_from_cqe_mpwrq_linear,
rq, wi, cqe, cqe_bcnt, head_offset,
- page_idx);
+ page_idx, &mxbuf);
if (!skb)
goto mpwrq_cqe_out;

@@ -2632,6 +2635,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
+ struct mlx5e_xdp_buff mxbuf;
struct sk_buff *skb;
u32 cqe_bcnt;
u16 ci;
@@ -2648,7 +2652,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, wi, cqe, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt, &mxbuf);
if (!skb)
goto wq_cyc_pop;

@@ -2722,6 +2726,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
+ struct mlx5e_xdp_buff mxbuf;
struct sk_buff *skb;
u32 cqe_bcnt;
u16 trap_id;
@@ -2737,7 +2742,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
goto wq_cyc_pop;
}

- skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
+ skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt, &mxbuf);
if (!skb)
goto wq_cyc_pop;
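
A closing note on the mxbuf.xdp.flags = 0 added in mlx5e_handle_rx_cqe_mpwrq_shampo(): with the buff hoisted into the caller, a path that returns before mlx5e_fill_mxbuf() runs (for example the header-only SHAMPO case served by mlx5e_skb_from_cqe_shampo()) would leave the flags word as uninitialized stack memory for any later xdp_buff_has_frags()-style test. Zeroing it up front appears to be a defensive guard against exactly that; the sketch below (stand-in types, not the kernel's) shows the hazard it avoids:

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins -- illustration only. */
struct xdp_buff { unsigned int flags; };
#define XDP_FLAGS_HAS_FRAGS 1u

static bool xdp_buff_has_frags(const struct xdp_buff *xdp)
{
	return xdp->flags & XDP_FLAGS_HAS_FRAGS;
}

int main(void)
{
	struct xdp_buff xdp;	/* stack garbage until a fill step runs */

	xdp.flags = 0;		/* defensive zeroing, as the SHAMPO handler does */

	/* A path that skips the fill step can still test the flag safely. */
	printf("has frags: %d\n", xdp_buff_has_frags(&xdp));
	return 0;
}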