Message ID | d6ed575afaf89fc35e233af5ccd063da944b4a3a.1601648734.git.lorenzo@kernel.org (mailing list archive) |
---|---|
State | Changes Requested |
Headers | show |
Series | mvneta: introduce XDP multi-buffer support | expand |
Lorenzo Bianconi <lorenzo@kernel.org> writes: > Introduce the capability to allocate a xdp multi-buff in > bpf_prog_test_run_xdp routine. This is a preliminary patch to > introduce > the selftests for new xdp multi-buff ebpf helpers > > Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org> > --- > net/bpf/test_run.c | 51 > ++++++++++++++++++++++++++++++++++++++-------- > 1 file changed, 43 insertions(+), 8 deletions(-) > > diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c > index bd291f5f539c..ec7286cd051b 100644 > --- a/net/bpf/test_run.c > +++ b/net/bpf/test_run.c > @@ -617,44 +617,79 @@ int bpf_prog_test_run_xdp(struct bpf_prog > *prog, const union bpf_attr *kattr, > { > u32 tailroom = SKB_DATA_ALIGN(sizeof(struct > skb_shared_info)); > u32 headroom = XDP_PACKET_HEADROOM; > - u32 size = kattr->test.data_size_in; > u32 repeat = kattr->test.repeat; > struct netdev_rx_queue *rxqueue; > + struct skb_shared_info *sinfo; > struct xdp_buff xdp = {}; > + u32 max_data_sz, size; > u32 retval, duration; > - u32 max_data_sz; > + int i, ret, data_len; > void *data; > - int ret; > > if (kattr->test.ctx_in || kattr->test.ctx_out) > return -EINVAL; > > - /* XDP have extra tailroom as (most) drivers use full page > */ > max_data_sz = 4096 - headroom - tailroom; For the sake of consistency, can this 4096 be changed to PAGE_SIZE ? 
Same as in data_len = min_t(int, kattr->test.data_size_in - size, PAGE_SIZE); expression below > + size = min_t(u32, kattr->test.data_size_in, max_data_sz); > + data_len = size; > > - data = bpf_test_init(kattr, kattr->test.data_size_in, > - max_data_sz, headroom, tailroom); > + data = bpf_test_init(kattr, size, max_data_sz, headroom, > tailroom); > if (IS_ERR(data)) > return PTR_ERR(data); > > xdp.data_hard_start = data; > xdp.data = data + headroom; > xdp.data_meta = xdp.data; > - xdp.data_end = xdp.data + size; > + xdp.data_end = xdp.data + data_len; > xdp.frame_sz = headroom + max_data_sz + tailroom; > > + sinfo = xdp_get_shared_info_from_buff(&xdp); > + if (unlikely(kattr->test.data_size_in > size)) { > + void __user *data_in = > u64_to_user_ptr(kattr->test.data_in); > + > + while (size < kattr->test.data_size_in) { > + skb_frag_t *frag = > &sinfo->frags[sinfo->nr_frags]; > + struct page *page; > + int data_len; > + > + page = alloc_page(GFP_KERNEL); > + if (!page) { > + ret = -ENOMEM; > + goto out; > + } > + > + __skb_frag_set_page(frag, page); > + data_len = min_t(int, > kattr->test.data_size_in - size, > + PAGE_SIZE); > + skb_frag_size_set(frag, data_len); > + if (copy_from_user(page_address(page), > data_in + size, > + data_len)) { > + ret = -EFAULT; > + goto out; > + } > + sinfo->nr_frags++; > + size += data_len; > + } > + xdp.mb = 1; > + } > + > rxqueue = > __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, > 0); > xdp.rxq = &rxqueue->xdp_rxq; > bpf_prog_change_xdp(NULL, prog); > ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, > true); > if (ret) > goto out; > + > if (xdp.data != data + headroom || xdp.data_end != > xdp.data + size) > - size = xdp.data_end - xdp.data; > + size += xdp.data_end - xdp.data - data_len; Can we please drop the variable shadowing of data_len ? This is confusing since the initial value of data_len is correct in the `size` calculation, while its value inside the while loop it not. 
This seems to be syntactically correct, but I think it's better practice to avoid shadowing here. > + > ret = bpf_test_finish(kattr, uattr, xdp.data, size, > retval, duration); > out: > bpf_prog_change_xdp(prog, NULL); > + for (i = 0; i < sinfo->nr_frags; i++) > + __free_page(skb_frag_page(&sinfo->frags[i])); > kfree(data); > + > return ret; > }
On Thu, 8 Oct 2020 11:06:14 +0300 Shay Agroskin <shayagr@amazon.com> wrote: > Lorenzo Bianconi <lorenzo@kernel.org> writes: > > > Introduce the capability to allocate a xdp multi-buff in > > bpf_prog_test_run_xdp routine. This is a preliminary patch to > > introduce > > the selftests for new xdp multi-buff ebpf helpers > > > > Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org> > > --- > > net/bpf/test_run.c | 51 ++++++++++++++++++++++++++++++++++++++-------- > > 1 file changed, 43 insertions(+), 8 deletions(-) > > > > diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c > > index bd291f5f539c..ec7286cd051b 100644 > > --- a/net/bpf/test_run.c > > +++ b/net/bpf/test_run.c > > @@ -617,44 +617,79 @@ int bpf_prog_test_run_xdp(struct bpf_prog > > *prog, const union bpf_attr *kattr, > > { > > u32 tailroom = SKB_DATA_ALIGN(sizeof(struct > > skb_shared_info)); > > u32 headroom = XDP_PACKET_HEADROOM; > > - u32 size = kattr->test.data_size_in; > > u32 repeat = kattr->test.repeat; > > struct netdev_rx_queue *rxqueue; > > + struct skb_shared_info *sinfo; > > struct xdp_buff xdp = {}; > > + u32 max_data_sz, size; > > u32 retval, duration; > > - u32 max_data_sz; > > + int i, ret, data_len; > > void *data; > > - int ret; > > > > if (kattr->test.ctx_in || kattr->test.ctx_out) > > return -EINVAL; > > > > - /* XDP have extra tailroom as (most) drivers use full page > > */ > > max_data_sz = 4096 - headroom - tailroom; > > For the sake of consistency, can this 4096 be changed to PAGE_SIZE > ? The size 4096 is explicitly use, because the selftest xdp_adjust_tail expect this, else it will fail on ARCHs with 64K PAGE_SIZE. It also seems excessive to create 64K packets for testing XDP. See: tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c > Same as in > data_len = min_t(int, kattr->test.data_size_in - size, > PAGE_SIZE); > > expression below
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index bd291f5f539c..ec7286cd051b 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -617,44 +617,79 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, { u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); u32 headroom = XDP_PACKET_HEADROOM; - u32 size = kattr->test.data_size_in; u32 repeat = kattr->test.repeat; struct netdev_rx_queue *rxqueue; + struct skb_shared_info *sinfo; struct xdp_buff xdp = {}; + u32 max_data_sz, size; u32 retval, duration; - u32 max_data_sz; + int i, ret, data_len; void *data; - int ret; if (kattr->test.ctx_in || kattr->test.ctx_out) return -EINVAL; - /* XDP have extra tailroom as (most) drivers use full page */ max_data_sz = 4096 - headroom - tailroom; + size = min_t(u32, kattr->test.data_size_in, max_data_sz); + data_len = size; - data = bpf_test_init(kattr, kattr->test.data_size_in, - max_data_sz, headroom, tailroom); + data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom); if (IS_ERR(data)) return PTR_ERR(data); xdp.data_hard_start = data; xdp.data = data + headroom; xdp.data_meta = xdp.data; - xdp.data_end = xdp.data + size; + xdp.data_end = xdp.data + data_len; xdp.frame_sz = headroom + max_data_sz + tailroom; + sinfo = xdp_get_shared_info_from_buff(&xdp); + if (unlikely(kattr->test.data_size_in > size)) { + void __user *data_in = u64_to_user_ptr(kattr->test.data_in); + + while (size < kattr->test.data_size_in) { + skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags]; + struct page *page; + int data_len; + + page = alloc_page(GFP_KERNEL); + if (!page) { + ret = -ENOMEM; + goto out; + } + + __skb_frag_set_page(frag, page); + data_len = min_t(int, kattr->test.data_size_in - size, + PAGE_SIZE); + skb_frag_size_set(frag, data_len); + if (copy_from_user(page_address(page), data_in + size, + data_len)) { + ret = -EFAULT; + goto out; + } + sinfo->nr_frags++; + size += data_len; + } + xdp.mb = 1; + } + rxqueue = 
__netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); xdp.rxq = &rxqueue->xdp_rxq; bpf_prog_change_xdp(NULL, prog); ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true); if (ret) goto out; + if (xdp.data != data + headroom || xdp.data_end != xdp.data + size) - size = xdp.data_end - xdp.data; + size += xdp.data_end - xdp.data - data_len; + ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration); out: bpf_prog_change_xdp(prog, NULL); + for (i = 0; i < sinfo->nr_frags; i++) + __free_page(skb_frag_page(&sinfo->frags[i])); kfree(data); + return ret; }
Introduce the capability to allocate an xdp multi-buff in the bpf_prog_test_run_xdp routine. This is a preliminary patch to introduce the selftests for the new xdp multi-buff ebpf helpers. Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org> --- net/bpf/test_run.c | 51 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 43 insertions(+), 8 deletions(-)