@@ -493,6 +493,7 @@ static inline void xdp_clear_features_flag(struct net_device *dev)
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{
+ lockdep_assert_held(this_cpu_ptr(&bpf_run_lock.redirect_lock));
/* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
* under local_bh_disable(), which provides the needed RCU protection
* for accessing map entries.
@@ -2495,6 +2495,7 @@ int skb_do_redirect(struct sk_buff *skb)
struct net_device *dev;
u32 flags = ri->flags;

+ lockdep_assert_held(this_cpu_ptr(&bpf_run_lock.redirect_lock));
dev = dev_get_by_index_rcu(net, ri->tgt_index);
ri->tgt_index = 0;
ri->flags = 0;
@@ -2525,6 +2526,8 @@ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

+ lockdep_assert_held(this_cpu_ptr(&bpf_run_lock.redirect_lock));
+
if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
return TC_ACT_SHOT;

@@ -2546,6 +2549,8 @@ BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

+ lockdep_assert_held(this_cpu_ptr(&bpf_run_lock.redirect_lock));
+
if (unlikely(flags))
return TC_ACT_SHOT;

@@ -2568,6 +2573,8 @@ BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

+ lockdep_assert_held(this_cpu_ptr(&bpf_run_lock.redirect_lock));
+
if (unlikely((plen && plen < sizeof(*params)) || flags))
return TC_ACT_SHOT;

@@ -4287,6 +4294,8 @@ u32 xdp_master_redirect(struct xdp_buff *xdp)
struct net_device *master, *slave;
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

+ lockdep_assert_held(this_cpu_ptr(&bpf_run_lock.redirect_lock));
+
master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
if (slave && slave != xdp->rxq->dev) {
@@ -4394,6 +4403,7 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
enum bpf_map_type map_type = ri->map_type;

+ lockdep_assert_held(this_cpu_ptr(&bpf_run_lock.redirect_lock));
if (map_type == BPF_MAP_TYPE_XSKMAP)
return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);

@@ -4408,6 +4418,7 @@ int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
enum bpf_map_type map_type = ri->map_type;

+ lockdep_assert_held(this_cpu_ptr(&bpf_run_lock.redirect_lock));
if (map_type == BPF_MAP_TYPE_XSKMAP)
return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);

The users of bpf_redirect_info should lock access to it by acquiring the
nested BH-lock bpf_run_lock.redirect_lock. The lock should be acquired
before the first usage (bpf_prog_run_xdp()) and dropped after the last
user in the context (xdp_do_redirect()). All current in-tree users have
been audited and updated.

Add lockdep annotations to ensure that new users acquire the lock.

Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Hao Luo <haoluo@google.com>
Cc: Jesper Dangaard Brouer <hawk@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Song Liu <song@kernel.org>
Cc: Stanislav Fomichev <sdf@google.com>
Cc: Yonghong Song <yonghong.song@linux.dev>
Cc: bpf@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/net/xdp.h |  1 +
 net/core/filter.c | 11 +++++++++++
 2 files changed, 12 insertions(+)
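
For reference, a new user in a driver's NAPI poll path would be expected
to follow roughly the pattern sketched below. This is an illustrative
sketch only, not part of the patch: the driver helper name
(mydrv_run_xdp()) is hypothetical, and it assumes the
local_lock_nested_bh()/local_unlock_nested_bh() helpers introduced
earlier in this series for taking the nested BH-lock:

#include <linux/filter.h>	/* xdp_do_redirect(), XDP_* verdicts */
#include <linux/local_lock.h>
#include <net/xdp.h>		/* bpf_prog_run_xdp() */

/* Hypothetical driver helper; called from NAPI poll, so BHs are
 * already disabled and only the nested BH-lock has to be taken.
 */
static u32 mydrv_run_xdp(struct net_device *dev, struct bpf_prog *xdp_prog,
			 struct xdp_buff *xdp)
{
	u32 act;

	/* Acquired before the first bpf_redirect_info user ... */
	local_lock_nested_bh(&bpf_run_lock.redirect_lock);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	if (act == XDP_REDIRECT && xdp_do_redirect(dev, xdp, xdp_prog) < 0)
		act = XDP_DROP;
	/* ... and dropped after the last one (xdp_do_redirect()). */
	local_unlock_nested_bh(&bpf_run_lock.redirect_lock);

	return act;
}

On !PREEMPT_RT the nested BH-lock should reduce to just the lockdep
annotation, so the asserts added by this patch are what actually catch a
missing acquire on regular kernels.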