From patchwork Mon Mar 31 19:43:03 2025
X-Patchwork-Submitter: Jakub Kicinski <kuba@kernel.org>
X-Patchwork-Id: 14033866
X-Patchwork-Delegate: kuba@kernel.org
From: Jakub Kicinski <kuba@kernel.org>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, edumazet@google.com, pabeni@redhat.com,
 andrew+netdev@lunn.ch, horms@kernel.org, ap420073@gmail.com,
 asml.silence@gmail.com, almasrymina@google.com, dw@davidwei.uk,
 sdf@fomichev.me, Jakub Kicinski <kuba@kernel.org>
Subject: [PATCH net 1/2] net: move mp dev config validation to __net_mp_open_rxq()
Date: Mon, 31 Mar 2025 12:43:03 -0700
Message-ID: <20250331194303.2026903-1-kuba@kernel.org>
In-Reply-To: <20250331194201.2026422-1-kuba@kernel.org>
References: <20250331194201.2026422-1-kuba@kernel.org>

The devmem code performs a number of safety checks to avoid having to
reimplement all of them in the drivers. Move those checks to
__net_mp_open_rxq() and reuse that function for binding, to make sure
that io_uring ZC also benefits from them. While at it, rename the queue
ID variable to rxq_idx in __net_mp_open_rxq(), since we touch most of
the relevant lines anyway.
Fixes: 6e18ed929d3b ("net: add helpers for setting a memory provider on an rx queue")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Reviewed-by: Mina Almasry <almasrymina@google.com>
---
CC: ap420073@gmail.com
CC: almasrymina@google.com
CC: asml.silence@gmail.com
CC: dw@davidwei.uk
CC: sdf@fomichev.me
---
 include/net/page_pool/memory_provider.h |  6 +++
 net/core/devmem.c                       | 52 ++++++-------------------
 net/core/netdev-genl.c                  |  6 ---
 net/core/netdev_rx_queue.c              | 49 +++++++++++++++++------
 4 files changed, 55 insertions(+), 58 deletions(-)

diff --git a/include/net/page_pool/memory_provider.h b/include/net/page_pool/memory_provider.h
index b3e665897767..3724ac3c9fb2 100644
--- a/include/net/page_pool/memory_provider.h
+++ b/include/net/page_pool/memory_provider.h
@@ -6,6 +6,7 @@
 #include <net/page_pool/types.h>
 
 struct netdev_rx_queue;
+struct netlink_ext_ack;
 struct sk_buff;
 
 struct memory_provider_ops {
@@ -24,8 +25,13 @@ void net_mp_niov_clear_page_pool(struct net_iov *niov);
 
 int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
                     struct pp_memory_provider_params *p);
+int __net_mp_open_rxq(struct net_device *dev, unsigned int ifq_idx,
+                      const struct pp_memory_provider_params *p,
+                      struct netlink_ext_ack *extack);
 void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
                       struct pp_memory_provider_params *old_p);
+void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
+                        const struct pp_memory_provider_params *old_p);
 
 /**
  * net_mp_netmem_place_in_cache() - give a netmem to a page pool
diff --git a/net/core/devmem.c b/net/core/devmem.c
index ee145a2aa41c..f2ce3c2ebc97 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/dma-buf.h>
-#include <linux/ethtool_netlink.h>
 #include <linux/genalloc.h>
 #include <linux/mm.h>
 #include <linux/netdevice.h>
@@ -143,57 +142,28 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
                                     struct net_devmem_dmabuf_binding *binding,
                                     struct netlink_ext_ack *extack)
 {
+       struct pp_memory_provider_params mp_params = {
+               .mp_priv        = binding,
+               .mp_ops         = &dmabuf_devmem_ops,
+       };
        struct netdev_rx_queue *rxq;
        u32 xa_idx;
        int err;
 
-       if (rxq_idx >= dev->real_num_rx_queues) {
-               NL_SET_ERR_MSG(extack, "rx queue index out of range");
-               return -ERANGE;
-       }
-
-       if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
-               NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
-               return -EINVAL;
-       }
-
-       if (dev->cfg->hds_thresh) {
-               NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
-               return -EINVAL;
-       }
-
-       rxq = __netif_get_rx_queue(dev, rxq_idx);
-       if (rxq->mp_params.mp_ops) {
-               NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
-               return -EEXIST;
-       }
-
-#ifdef CONFIG_XDP_SOCKETS
-       if (rxq->pool) {
-               NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
-               return -EBUSY;
-       }
-#endif
-
-       err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
-                      GFP_KERNEL);
+       err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
        if (err)
                return err;
 
-       rxq->mp_params.mp_priv = binding;
-       rxq->mp_params.mp_ops = &dmabuf_devmem_ops;
-
-       err = netdev_rx_queue_restart(dev, rxq_idx);
+       rxq = __netif_get_rx_queue(dev, rxq_idx);
+       err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
+                      GFP_KERNEL);
        if (err)
-               goto err_xa_erase;
+               goto err_close_rxq;
 
        return 0;
 
-err_xa_erase:
-       rxq->mp_params.mp_priv = NULL;
-       rxq->mp_params.mp_ops = NULL;
-       xa_erase(&binding->bound_rxqs, xa_idx);
-
+err_close_rxq:
+       __net_mp_close_rxq(dev, rxq_idx, &mp_params);
        return err;
 }
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index fd1cfa9707dc..8ad4a944f368 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -874,12 +874,6 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock;
        }
 
-       if (dev_xdp_prog_count(netdev)) {
-               NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
-               err = -EEXIST;
-               goto err_unlock;
-       }
-
        binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
        if (IS_ERR(binding)) {
                err = PTR_ERR(binding);
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index 3af716f77a13..556b5393ec9f 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include <linux/ethtool_netlink.h>
 #include <linux/netdevice.h>
 #include <net/netdev_lock.h>
 #include <net/netdev_queues.h>
@@ -86,8 +87,9 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 }
 EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
 
-static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
-                            struct pp_memory_provider_params *p)
+int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+                     const struct pp_memory_provider_params *p,
+                     struct netlink_ext_ack *extack)
 {
        struct netdev_rx_queue *rxq;
        int ret;
@@ -95,16 +97,41 @@ static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
        if (!netdev_need_ops_lock(dev))
                return -EOPNOTSUPP;
 
-       if (ifq_idx >= dev->real_num_rx_queues)
+       if (rxq_idx >= dev->real_num_rx_queues)
                return -EINVAL;
-       ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues);
+       rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
 
-       rxq = __netif_get_rx_queue(dev, ifq_idx);
-       if (rxq->mp_params.mp_ops)
+       if (rxq_idx >= dev->real_num_rx_queues) {
+               NL_SET_ERR_MSG(extack, "rx queue index out of range");
+               return -ERANGE;
+       }
+       if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
+               NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
+               return -EINVAL;
+       }
+       if (dev->cfg->hds_thresh) {
+               NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
+               return -EINVAL;
+       }
+       if (dev_xdp_prog_count(dev)) {
+               NL_SET_ERR_MSG(extack, "unable to bind custom memory provider to device with XDP program attached");
                return -EEXIST;
+       }
+
+       rxq = __netif_get_rx_queue(dev, rxq_idx);
+       if (rxq->mp_params.mp_ops) {
+               NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
+               return -EEXIST;
+       }
+#ifdef CONFIG_XDP_SOCKETS
+       if (rxq->pool) {
+               NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
+               return -EBUSY;
+       }
+#endif
 
        rxq->mp_params = *p;
-       ret = netdev_rx_queue_restart(dev, ifq_idx);
+       ret = netdev_rx_queue_restart(dev, rxq_idx);
        if (ret) {
                rxq->mp_params.mp_ops = NULL;
                rxq->mp_params.mp_priv = NULL;
@@ -112,19 +139,19 @@ static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
        return ret;
 }
 
-int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
+int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
                    struct pp_memory_provider_params *p)
 {
        int ret;
 
        netdev_lock(dev);
-       ret = __net_mp_open_rxq(dev, ifq_idx, p);
+       ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
        netdev_unlock(dev);
        return ret;
 }
 
-static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
-                              struct pp_memory_provider_params *old_p)
+void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
+                       const struct pp_memory_provider_params *old_p)
 {
        struct netdev_rx_queue *rxq;
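For illustration (this sketch is not part of the patch; my_mp_ops and
my_mp_bind_queue are hypothetical names), a memory provider binding an
rx queue through the consolidated helper now inherits all of the devmem
safety checks rather than open-coding them:

static const struct memory_provider_ops my_mp_ops;  /* hypothetical */

static int my_mp_bind_queue(struct net_device *dev, u32 rxq_idx,
                            void *priv, struct netlink_ext_ack *extack)
{
        struct pp_memory_provider_params mp_params = {
                .mp_priv        = priv,
                .mp_ops         = &my_mp_ops,
        };

        /* __net_mp_open_rxq() validates rxq_idx, requires tcp-data-split
         * to be enabled and hds-thresh to be zero, and rejects queues
         * claimed by XDP, AF_XDP or another memory provider. The netdev
         * instance lock must be held; extack may be NULL, as the
         * net_mp_open_rxq() wrapper shows.
         */
        return __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
}

io_uring zero-copy receive reaches the same checks via net_mp_open_rxq(),
which takes the instance lock and passes a NULL extack.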
From patchwork Mon Mar 31 19:43:08 2025
X-Patchwork-Submitter: Jakub Kicinski <kuba@kernel.org>
X-Patchwork-Id: 14033867
X-Patchwork-Delegate: kuba@kernel.org
From: Jakub Kicinski <kuba@kernel.org>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, edumazet@google.com, pabeni@redhat.com,
 andrew+netdev@lunn.ch, horms@kernel.org, ap420073@gmail.com,
 asml.silence@gmail.com, almasrymina@google.com, dw@davidwei.uk,
 sdf@fomichev.me, Jakub Kicinski <kuba@kernel.org>
Subject: [PATCH net 2/2] net: avoid false positive warnings in __net_mp_close_rxq()
Date: Mon, 31 Mar 2025 12:43:08 -0700
Message-ID: <20250331194308.2026940-1-kuba@kernel.org>
In-Reply-To: <20250331194201.2026422-1-kuba@kernel.org>
References: <20250331194201.2026422-1-kuba@kernel.org>

The commit under Fixes solved the problem of spurious warnings when we
uninstall a memory provider from a device while it's down, but
__net_mp_close_rxq(), which is used by io_uring, was not fixed. Move
the fix over and reuse __net_mp_close_rxq() in the devmem path.
Fixes: a70f891e0fa0 ("net: devmem: do not WARN conditionally after netdev_rx_queue_restart()")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
---
 net/core/devmem.c          | 12 +++++-------
 net/core/netdev_rx_queue.c | 17 +++++++++--------
 2 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/net/core/devmem.c b/net/core/devmem.c
index f2ce3c2ebc97..6e27a47d0493 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -116,21 +116,19 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
        struct netdev_rx_queue *rxq;
        unsigned long xa_idx;
        unsigned int rxq_idx;
-       int err;
 
        if (binding->list.next)
                list_del(&binding->list);
 
        xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
-               WARN_ON(rxq->mp_params.mp_priv != binding);
-
-               rxq->mp_params.mp_priv = NULL;
-               rxq->mp_params.mp_ops = NULL;
+               const struct pp_memory_provider_params mp_params = {
+                       .mp_priv        = binding,
+                       .mp_ops         = &dmabuf_devmem_ops,
+               };
 
                rxq_idx = get_netdev_rx_queue_index(rxq);
-               err = netdev_rx_queue_restart(binding->dev, rxq_idx);
-               WARN_ON(err && err != -ENETDOWN);
+               __net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
        }
 
        xa_erase(&net_devmem_dmabuf_bindings, binding->id);
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index 556b5393ec9f..3e906c2950bd 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -154,17 +154,13 @@ void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
                        const struct pp_memory_provider_params *old_p)
 {
        struct netdev_rx_queue *rxq;
+       int err;
 
        if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
                return;
 
        rxq = __netif_get_rx_queue(dev, ifq_idx);
-
-       /* Callers holding a netdev ref may get here after we already
-        * went thru shutdown via dev_memory_provider_uninstall().
-        */
-       if (dev->reg_state > NETREG_REGISTERED &&
-           !rxq->mp_params.mp_ops)
+       if (!rxq->mp_params.mp_ops)
                return;
 
        if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
@@ -173,13 +169,18 @@ void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
 
        rxq->mp_params.mp_ops = NULL;
        rxq->mp_params.mp_priv = NULL;
-       WARN_ON(netdev_rx_queue_restart(dev, ifq_idx));
+       err = netdev_rx_queue_restart(dev, ifq_idx);
+       WARN_ON(err && err != -ENETDOWN);
 }
 
 void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
                      struct pp_memory_provider_params *old_p)
 {
        netdev_lock(dev);
-       __net_mp_close_rxq(dev, ifq_idx, old_p);
+       /* Callers holding a netdev ref may get here after we already
+        * went thru shutdown via dev_memory_provider_uninstall().
+        */
+       if (dev->reg_state <= NETREG_REGISTERED)
+               __net_mp_close_rxq(dev, ifq_idx, old_p);
        netdev_unlock(dev);
 }
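Again for illustration only (same hypothetical names as in the sketch
after patch 1), the matching unbind path: after this patch
__net_mp_close_rxq() itself tolerates a device that is down, warning
only about restart errors other than -ENETDOWN, so both devmem and
io_uring unwind through one helper:

static void my_mp_unbind_queue(struct net_device *dev, u32 rxq_idx,
                               void *priv)
{
        const struct pp_memory_provider_params mp_params = {
                .mp_priv        = priv,
                .mp_ops         = &my_mp_ops,   /* hypothetical, as above */
        };

        /* The helper cross-checks that the queue is really bound with
         * these exact params before unbinding, and a restart that fails
         * with -ENETDOWN (device is down) no longer trips the WARN.
         * Caller must hold the netdev instance lock.
         */
        __net_mp_close_rxq(dev, rxq_idx, &mp_params);
}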