[net-next,v2] sctp: Avoid enqueuing addr events redundantly

Message ID 20241104083545.114-1-gnaaman@drivenets.com (mailing list archive)
State Accepted
Commit 702c290a1cb16f4a64567cae0bedb848399f7915
Delegated to: Netdev Maintainers
Series [net-next,v2] sctp: Avoid enqueuing addr events redundantly

Checks

Context Check Description
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 3 this patch: 3
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 8 of 8 maintainers
netdev/build_clang success Errors and warnings before: 3 this patch: 3
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 4 this patch: 4
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 42 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-11-07--12-00 (tests: 787)

Commit Message

Gilad Naaman Nov. 4, 2024, 8:35 a.m. UTC
Avoid modifying or enqueuing new events if it's possible to tell that no
one will consume them.

Since enqueueing requires searching the current queue for opposite
events for the same address, adding addresses en masse turns this
inetaddr_event handler into a bottleneck, as it gets slower with each
address added.

Signed-off-by: Gilad Naaman <gnaaman@drivenets.com>
---
Changes in v2:
 - Reorder list removal to avoid race with new sessions
---
 net/sctp/ipv6.c     |  2 +-
 net/sctp/protocol.c | 16 +++++++++++++++-
 2 files changed, 16 insertions(+), 2 deletions(-)
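
To make the cost argument concrete: the real enqueue path
(sctp_addr_wq_mgmt() in net/sctp/protocol.c, visible in the hunks below)
first looks up addr_waitq for an existing event on the same address via
sctp_addr_wq_lookup(), so N back-to-back address additions perform on the
order of N^2 comparisons in total. The following self-contained userspace
sketch models that pattern and the early exit this patch adds. It is an
illustration only, not kernel code; every name in it (addr_event,
event_queue, enqueue_addr_event, has_consumers) is invented here, and the
opposite-event cancellation is only a loose stand-in for what the kernel
actually does.

#include <stdio.h>
#include <stdlib.h>

enum ev_cmd { EV_ADD, EV_DEL };

struct addr_event {
	unsigned int addr;		/* stand-in for the IPv4/IPv6 address  */
	enum ev_cmd cmd;		/* pending ADD or DEL for that address */
	struct addr_event *next;
};

struct event_queue {
	struct addr_event *head;
	unsigned long scans;		/* entries walked across all enqueues */
};

/* Enqueue an address event, modelling the patch's early exit: if nothing
 * will ever read the queue, do not search or grow it at all.
 */
static void enqueue_addr_event(struct event_queue *q, int has_consumers,
			       unsigned int addr, enum ev_cmd cmd)
{
	struct addr_event *ev, *prev = NULL;

	if (!has_consumers)
		return;

	/* Loosely like sctp_addr_wq_lookup(): linear scan for a pending
	 * event on the same address, so an opposite event can be cancelled.
	 */
	for (ev = q->head; ev; prev = ev, ev = ev->next) {
		q->scans++;
		if (ev->addr == addr) {
			if (ev->cmd != cmd) {
				/* Opposite event pending: both cancel out. */
				if (prev)
					prev->next = ev->next;
				else
					q->head = ev->next;
				free(ev);
			}
			return;
		}
	}

	ev = malloc(sizeof(*ev));
	ev->addr = addr;
	ev->cmd = cmd;
	ev->next = q->head;
	q->head = ev;
}

int main(void)
{
	struct event_queue q = { .head = NULL, .scans = 0 };
	unsigned int i, n = 10000;

	/* With a consumer present every enqueue scans the queue, so the
	 * total work grows quadratically in the number of addresses.
	 */
	for (i = 0; i < n; i++)
		enqueue_addr_event(&q, 1, i, EV_ADD);
	printf("with consumers:    %lu entries scanned\n", q.scans);

	/* With the early exit (no consumers) nothing is scanned or queued. */
	q.scans = 0;
	for (i = 0; i < n; i++)
		enqueue_addr_event(&q, 0, n + i, EV_ADD);
	printf("without consumers: %lu entries scanned\n", q.scans);

	return 0;
}

In this model the first loop walks roughly n*(n-1)/2 entries while the
second walks none, which is the difference the early exit makes for the
mass-address case described in the commit message.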

Comments

Xin Long Nov. 7, 2024, 2:38 p.m. UTC | #1
On Mon, Nov 4, 2024 at 3:36 AM Gilad Naaman <gnaaman@drivenets.com> wrote:
>
>
> Avoid modifying or enqueuing new events if it's possible to tell that no
> one will consume them.
>
> Since enqueueing requires searching the current queue for opposite
> events for the same address, adding addresses en masse turns this
> inetaddr_event handler into a bottleneck, as it gets slower with each
> address added.
>
> Signed-off-by: Gilad Naaman <gnaaman@drivenets.com>
Acked-by: Xin Long <lucien.xin@gmail.com>

Thanks.

> ---
> Changes in v2:
>  - Reorder list removal to avoid race with new sessions
> ---
>  net/sctp/ipv6.c     |  2 +-
>  net/sctp/protocol.c | 16 +++++++++++++++-
>  2 files changed, 16 insertions(+), 2 deletions(-)
>
> diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
> index f7b809c0d142..b96c849545ae 100644
> --- a/net/sctp/ipv6.c
> +++ b/net/sctp/ipv6.c
> @@ -103,10 +103,10 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
>                             ipv6_addr_equal(&addr->a.v6.sin6_addr,
>                                             &ifa->addr) &&
>                             addr->a.v6.sin6_scope_id == ifa->idev->dev->ifindex) {
> -                               sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
>                                 found = 1;
>                                 addr->valid = 0;
>                                 list_del_rcu(&addr->list);
> +                               sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
>                                 break;
>                         }
>                 }
> diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
> index 39ca5403d4d7..8b9a1b96695e 100644
> --- a/net/sctp/protocol.c
> +++ b/net/sctp/protocol.c
> @@ -738,6 +738,20 @@ void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cm
>          */
>
>         spin_lock_bh(&net->sctp.addr_wq_lock);
> +
> +       /* Avoid searching the queue or modifying it if there are no consumers,
> +        * as it can lead to performance degradation if addresses are modified
> +        * en-masse.
> +        *
> +        * If the queue already contains some events, update it anyway to avoid
> +        * ugly races between new sessions and new address events.
> +        */
> +       if (list_empty(&net->sctp.auto_asconf_splist) &&
> +           list_empty(&net->sctp.addr_waitq)) {
> +               spin_unlock_bh(&net->sctp.addr_wq_lock);
> +               return;
> +       }
> +
>         /* Offsets existing events in addr_wq */
>         addrw = sctp_addr_wq_lookup(net, addr);
>         if (addrw) {
> @@ -808,10 +822,10 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
>                         if (addr->a.sa.sa_family == AF_INET &&
>                                         addr->a.v4.sin_addr.s_addr ==
>                                         ifa->ifa_local) {
> -                               sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
>                                 found = 1;
>                                 addr->valid = 0;
>                                 list_del_rcu(&addr->list);
> +                               sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
>                                 break;
>                         }
>                 }
> --
> 2.34.1
>
patchwork-bot+netdevbpf@kernel.org Nov. 7, 2024, 2:50 p.m. UTC | #2
Hello:

This patch was applied to netdev/net-next.git (main)
by Paolo Abeni <pabeni@redhat.com>:

On Mon,  4 Nov 2024 08:35:44 +0000 you wrote:
> Avoid modifying or enqueuing new events if it's possible to tell that no
> one will consume them.
> 
> Since enqueueing requires searching the current queue for opposite
> events for the same address, adding addresses en masse turns this
> inetaddr_event handler into a bottleneck, as it gets slower with each
> address added.
> 
> [...]

Here is the summary with links:
  - [net-next,v2] sctp: Avoid enqueuing addr events redundantly
    https://git.kernel.org/netdev/net-next/c/702c290a1cb1

You are awesome, thank you!

Patch

diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f7b809c0d142..b96c849545ae 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -103,10 +103,10 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
 			    ipv6_addr_equal(&addr->a.v6.sin6_addr,
 					    &ifa->addr) &&
 			    addr->a.v6.sin6_scope_id == ifa->idev->dev->ifindex) {
-				sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
 				found = 1;
 				addr->valid = 0;
 				list_del_rcu(&addr->list);
+				sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
 				break;
 			}
 		}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 39ca5403d4d7..8b9a1b96695e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -738,6 +738,20 @@ void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cm
 	 */
 
 	spin_lock_bh(&net->sctp.addr_wq_lock);
+
+	/* Avoid searching the queue or modifying it if there are no consumers,
+	 * as it can lead to performance degradation if addresses are modified
+	 * en-masse.
+	 *
+	 * If the queue already contains some events, update it anyway to avoid
+	 * ugly races between new sessions and new address events.
+	 */
+	if (list_empty(&net->sctp.auto_asconf_splist) &&
+	    list_empty(&net->sctp.addr_waitq)) {
+		spin_unlock_bh(&net->sctp.addr_wq_lock);
+		return;
+	}
+
 	/* Offsets existing events in addr_wq */
 	addrw = sctp_addr_wq_lookup(net, addr);
 	if (addrw) {
@@ -808,10 +822,10 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
 			if (addr->a.sa.sa_family == AF_INET &&
 					addr->a.v4.sin_addr.s_addr ==
 					ifa->ifa_local) {
-				sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
 				found = 1;
 				addr->valid = 0;
 				list_del_rcu(&addr->list);
+				sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
 				break;
 			}
 		}
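
One note on the v2 change ("Reorder list removal to avoid race with new
sessions"): with the early exit above, a DEL event can now be silently
dropped when no consumer exists at the moment sctp_addr_wq_mgmt() runs.
My reading of that changelog note -- it is not spelled out in the thread,
so treat this as an interpretation -- is that calling sctp_addr_wq_mgmt()
before list_del_rcu() would leave a window in which a new auto-ASCONF
session copies the local address list while it still contains the address
whose DEL was just dropped. The sequential thought experiment below models
only that ordering question; all names in it are invented for the sketch
and it deliberately ignores RCU and locking.

#include <stdbool.h>
#include <stdio.h>

struct state {
	bool addr_in_local_list;	/* the address is still in the local list */
	bool have_consumer;		/* roughly: auto_asconf_splist non-empty  */
	bool del_event_queued;		/* a DEL event sits in the wait queue     */
	bool session_has_addr;		/* a new session copied the stale address */
};

/* Model of sctp_addr_wq_mgmt(..., SCTP_ADDR_DEL) with the patch's early exit. */
static void wq_mgmt_del(struct state *s)
{
	if (!s->have_consumer && !s->del_event_queued)
		return;			/* nothing will consume it: dropped */
	s->del_event_queued = true;
}

/* A new auto-ASCONF session starts and snapshots the local address list. */
static void new_session(struct state *s)
{
	s->have_consumer = true;
	s->session_has_addr = s->addr_in_local_list;
}

static void report(const char *order, const struct state *s)
{
	bool stale = s->session_has_addr && !s->del_event_queued;

	printf("%-22s session holds deleted addr with no DEL event: %s\n",
	       order, stale ? "yes" : "no");
}

int main(void)
{
	/* v1 order: enqueue the DEL first, remove from the list afterwards,
	 * with a new session slipping in between.
	 */
	struct state v1 = { .addr_in_local_list = true };
	wq_mgmt_del(&v1);		/* dropped: no consumers yet          */
	new_session(&v1);		/* copies the list, still sees addr   */
	v1.addr_in_local_list = false;	/* list_del_rcu()                     */
	report("enqueue-then-remove:", &v1);

	/* v2 order: remove from the list first, then enqueue the DEL. */
	struct state v2 = { .addr_in_local_list = true };
	v2.addr_in_local_list = false;	/* list_del_rcu()                     */
	new_session(&v2);		/* list no longer contains the addr   */
	wq_mgmt_del(&v2);		/* a consumer exists: event is queued */
	report("remove-then-enqueue:", &v2);

	return 0;
}

With the first ordering the model ends with a session that copied the
address but will never receive its DEL; with the second it cannot, which
matches the intent read into the v2 note above.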