Message ID | 20240503223150.6035-3-kuniyu@amazon.com (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to: | Netdev Maintainers |
Series | af_unix: GC cleanup and optimisation |
On Fri, 2024-05-03 at 15:31 -0700, Kuniyuki Iwashima wrote:
> unix_walk_scc_fast() calls unix_scc_cyclic() for every SCC so that we
> can make unix_graph_maybe_cyclic false when all SCC are cleaned up.
> 
> If we count the number of loops in the graph during Tarjan's algorithm,
> we need not call unix_scc_cyclic() in unix_walk_scc_fast().
> 
> Instead, we can just decrement the number when calling unix_collect_skb()
> and update unix_graph_maybe_cyclic based on the count.
> 
> Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> ---
>  net/unix/garbage.c | 19 +++++++++++--------
>  1 file changed, 11 insertions(+), 8 deletions(-)
> 
> diff --git a/net/unix/garbage.c b/net/unix/garbage.c
> index 1f8b8cdfcdc8..7ffb80dd422c 100644
> --- a/net/unix/garbage.c
> +++ b/net/unix/garbage.c
> @@ -405,6 +405,7 @@ static bool unix_scc_cyclic(struct list_head *scc)
>  
>  static LIST_HEAD(unix_visited_vertices);
>  static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
> +static unsigned long unix_graph_circles;
>  
>  static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
>                              struct sk_buff_head *hitlist)
> @@ -494,8 +495,8 @@ static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_inde
>  
>                  if (scc_dead)
>                          unix_collect_skb(&scc, hitlist);
> -                else if (!unix_graph_maybe_cyclic)
> -                        unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> +                else if (unix_scc_cyclic(&scc))
> +                        unix_graph_circles++;
>  
>                  list_del(&scc);
>          }
> @@ -509,7 +510,7 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
>  {
>          unsigned long last_index = UNIX_VERTEX_INDEX_START;
>  
> -        unix_graph_maybe_cyclic = false;
> +        unix_graph_circles = 0;
>  
>          /* Visit every vertex exactly once.
>           * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
> @@ -524,13 +525,12 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
>          list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
>          swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
>  
> +        unix_graph_maybe_cyclic = !!unix_graph_circles;
>          unix_graph_grouped = true;
>  }
>  
>  static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
>  {
> -        unix_graph_maybe_cyclic = false;
> -
>          while (!list_empty(&unix_unvisited_vertices)) {
>                  struct unix_vertex *vertex;
>                  struct list_head scc;
> @@ -546,15 +546,18 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
>                          scc_dead = unix_vertex_dead(vertex);
>                  }
>  
> -                if (scc_dead)
> +                if (scc_dead) {
>                          unix_collect_skb(&scc, hitlist);
> -                else if (!unix_graph_maybe_cyclic)
> -                        unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> +                        unix_graph_circles--;

Possibly WARN_ON_ONCE(unix_graph_circles < 0) ?

I find this patch a little scaring - meaning I can't understand it
fully,
I'm wondering if it would make any sense to postpone this patch
to the next cycle?

Thanks,

Paolo
From: Paolo Abeni <pabeni@redhat.com>
Date: Tue, 07 May 2024 15:54:33 +0200
> On Fri, 2024-05-03 at 15:31 -0700, Kuniyuki Iwashima wrote:
> > unix_walk_scc_fast() calls unix_scc_cyclic() for every SCC so that we
> > can make unix_graph_maybe_cyclic false when all SCC are cleaned up.
> > 
> > If we count the number of loops in the graph during Tarjan's algorithm,
> > we need not call unix_scc_cyclic() in unix_walk_scc_fast().
> > 
> > Instead, we can just decrement the number when calling unix_collect_skb()
> > and update unix_graph_maybe_cyclic based on the count.
> > 
> > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > ---
[...]
> > -                if (scc_dead)
> > +                if (scc_dead) {
> >                          unix_collect_skb(&scc, hitlist);
> > -                else if (!unix_graph_maybe_cyclic)
> > -                        unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> > +                        unix_graph_circles--;
> 
> Possibly WARN_ON_ONCE(unix_graph_circles < 0) ?

Will add in v2.

> 
> I find this patch a little scaring - meaning I can't understand it
> fully,
> I'm wondering if it would make any sense to postpone this patch
> to the next cycle?

It's fine by me to postpone patch 2 - 5, but it would be appreciated
if patch 1 makes it to this cycle.

Thanks!
On Tue, 2024-05-07 at 09:11 -0700, Kuniyuki Iwashima wrote:
> From: Paolo Abeni <pabeni@redhat.com>
> Date: Tue, 07 May 2024 15:54:33 +0200
> > On Fri, 2024-05-03 at 15:31 -0700, Kuniyuki Iwashima wrote:
> > > unix_walk_scc_fast() calls unix_scc_cyclic() for every SCC so that we
> > > can make unix_graph_maybe_cyclic false when all SCC are cleaned up.
[...]
> > > -                if (scc_dead)
> > > +                if (scc_dead) {
> > >                          unix_collect_skb(&scc, hitlist);
> > > -                else if (!unix_graph_maybe_cyclic)
> > > -                        unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> > > +                        unix_graph_circles--;
> > 
> > Possibly WARN_ON_ONCE(unix_graph_circles < 0) ?
> 
> Will add in v2.
> 
> > 
> > I find this patch a little scaring - meaning I can't understand it
> > fully,
> > I'm wondering if it would make any sense to postpone this patch
> > to the next cycle?
> 
> It's fine by me to postpone patch 2 - 5, but it would be appreciated
> if patch 1 makes it to this cycle.

Yes, patch 1 looks fine and safe to me. Feel free to re-submit that one
individually right now, with my Acked-by tag.

Thanks!

Paolo
From: Paolo Abeni <pabeni@redhat.com>
Date: Wed, 08 May 2024 12:08:48 +0200
> On Tue, 2024-05-07 at 09:11 -0700, Kuniyuki Iwashima wrote:
> > From: Paolo Abeni <pabeni@redhat.com>
> > Date: Tue, 07 May 2024 15:54:33 +0200
> > > On Fri, 2024-05-03 at 15:31 -0700, Kuniyuki Iwashima wrote:
> > > > unix_walk_scc_fast() calls unix_scc_cyclic() for every SCC so that we
> > > > can make unix_graph_maybe_cyclic false when all SCC are cleaned up.
[...]
> > > > -                if (scc_dead)
> > > > +                if (scc_dead) {
> > > >                          unix_collect_skb(&scc, hitlist);
> > > > -                else if (!unix_graph_maybe_cyclic)
> > > > -                        unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> > > > +                        unix_graph_circles--;
> > > 
> > > Possibly WARN_ON_ONCE(unix_graph_circles < 0) ?
> > 
> > Will add in v2.
> > 
> > > 
> > > I find this patch a little scaring - meaning I can't understand it
> > > fully,
> > > I'm wondering if it would make any sense to postpone this patch
> > > to the next cycle?
> > 
> > It's fine by me to postpone patch 2 - 5, but it would be appreciated
> > if patch 1 makes it to this cycle.
> 
> Yes, patch 1 looks fine and safe to me. Feel free to re-submit that one
> individually right now, with my Acked-by tag.

Thanks, will do!
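A side note on the sanity check discussed in the thread above: `unix_graph_circles` is declared `unsigned long` in the patch, so a check written literally as `WARN_ON_ONCE(unix_graph_circles < 0)` could never fire; a check of that kind would more plausibly guard the decrement itself. The sketch below is a minimal userspace model of that idea only — `warn_once()` is a hypothetical stand-in for the kernel's WARN_ON_ONCE(), and nothing here claims to match what v2 of the patch actually did.

```c
/* Userspace model only; warn_once() is a stand-in, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long unix_graph_circles;

static bool warn_once(bool cond, const char *msg)
{
        static bool warned;

        if (cond && !warned) {
                warned = true;
                fprintf(stderr, "WARNING: %s\n", msg);
        }
        return cond;
}

/* The counter is unsigned, so "< 0" after the fact is never true;
 * instead, refuse (and warn about) a decrement that would underflow.
 */
static void collect_dead_scc(void)
{
        if (!warn_once(!unix_graph_circles, "unix_graph_circles underflow"))
                unix_graph_circles--;
}

int main(void)
{
        unix_graph_circles = 1;

        collect_dead_scc();     /* 1 -> 0, no warning */
        collect_dead_scc();     /* would underflow: warns once, stays at 0 */

        printf("unix_graph_circles = %lu\n", unix_graph_circles);
        return 0;
}
```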
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 1f8b8cdfcdc8..7ffb80dd422c 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -405,6 +405,7 @@ static bool unix_scc_cyclic(struct list_head *scc)
 
 static LIST_HEAD(unix_visited_vertices);
 static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
+static unsigned long unix_graph_circles;
 
 static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
                             struct sk_buff_head *hitlist)
@@ -494,8 +495,8 @@ static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_inde
 
                 if (scc_dead)
                         unix_collect_skb(&scc, hitlist);
-                else if (!unix_graph_maybe_cyclic)
-                        unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+                else if (unix_scc_cyclic(&scc))
+                        unix_graph_circles++;
 
                 list_del(&scc);
         }
@@ -509,7 +510,7 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
 {
         unsigned long last_index = UNIX_VERTEX_INDEX_START;
 
-        unix_graph_maybe_cyclic = false;
+        unix_graph_circles = 0;
 
         /* Visit every vertex exactly once.
          * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
@@ -524,13 +525,12 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
         list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
         swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
 
+        unix_graph_maybe_cyclic = !!unix_graph_circles;
         unix_graph_grouped = true;
 }
 
 static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
 {
-        unix_graph_maybe_cyclic = false;
-
         while (!list_empty(&unix_unvisited_vertices)) {
                 struct unix_vertex *vertex;
                 struct list_head scc;
@@ -546,15 +546,18 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
                         scc_dead = unix_vertex_dead(vertex);
                 }
 
-                if (scc_dead)
+                if (scc_dead) {
                         unix_collect_skb(&scc, hitlist);
-                else if (!unix_graph_maybe_cyclic)
-                        unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+                        unix_graph_circles--;
+                }
 
                 list_del(&scc);
         }
 
         list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
+
+        if (!unix_graph_circles)
+                unix_graph_maybe_cyclic = false;
 }
 
 static bool gc_in_progress;
unix_walk_scc_fast() calls unix_scc_cyclic() for every SCC so that we
can make unix_graph_maybe_cyclic false when all SCC are cleaned up.

If we count the number of loops in the graph during Tarjan's algorithm,
we need not call unix_scc_cyclic() in unix_walk_scc_fast().

Instead, we can just decrement the number when calling unix_collect_skb()
and update unix_graph_maybe_cyclic based on the count.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
---
 net/unix/garbage.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
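For readers who want to see the bookkeeping in isolation, here is a minimal userspace sketch of the scheme the commit message describes: the full (Tarjan) pass counts the SCCs that look cyclic, the fast pass only adjusts that count when a previously counted SCC is collected, and the "maybe cyclic" hint is derived from whether the count is still non-zero. Every name and data structure below is an illustrative placeholder, not the kernel's; the graph walk itself is stubbed out.

```c
/* Standalone sketch of the counting scheme; not the kernel implementation. */
#include <stdbool.h>
#include <stdio.h>

struct scc {
        bool cyclic;    /* SCC has more than one vertex or a self-loop */
        bool dead;      /* every socket in the SCC is already closed */
};

static unsigned long graph_circles;     /* cyclic SCCs still alive */
static bool graph_maybe_cyclic;         /* hint: is a GC run worthwhile? */

/* Full pass (Tarjan's algorithm in the kernel): recount cyclic SCCs. */
static void walk_scc(struct scc *sccs, int n)
{
        graph_circles = 0;

        for (int i = 0; i < n; i++) {
                if (sccs[i].dead)
                        continue;       /* would be collected here */
                if (sccs[i].cyclic)
                        graph_circles++;
        }

        graph_maybe_cyclic = !!graph_circles;
}

/* Fast pass: no cycle detection; drop the count when a counted SCC dies.
 * (In this model the decrement is tied to the cyclic flag; the patch
 * decrements for every collected dead SCC.)
 */
static void walk_scc_fast(struct scc *sccs, int n)
{
        for (int i = 0; i < n; i++) {
                if (sccs[i].dead && sccs[i].cyclic) {
                        sccs[i].cyclic = false; /* collected exactly once */
                        graph_circles--;
                }
        }

        if (!graph_circles)
                graph_maybe_cyclic = false;
}

int main(void)
{
        struct scc sccs[] = {
                { .cyclic = true },     /* e.g. two sockets holding each other */
                { .cyclic = true },
                { .cyclic = false },    /* a plain in-flight fd, no loop */
        };

        walk_scc(sccs, 3);
        printf("full pass: circles=%lu maybe_cyclic=%d\n",
               graph_circles, graph_maybe_cyclic);

        sccs[0].dead = true;            /* first loop becomes garbage */
        walk_scc_fast(sccs, 3);
        printf("fast pass: circles=%lu maybe_cyclic=%d\n",
               graph_circles, graph_maybe_cyclic);

        sccs[1].dead = true;            /* last loop becomes garbage */
        walk_scc_fast(sccs, 3);
        printf("fast pass: circles=%lu maybe_cyclic=%d\n",
               graph_circles, graph_maybe_cyclic);

        return 0;
}
```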