Message ID | 20240215160458.1727237-3-ast@fiberby.net (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to: | Netdev Maintainers |
Headers | show |
Series | make skip_sw actually skip software | expand |
Thu, Feb 15, 2024 at 05:04:43PM CET, ast@fiberby.net wrote: >Maintain a count of filters per block. > >Counter updates are protected by cb_lock, which is >also used to protect the offload counters. > >Signed-off-by: Asbjørn Sloth Tønnesen <ast@fiberby.net> >--- > include/net/sch_generic.h | 2 ++ > net/sched/cls_api.c | 20 ++++++++++++++++++++ > 2 files changed, 22 insertions(+) > >diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h >index 46a63d1818a0..7af0621db226 100644 >--- a/include/net/sch_generic.h >+++ b/include/net/sch_generic.h >@@ -427,6 +427,7 @@ struct tcf_proto { > */ > spinlock_t lock; > bool deleting; >+ bool counted; > refcount_t refcnt; > struct rcu_head rcu; > struct hlist_node destroy_ht_node; >@@ -476,6 +477,7 @@ struct tcf_block { > struct flow_block flow_block; > struct list_head owner_list; > bool keep_dst; >+ atomic_t filtercnt; /* Number of filters */ > atomic_t skipswcnt; /* Number of skip_sw filters */ > atomic_t offloadcnt; /* Number of oddloaded filters */ > unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */ >diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c >index 397c3d29659c..c750cb662142 100644 >--- a/net/sched/cls_api.c >+++ b/net/sched/cls_api.c >@@ -411,11 +411,13 @@ static void tcf_proto_get(struct tcf_proto *tp) > } > > static void tcf_chain_put(struct tcf_chain *chain); >+static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add); > > static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held, > bool sig_destroy, struct netlink_ext_ack *extack) > { > tp->ops->destroy(tp, rtnl_held, extack); >+ tcf_block_filter_cnt_update(tp->chain->block, &tp->counted, false); > if (sig_destroy) > tcf_proto_signal_destroyed(tp->chain, tp); > tcf_chain_put(tp->chain); >@@ -2364,6 +2366,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, > err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, > flags, extack); > if (err == 0) { >+ tcf_block_filter_cnt_update(block, &tp->counted, true); > tfilter_notify(net, skb, n, tp, block, q, parent, fh, > RTM_NEWTFILTER, false, rtnl_held, extack); > tfilter_put(tp, fh); >@@ -3478,6 +3481,23 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) > } > EXPORT_SYMBOL(tcf_exts_dump_stats); > >+static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add) Can't you move this up to avoid forward declaration? >+{ >+ lockdep_assert_not_held(&block->cb_lock); >+ >+ down_write(&block->cb_lock); >+ if (*counted != add) { >+ if (add) { >+ atomic_inc(&block->filtercnt); >+ *counted = true; >+ } else { >+ atomic_dec(&block->filtercnt); >+ *counted = false; >+ } >+ } >+ up_write(&block->cb_lock); >+} >+ > static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) > { > if (*flags & TCA_CLS_FLAGS_IN_HW) >-- >2.43.0 >
Hi Jiri, Thanks for the review. On 2/15/24 17:25, Jiri Pirko wrote: > Thu, Feb 15, 2024 at 05:04:43PM CET, ast@fiberby.net wrote: >> Maintain a count of filters per block. >> >> Counter updates are protected by cb_lock, which is >> also used to protect the offload counters. >> >> Signed-off-by: Asbjørn Sloth Tønnesen <ast@fiberby.net> >> --- >> include/net/sch_generic.h | 2 ++ >> net/sched/cls_api.c | 20 ++++++++++++++++++++ >> 2 files changed, 22 insertions(+) >> >> diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h >> index 46a63d1818a0..7af0621db226 100644 >> --- a/include/net/sch_generic.h >> +++ b/include/net/sch_generic.h >> @@ -427,6 +427,7 @@ struct tcf_proto { >> */ >> spinlock_t lock; >> bool deleting; >> + bool counted; >> refcount_t refcnt; >> struct rcu_head rcu; >> struct hlist_node destroy_ht_node; >> @@ -476,6 +477,7 @@ struct tcf_block { >> struct flow_block flow_block; >> struct list_head owner_list; >> bool keep_dst; >> + atomic_t filtercnt; /* Number of filters */ >> atomic_t skipswcnt; /* Number of skip_sw filters */ >> atomic_t offloadcnt; /* Number of oddloaded filters */ >> unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */ >> diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c >> index 397c3d29659c..c750cb662142 100644 >> --- a/net/sched/cls_api.c >> +++ b/net/sched/cls_api.c >> @@ -411,11 +411,13 @@ static void tcf_proto_get(struct tcf_proto *tp) >> } >> >> static void tcf_chain_put(struct tcf_chain *chain); >> +static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add); >> >> static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held, >> bool sig_destroy, struct netlink_ext_ack *extack) >> { >> tp->ops->destroy(tp, rtnl_held, extack); >> + tcf_block_filter_cnt_update(tp->chain->block, &tp->counted, false); >> if (sig_destroy) >> tcf_proto_signal_destroyed(tp->chain, tp); >> tcf_chain_put(tp->chain); >> @@ -2364,6 +2366,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, >> err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, >> flags, extack); >> if (err == 0) { >> + tcf_block_filter_cnt_update(block, &tp->counted, true); >> tfilter_notify(net, skb, n, tp, block, q, parent, fh, >> RTM_NEWTFILTER, false, rtnl_held, extack); >> tfilter_put(tp, fh); >> @@ -3478,6 +3481,23 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) >> } >> EXPORT_SYMBOL(tcf_exts_dump_stats); >> >> +static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add) > > Can't you move this up to avoid forward declaration? Sure, will do that in v2. I had considered it, but in the end decided to keep it next to the related offloadcnt logic. >> +{ >> + lockdep_assert_not_held(&block->cb_lock); >> + >> + down_write(&block->cb_lock); >> + if (*counted != add) { >> + if (add) { >> + atomic_inc(&block->filtercnt); >> + *counted = true; >> + } else { >> + atomic_dec(&block->filtercnt); >> + *counted = false; >> + } >> + } >> + up_write(&block->cb_lock); >> +} >> + >> static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) >> { >> if (*flags & TCA_CLS_FLAGS_IN_HW) >> -- >> 2.43.0 >>
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 46a63d1818a0..7af0621db226 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -427,6 +427,7 @@ struct tcf_proto { */ spinlock_t lock; bool deleting; + bool counted; refcount_t refcnt; struct rcu_head rcu; struct hlist_node destroy_ht_node; @@ -476,6 +477,7 @@ struct tcf_block { struct flow_block flow_block; struct list_head owner_list; bool keep_dst; + atomic_t filtercnt; /* Number of filters */ atomic_t skipswcnt; /* Number of skip_sw filters */ atomic_t offloadcnt; /* Number of oddloaded filters */ unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */ diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 397c3d29659c..c750cb662142 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -411,11 +411,13 @@ static void tcf_proto_get(struct tcf_proto *tp) } static void tcf_chain_put(struct tcf_chain *chain); +static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add); static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held, bool sig_destroy, struct netlink_ext_ack *extack) { tp->ops->destroy(tp, rtnl_held, extack); + tcf_block_filter_cnt_update(tp->chain->block, &tp->counted, false); if (sig_destroy) tcf_proto_signal_destroyed(tp->chain, tp); tcf_chain_put(tp->chain); @@ -2364,6 +2366,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, flags, extack); if (err == 0) { + tcf_block_filter_cnt_update(block, &tp->counted, true); tfilter_notify(net, skb, n, tp, block, q, parent, fh, RTM_NEWTFILTER, false, rtnl_held, extack); tfilter_put(tp, fh); @@ -3478,6 +3481,23 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) } EXPORT_SYMBOL(tcf_exts_dump_stats); +static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add) +{ + lockdep_assert_not_held(&block->cb_lock); + + down_write(&block->cb_lock); + if (*counted != add) { + if (add) { + atomic_inc(&block->filtercnt); + *counted = true; + } else { + atomic_dec(&block->filtercnt); + *counted = false; + } + } + up_write(&block->cb_lock); +} + static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) { if (*flags & TCA_CLS_FLAGS_IN_HW)
Maintain a count of filters per block. Counter updates are protected by cb_lock, which is also used to protect the offload counters. Signed-off-by: Asbjørn Sloth Tønnesen <ast@fiberby.net> --- include/net/sch_generic.h | 2 ++ net/sched/cls_api.c | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+)