@@ -165,6 +165,7 @@ enum nf_flow_flags {
NF_FLOW_HW_DEAD,
NF_FLOW_HW_PENDING,
NF_FLOW_HW_BIDIRECTIONAL,
+ NF_FLOW_HW_UPDATE,
};
enum flow_offload_type {
@@ -300,7 +301,7 @@ unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
#define MODULE_ALIAS_NF_FLOWTABLE(family) \
MODULE_ALIAS("nf-flowtable-" __stringify(family))
-void nf_flow_offload_add(struct nf_flowtable *flowtable,
+bool nf_flow_offload_add(struct nf_flowtable *flowtable,
struct flow_offload *flow);
void nf_flow_offload_del(struct nf_flowtable *flowtable,
struct flow_offload *flow);
@@ -316,21 +316,28 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
}
EXPORT_SYMBOL_GPL(flow_offload_add);
+static bool __flow_offload_refresh(struct nf_flowtable *flow_table,
+ struct flow_offload *flow)
+{
+ if (likely(!nf_flowtable_hw_offload(flow_table)))
+ return true;
+
+ return nf_flow_offload_add(flow_table, flow);
+}
+
void flow_offload_refresh(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
u32 timeout;
timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
- if (timeout - READ_ONCE(flow->timeout) > HZ)
+ if (timeout - READ_ONCE(flow->timeout) > HZ &&
+ !test_bit(NF_FLOW_HW_UPDATE, &flow->flags))
WRITE_ONCE(flow->timeout, timeout);
else
return;
- if (likely(!nf_flowtable_hw_offload(flow_table)))
- return;
-
- nf_flow_offload_add(flow_table, flow);
+ __flow_offload_refresh(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);
@@ -435,6 +442,9 @@ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
} else {
flow_offload_del(flow_table, flow);
}
+ } else if (test_and_clear_bit(NF_FLOW_HW_UPDATE, &flow->flags)) {
+ if (!__flow_offload_refresh(flow_table, flow))
+ set_bit(NF_FLOW_HW_UPDATE, &flow->flags);
} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
nf_flow_offload_stats(flow_table, flow);
}
@@ -1036,16 +1036,17 @@ nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
}
-void nf_flow_offload_add(struct nf_flowtable *flowtable,
+bool nf_flow_offload_add(struct nf_flowtable *flowtable,
struct flow_offload *flow)
{
struct flow_offload_work *offload;
offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE);
if (!offload)
- return;
+ return false;
flow_offload_queue_work(offload);
+ return true;
}
void nf_flow_offload_del(struct nf_flowtable *flowtable,
Following patches in the series need to update the flowtable rule several times during its lifetime in order to synchronize hardware offload with the actual ct status. However, reusing the existing 'refresh' logic in act_ct would cause the data path to potentially schedule a significant amount of spurious tasks on the 'add' workqueue since it is executed per-packet. Instead, introduce a new flow 'update' flag and use it to schedule an async flow refresh in flowtable gc which will only be executed once per gc iteration. Signed-off-by: Vlad Buslov <vladbu@nvidia.com> --- include/net/netfilter/nf_flow_table.h | 3 ++- net/netfilter/nf_flow_table_core.c | 20 +++++++++++++++----- net/netfilter/nf_flow_table_offload.c | 5 +++-- 3 files changed, 20 insertions(+), 8 deletions(-)