diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -92,6 +92,7 @@ enum flow_offload_tuple_dir {
enum flow_offload_xmit_type {
FLOW_OFFLOAD_XMIT_NEIGH = 0,
FLOW_OFFLOAD_XMIT_XFRM,
+ FLOW_OFFLOAD_XMIT_DIRECT,
};

struct flow_offload_tuple {
@@ -119,6 +120,8 @@ struct flow_offload_tuple {
u16 mtu;
u32 oifidx;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
struct dst_entry *dst_cache;
};
@@ -167,6 +170,8 @@ struct nf_flow_route {
} in;
struct {
u32 ifindex;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
} out;
struct dst_entry *dst;
} tuple[FLOW_OFFLOAD_DIR_MAX];
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -94,6 +94,8 @@ static int flow_offload_fill_route(struct flow_offload *flow,
}
flow_tuple->iifidx = route->tuple[dir].in.ifindex;
+ memcpy(flow_tuple->h_dest, route->tuple[dir].out.h_dest, ETH_ALEN);
+ memcpy(flow_tuple->h_source, route->tuple[dir].out.h_source, ETH_ALEN);
flow_tuple->oifidx = route->tuple[dir].out.ifindex;
if (dst_xfrm(dst))
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -247,6 +247,24 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
return NF_STOLEN;
}

+static unsigned int nf_flow_queue_xmit(struct sk_buff *skb,
+ struct net_device *outdev,
+ const struct flow_offload_tuple *tuple)
+{
+ struct ethhdr *eth;
+
+ skb->dev = outdev;
+ skb_push(skb, skb->mac_len);
+ skb_reset_mac_header(skb);
+
+ eth = eth_hdr(skb);
+ memcpy(eth->h_source, tuple->h_source, ETH_ALEN);
+ memcpy(eth->h_dest, tuple->h_dest, ETH_ALEN);
+ dev_queue_xmit(skb);
+
+ return NF_STOLEN;
+}
+
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -325,6 +343,9 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
IPCB(skb)->flags = IPSKB_FORWARDED;
ret = nf_flow_xmit_xfrm(skb, state, &rt->dst);
break;
+ case FLOW_OFFLOAD_XMIT_DIRECT:
+ ret = nf_flow_queue_xmit(skb, outdev, &tuplehash->tuple);
+ break;
default:
WARN_ON_ONCE(1);
ret = NF_DROP;
@@ -581,6 +602,9 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
IP6CB(skb)->flags = IP6SKB_FORWARDED;
ret = nf_flow_xmit_xfrm(skb, state, &rt->dst);
break;
+ case FLOW_OFFLOAD_XMIT_DIRECT:
+ ret = nf_flow_queue_xmit(skb, outdev, &tuplehash->tuple);
+ break;
default:
WARN_ON_ONCE(1);
ret = NF_DROP;
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -21,12 +21,11 @@ struct nft_flow_offload {
static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
+ enum ip_conntrack_dir dir, u8 *dst,
struct net_device_path_stack *stack)
{
const struct dst_entry *dst_cache = route->tuple[dir].dst;
const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
- unsigned char ha[ETH_ALEN];
struct net_device *dev;
struct neighbour *n;
struct rtable *rt;
@@ -41,18 +40,20 @@ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
read_lock_bh(&n->lock);
nud_state = n->nud_state;
- ether_addr_copy(ha, n->ha);
+ ether_addr_copy(dst, n->ha);
read_unlock_bh(&n->lock);
neigh_release(n);
if (!(nud_state & NUD_VALID))
return -1;
- return dev_fill_forward_path(dev, ha, stack);
+ return dev_fill_forward_path(dev, dst, stack);
}

struct nft_forward_info {
u32 iifindex;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
};

static int nft_dev_forward_path(struct nf_flow_route *route,
@@ -62,11 +63,12 @@ static int nft_dev_forward_path(struct nf_flow_route *route,
struct net_device_path_stack stack = {};
struct nft_forward_info info = {};
struct net_device_path *path;
+ unsigned char dst[ETH_ALEN];
int i, ret;
memset(&stack, 0, sizeof(stack));
- ret = nft_dev_fill_forward_path(route, ct, dir, &stack);
+ ret = nft_dev_fill_forward_path(route, ct, dir, dst, &stack);
if (ret < 0)
return -1;
@@ -74,6 +76,8 @@ static int nft_dev_forward_path(struct nf_flow_route *route,
path = &stack.path[i];
switch (path->type) {
case DEV_PATH_ETHERNET:
+ memcpy(info.h_dest, path->dev->dev_addr, ETH_ALEN);
+ memcpy(info.h_source, dst, ETH_ALEN);
info.iifindex = path->dev->ifindex;
break;
case DEV_PATH_VLAN:
@@ -84,6 +88,8 @@ static int nft_dev_forward_path(struct nf_flow_route *route,
}
route->tuple[!dir].in.ifindex = info.iifindex;
+ memcpy(route->tuple[dir].out.h_dest, info.h_source, ETH_ALEN);
+ memcpy(route->tuple[dir].out.h_source, info.h_dest, ETH_ALEN);
route->tuple[dir].out.ifindex = info.iifindex;
return 0;
Add FLOW_OFFLOAD_XMIT_DIRECT to turn on the direct dev_queue_xmit() path
to transmit Ethernet frames. Cache the source and destination hardware
addresses so the flow can use dev_queue_xmit() to transfer packets.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
 include/net/netfilter/nf_flow_table.h |  5 +++++
 net/netfilter/nf_flow_table_core.c    |  2 ++
 net/netfilter/nf_flow_table_ip.c      | 24 ++++++++++++++++++++++++
 net/netfilter/nft_flow_offload.c      | 16 +++++++++++-----
 4 files changed, 42 insertions(+), 5 deletions(-)
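For reference, the per-packet work on the new FLOW_OFFLOAD_XMIT_DIRECT path
boils down to rewriting the Ethernet addresses from state cached at flow
setup time, then handing the frame to dev_queue_xmit(). Below is a minimal
userspace model of that rewrite; this is a sketch only: tuple_model,
eth_model and direct_xmit() are illustrative stand-ins rather than kernel
symbols, and the transmit step is replaced by printing the rewritten header.

/* Userspace model of the FLOW_OFFLOAD_XMIT_DIRECT header rewrite.
 * Illustrative sketch only: tuple_model and eth_model are simplified
 * stand-ins for flow_offload_tuple and struct ethhdr, and direct_xmit()
 * prints the frame instead of calling dev_queue_xmit().
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

/* The two fields this patch adds to the flow tuple. */
struct tuple_model {
	uint8_t h_source[ETH_ALEN];	/* egress device address */
	uint8_t h_dest[ETH_ALEN];	/* resolved next-hop address */
};

struct eth_model {
	uint8_t h_dest[ETH_ALEN];
	uint8_t h_source[ETH_ALEN];
	uint16_t h_proto;
};

static void print_mac(const char *tag, const uint8_t *mac)
{
	printf("%s %02x:%02x:%02x:%02x:%02x:%02x\n", tag,
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}

/* Mirrors nf_flow_queue_xmit(): overwrite the Ethernet addresses with
 * the ones cached in the tuple, then transmit the frame.
 */
static void direct_xmit(struct eth_model *eth, const struct tuple_model *t)
{
	memcpy(eth->h_source, t->h_source, ETH_ALEN);
	memcpy(eth->h_dest, t->h_dest, ETH_ALEN);
	print_mac("xmit src:", eth->h_source);	/* dev_queue_xmit() here */
	print_mac("xmit dst:", eth->h_dest);
}

int main(void)
{
	/* What the flow setup path caches: the egress device address
	 * becomes the frame's source MAC, the neighbour's address the
	 * destination MAC (example addresses, locally administered). */
	struct tuple_model t = {
		.h_source = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
		.h_dest   = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
	};
	struct eth_model eth = { .h_proto = 0x0800 };	/* ETH_P_IP */

	direct_xmit(&eth, &t);
	return 0;
}

The address bookkeeping mirrors the nft_dev_forward_path() hunk above: the
egress device address ends up as the frame's source MAC and the resolved
neighbour address as the destination MAC, so no neighbour lookup is needed
per packet.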