@@ -37,6 +37,31 @@ static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params,
 	return netdev_cap_txqueue(skb->dev, queue_mapping);
 }
 
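+/* Egress: if the configured queue is within the device's real TX queue
+ * count, skip the stack's later TX queue selection (CONFIG_NET_EGRESS)
+ * and apply the mapping, possibly hashed across a range of queues.
+ */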
+static void tcf_skbedit_act_txq(struct tcf_skbedit_params *params,
+				struct sk_buff *skb)
+{
+	if (skb->dev->real_num_tx_queues > params->queue_mapping) {
+#ifdef CONFIG_NET_EGRESS
+		netdev_xmit_skip_txqueue(true);
+#endif
+		skb_set_queue_mapping(skb, tcf_skbedit_hash(params, skb));
+	}
+}
+
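+/* Ingress: record the configured queue as the skb's RX queue when it is
+ * within the device's real RX queue count.
+ */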
+static void tcf_skbedit_act_rxq(struct tcf_skbedit_params *params,
+				struct sk_buff *skb)
+{
+	if (skb->dev->real_num_rx_queues > params->queue_mapping)
+		skb_record_rx_queue(skb, params->queue_mapping);
+}
+
 static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
 			   struct tcf_result *res)
 {
@@ -71,12 +96,14 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
 			break;
 		}
 	}
-	if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
-	    skb->dev->real_num_tx_queues > params->queue_mapping) {
-#ifdef CONFIG_NET_EGRESS
-		netdev_xmit_skip_txqueue(true);
-#endif
-		skb_set_queue_mapping(skb, tcf_skbedit_hash(params, skb));
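+	/* Queue mapping depends on direction: set the TX queue on egress,
+	 * record the RX queue on ingress.
+	 */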
+	if (params->flags & SKBEDIT_F_QUEUE_MAPPING) {
+		if (!skb_at_tc_ingress(skb))
+			tcf_skbedit_act_txq(params, skb);
+		else
+			tcf_skbedit_act_rxq(params, skb);
 	}
 	if (params->flags & SKBEDIT_F_MARK) {
 		skb->mark &= ~params->mask;