@@ -738,6 +738,7 @@ enum {
TCA_MQPRIO_SHAPER,
TCA_MQPRIO_MIN_RATE64,
TCA_MQPRIO_MAX_RATE64,
+ TCA_MQPRIO_PREEMPT_TCS,
__TCA_MQPRIO_MAX,
};
@@ -23,6 +23,7 @@ struct mqprio_sched {
u16 shaper;
int hw_offload;
u32 flags;
+ u32 preemptible_tcs;
u64 min_rate[TC_QOPT_MAX_QUEUE];
u64 max_rate[TC_QOPT_MAX_QUEUE];
};
@@ -33,6 +34,13 @@ static void mqprio_destroy(struct Qdisc *sch)
struct mqprio_sched *priv = qdisc_priv(sch);
unsigned int ntx;
+ if (priv->preemptible_tcs && dev->netdev_ops->ndo_setup_tc) {
+ struct tc_preempt_qopt_offload preempt = { };
+
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_PREEMPT,
+ &preempt);
+ }
+
if (priv->qdiscs) {
for (ntx = 0;
ntx < dev->num_tx_queues && priv->qdiscs[ntx];
@@ -112,6 +120,7 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
[TCA_MQPRIO_MODE] = { .len = sizeof(u16) },
[TCA_MQPRIO_SHAPER] = { .len = sizeof(u16) },
+ [TCA_MQPRIO_PREEMPT_TCS] = { .type = NLA_U32 },
[TCA_MQPRIO_MIN_RATE64] = { .type = NLA_NESTED },
[TCA_MQPRIO_MAX_RATE64] = { .type = NLA_NESTED },
};
@@ -171,8 +180,17 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
return err;
- if (!qopt->hw)
- return -EINVAL;
+ if (tb[TCA_MQPRIO_PREEMPT_TCS]) {
+ u32 preempt = nla_get_u32(tb[TCA_MQPRIO_PREEMPT_TCS]);
+ u32 all_tcs_mask = GENMASK(qopt->num_tc - 1, 0);
+
+ if ((preempt & all_tcs_mask) == all_tcs_mask) {
+ NL_SET_ERR_MSG(extack, "At least one traffic class must not be preemptible");
+ return -EINVAL;
+ }
+
+ priv->preemptible_tcs = preempt;
+ }
if (tb[TCA_MQPRIO_MODE]) {
priv->flags |= TC_MQPRIO_F_MODE;
@@ -217,6 +235,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
}
}
+ if (!qopt->hw && priv->flags)
+ return -EINVAL;
+
/* pre-allocate qdisc, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL);
@@ -282,6 +303,18 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
for (i = 0; i < TC_BITMASK + 1; i++)
netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);
+ if (priv->preemptible_tcs) {
+ struct tc_preempt_qopt_offload preempt = { };
+
+ preempt.preemptible_queues =
+ netdev_tc_map_to_queue_mask(dev, priv->preemptible_tcs);
+
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_PREEMPT,
+ &preempt);
+ if (err)
+ return err;
+ }
+
sch->flags |= TCQ_F_MQROOT;
return 0;
}
@@ -450,6 +483,10 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
(dump_rates(priv, &opt, skb) != 0))
goto nla_put_failure;
+ if (priv->preemptible_tcs &&
+ nla_put_u32(skb, TCA_MQPRIO_PREEMPT_TCS, priv->preemptible_tcs))
+ goto nla_put_failure;
+
return nla_nest_end(skb, nla);
nla_put_failure:
nlmsg_trim(skb, nla);
Adds a way to configure which traffic classes are marked as preemptible and which are marked as express. Even if frame preemption is not a "real" offload, because it can't be executed purely in software, having this information near where the mapping of traffic classes to queues is specified, makes it, hopefully, easier to use. mqprio will receive the information of which traffic classes are marked as express/preemptible, and when offloading frame preemption to the driver it will convert that information, so the driver receives which queues are marked as express/preemptible. Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com> --- include/uapi/linux/pkt_sched.h | 1 + net/sched/sch_mqprio.c | 41 ++++++++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-)