@@ -1786,7 +1786,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
goto failure;

err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
- NULL);
+ extack);
if (err < 0)
goto failure;
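The change in this first hunk just threads the caller's extack through to the attribute parser. With a non-NULL extack, nla_parse_nested_deprecated() reports policy violations (wrong attribute type, length or range) back over netlink, including a pointer to the offending attribute, instead of returning a bare error code with no explanation. For orientation, an abridged sketch of the structure being passed around; the real definition in include/linux/netlink.h has more fields:

struct netlink_ext_ack {
	const char		*_msg;		/* human-readable error string */
	const struct nlattr	*bad_attr;	/* attribute that failed validation */
	const struct nla_policy	*policy;	/* policy it was checked against */
	/* ... cookie and formatted-message buffer omitted ... */
};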
@@ -1858,7 +1858,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,

/* check maximal depth */
if (parent && parent->parent && parent->parent->level < 2) {
- pr_err("htb: tree is too deep\n");
+ NL_SET_ERR_MSG_MOD(extack, "tree is too deep");
goto failure;
}
err = -ENOBUFS;
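NL_SET_ERR_MSG_MOD() stores a pointer to the string literal in the extack and prefixes it with KBUILD_MODNAME, so the failure now comes back on the netlink socket as "sch_htb: tree is too deep" instead of only landing in the kernel log. A reduced sketch of the macro pair (the SKETCH_ names mark it as illustration; the real definitions in include/linux/netlink.h also hook a tracepoint):

#define SKETCH_NL_SET_ERR_MSG(extack, msg) do {				\
	static const char __msg[] = msg;	/* must be a string literal */	\
	struct netlink_ext_ack *__ea = (extack);			\
									\
	if (__ea)							\
		__ea->_msg = __msg;					\
} while (0)

#define SKETCH_NL_SET_ERR_MSG_MOD(extack, msg)				\
	SKETCH_NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)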
@@ -1917,8 +1917,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
};
err = htb_offload(dev, &offload_opt);
if (err) {
- pr_err("htb: TC_HTB_LEAF_ALLOC_QUEUE failed with err = %d\n",
- err);
+ NL_SET_ERR_MSG_WEAK(extack,
+ "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE");
goto err_kill_estimator;
}
dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
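The offload failures use the _WEAK variant rather than _MOD. htb_offload() ends up in the driver's ndo_setup_tc() handler, which receives the same extack and may already have attached a more precise reason for the failure; the weak variant only fills in a message when none is set, so "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE" acts as a fallback rather than overwriting the driver's diagnosis. The same reasoning applies to the TC_HTB_LEAF_TO_INNER hunk below. A behavioural sketch, simplified from include/linux/netlink.h:

#define SKETCH_NL_SET_ERR_MSG_WEAK(extack, msg) do {	\
	if ((extack) && !(extack)->_msg)		\
		NL_SET_ERR_MSG((extack), msg);		\
} while (0)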
@@ -1937,8 +1937,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
};
err = htb_offload(dev, &offload_opt);
if (err) {
- pr_err("htb: TC_HTB_LEAF_TO_INNER failed with err = %d\n",
- err);
+ NL_SET_ERR_MSG_WEAK(extack,
+ "Failed to offload TC_HTB_LEAF_TO_INNER");
htb_graft_helper(dev_queue, old_q);
goto err_kill_estimator;
}
@@ -2067,8 +2067,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
qdisc_put(parent_qdisc);

if (warn)
- pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
- cl->common.classid, (warn == -1 ? "small" : "big"));
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "quantum of class %X is %s. Consider r2q change.",
+ cl->common.classid, (warn == -1 ? "small" : "big"));
qdisc_class_hash_grow(sch, &q->clhash);
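The last conversion needs runtime values (the classid and small/big), which the plain macros cannot carry because they only store a pointer to a literal. NL_SET_ERR_MSG_FMT_MOD() instead formats into a small fixed-size buffer (NETLINK_MAX_FMTMSG_LEN bytes) embedded in the extack, so the user still gets something like "sch_htb: quantum of class 10001 is small. Consider r2q change." Two side effects are worth noting: extack messages are delivered even when the operation succeeds, and iproute2 typically prints them as a warning in that case, so the r2q hint still reaches the user; on the other hand, unlike pr_warn(), nothing is written to the kernel log any more. A reduced sketch of the formatted variants (truncation handling omitted):

#define SKETCH_NL_SET_ERR_MSG_FMT(extack, fmt, args...) do {		\
	struct netlink_ext_ack *__ea = (extack);			\
									\
	if (__ea && snprintf(__ea->_msg_buf, NETLINK_MAX_FMTMSG_LEN,	\
			     fmt, ##args) >= 0)				\
		__ea->_msg = __ea->_msg_buf;				\
} while (0)

#define SKETCH_NL_SET_ERR_MSG_FMT_MOD(extack, fmt, args...)		\
	SKETCH_NL_SET_ERR_MSG_FMT((extack), KBUILD_MODNAME ": " fmt, ##args)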