@@ -199,8 +199,10 @@ from lib.py import ethtool, ip, defer, GenerateTraffic, CmdExitFailure
GenerateTraffic(cfg, port=ports[i]).wait_pkts_and_stop(20000)
cnts = _get_rx_cnts(cfg, prev=cnts)
- ksft_lt(sum(cnts[ :2]), 10000, "traffic on main context:" + str(cnts))
- ksft_ge(sum(cnts[2+i*2:4+i*2]), 20000, f"traffic on context {i}: " + str(cnts))
+ directed = sum(cnts[2+i*2:4+i*2])
+
+ ksft_lt(sum(cnts[ :2]), directed / 2, "traffic on main context:" + str(cnts))
+ ksft_ge(directed, 20000, f"traffic on context {i}: " + str(cnts))
ksft_eq(sum(cnts[2:2+i*2] + cnts[4+i*2:]), 0, "traffic on other contexts: " + str(cnts))
if requested_ctx_cnt != ctx_cnt:
@@ -258,8 +260,9 @@ from lib.py import ethtool, ip, defer, GenerateTraffic, CmdExitFailure
cnts = _get_rx_cnts(cfg, prev=cnts)
if ctx[i]:
- ksft_lt(sum(cnts[ :2]), 10000, "traffic on main context:" + str(cnts))
- ksft_ge(sum(cnts[2+i*2:4+i*2]), 20000, f"traffic on context {i}: " + str(cnts))
+ directed = sum(cnts[2+i*2:4+i*2])
+ ksft_lt(sum(cnts[ :2]), directed / 2, "traffic on main context:" + str(cnts))
+ ksft_ge(directed, 20000, f"traffic on context {i}: " + str(cnts))
ksft_eq(sum(cnts[2:2+i*2] + cnts[4+i*2:]), 0, "traffic on other contexts: " + str(cnts))
else:
ksft_ge(sum(cnts[ :2]), 20000, "traffic on main context:" + str(cnts))
@@ -353,8 +356,9 @@ from lib.py import ethtool, ip, defer, GenerateTraffic, CmdExitFailure
GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000)
cnts = _get_rx_cnts(cfg, prev=cnts)
- ksft_lt(sum(cnts[ :2]), 7000, "traffic on main context: " + str(cnts))
- ksft_ge(sum(cnts[2:4]), 20000, "traffic on extra context: " + str(cnts))
+ directed = sum(cnts[2:4])
+ ksft_lt(sum(cnts[ :2]), directed / 2, "traffic on main context: " + str(cnts))
+ ksft_ge(directed, 20000, "traffic on extra context: " + str(cnts))
if other_ctx == 0:
ksft_eq(sum(cnts[4: ]), 0, "traffic on other queues: " + str(cnts))
As predicted by David, running the test on a machine with a single
interface is a bit unreliable. We try to send 20k packets with iperf
and expect fewer than 10k packets on the default context. The test
isn't very quick; iperf will usually send 100k packets by the time we
stop it. So we're off by 5x on the number of iperf packets, but still
expect the default context to only get the hardcoded 10k. The intent
is to make sure we get noticeably less traffic on the default context.
Use half of the resulting iperf traffic instead of the hardcoded 10k.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
CC: petrm@nvidia.com
CC: willemb@google.com
---
 .../testing/selftests/drivers/net/hw/rss_ctx.py | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
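
For illustration only (not part of the patch, and the counter values
below are made up), a minimal sketch of why the new threshold holds up
when iperf overshoots the 20k target while the old fixed 10k cutoff
does not:

# Hypothetical rx counter deltas: queues 0-1 belong to the main (default)
# context, queues 2-3 to the extra context the flow is steered to.
cnts = [8000, 7000, 60000, 45000]

directed = sum(cnts[2:4])          # packets that landed on the extra context

# Old check: fixed threshold, fails once iperf sends ~5x the expected traffic
# and the default context proportionally exceeds 10k packets.
old_ok = sum(cnts[:2]) < 10000

# New check: the main context must see less than half of the directed
# traffic, so the threshold scales with what iperf actually sent.
new_ok = sum(cnts[:2]) < directed / 2

print(old_ok, new_ok)              # False True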