@@ -17,7 +17,7 @@
#
ALL_TESTS="mdb_add_del_test mdb_compat_fwd_test mdb_mac_fwd_test mdb_ip4_fwd_test mdb_ip6_fwd_test"
-NUM_NETIFS=4
+NUM_NETIFS=6
SRC_PORT="1234"
DST_PORT="4321"
@@ -70,6 +70,16 @@ h2_destroy()
simple_if_fini $h2 192.0.2.2/24 2001:db8:1::2/64
}
+h3_create()
+{
+ simple_if_init $h3 192.0.2.3/24 2001:db8:1::3/64
+}
+
+h3_destroy()
+{
+ simple_if_fini $h3 192.0.2.3/24 2001:db8:1::3/64
+}
+
switch_create()
{
# Enable multicast filtering w/ querier, reduce query response
@@ -88,10 +98,12 @@ switch_create()
ip link set dev $swp1 master br0
ip link set dev $swp2 master br0
+ ip link set dev $swp3 master br0
ip link set dev br0 up
ip link set dev $swp1 up
ip link set dev $swp2 up
+ ip link set dev $swp3 up
# Initial delay, when bridge floods all mcast, is set to 200
# above (2 sec.) We wait 3 sec to handle the case when a single
@@ -102,6 +114,7 @@ switch_create()
switch_destroy()
{
+ ip link set dev $swp3 down
ip link set dev $swp2 down
ip link set dev $swp1 down
@@ -116,6 +129,9 @@ setup_prepare()
swp2=${NETIFS[p3]}
h2=${NETIFS[p4]}
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
# Disable all IPv6 autoconfiguration during test, we want
# full control of when MLD queries start etc.
sysctl_set net.ipv6.conf.default.accept_ra 0
@@ -123,6 +139,7 @@ setup_prepare()
h1_create
h2_create
+ h3_create
switch_create
}
@@ -133,6 +150,7 @@ cleanup()
switch_destroy
+ h3_destroy
h2_destroy
h1_destroy
@@ -249,11 +267,15 @@ do_mdb_fwd()
fi
if [ "$port" = "$h2" ]; then
swp=$swp2
+ nop="$h3"
+ else
+ nop="$h2"
fi
# Disable flooding of unknown multicast, strict MDB forwarding
bridge link set dev "$swp1" mcast_flood off
bridge link set dev "$swp2" mcast_flood off
+ bridge link set dev "$swp3" mcast_flood off
bridge link set dev "br0" mcast_flood off self
# Static filter only to this port
@@ -261,6 +283,7 @@ do_mdb_fwd()
check_err $? "Failed adding $type multicast group $pass_grp to bridge port $swp"
tcpdump_start "$port"
+ tcpdump_start "$nop"
# Real data we're expecting
$MZ -q "$h1" "$pass_pkt"
@@ -268,6 +291,7 @@ do_mdb_fwd()
$MZ -q "$h1" "$fail_pkt"
sleep 1
+ tcpdump_stop "$nop"
tcpdump_stop "$port"
tcpdump_show "$port" |grep -q "$src$spt > $pass_grp$dpt"
@@ -276,6 +300,12 @@ do_mdb_fwd()
tcpdump_show "$port" |grep -q "$src$spt > $fail_grp$dpt"
check_err_fail 1 $? "Received $type multicast group $fail_grp from $h1 to port $port"
+ # Verify we don't get multicast to the canary port
+ tcpdump_show "$nop" |grep -q "$src$spt > $pass_grp$dpt"
+ check_err_fail 1 $? "Received $type multicast group $pass_grp from $h1 to port $nop"
+ tcpdump_show "$nop" |grep -q "$src$spt > $fail_grp$dpt"
+ check_err_fail 1 $? "Received $type multicast group $fail_grp from $h1 to port $nop"
+
bridge mdb del dev br0 port "$swp" grp "$pass_grp"
log_test "MDB forward $type multicast to bridge port $port"
Add another veth pair to get a dedicated canary port. This helps us
verify that multicast is received by only a subset of the ports in the
MDB filter.

Signed-off-by: Joachim Wiberg <troglobit@gmail.com>
---
 .../selftests/net/forwarding/bridge_mdb.sh | 32 ++++++++++++++++++-
 1 file changed, 31 insertions(+), 1 deletion(-)
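For readers unfamiliar with the canary-port pattern, below is a minimal
standalone sketch (needs root) of the topology this patch builds. The
interface names br0/hN/swpN and the group 239.1.1.1 are illustrative,
not taken from the patch; the selftest itself drives all of this
through the forwarding lib.sh helpers, tunes the querier timers, and
checks reception with tcpdump_start/tcpdump_show rather than by hand.

    #!/bin/sh
    # Three veth pairs: two ports under test and one canary port.
    for i in 1 2 3; do
            ip link add "h$i" type veth peer name "swp$i"
    done

    # Bridge with multicast snooping and a querier enabled.
    ip link add name br0 type bridge mcast_snooping 1 mcast_querier 1
    for i in 1 2 3; do
            ip link set "swp$i" master br0
            ip link set "h$i" up
            ip link set "swp$i" up
            # Strict MDB forwarding: unknown multicast must not be
            # flooded, or the canary port would see it regardless.
            bridge link set dev "swp$i" mcast_flood off
    done
    ip link set br0 up
    bridge link set dev br0 mcast_flood off self

    # Static filter: group 239.1.1.1 is forwarded to swp2 only.
    # Traffic injected on h1 should now reach h2 but never h3.
    bridge mdb add dev br0 port swp2 grp 239.1.1.1 permanent

    # Cleanup when done:
    #   ip link del br0; for i in 1 2 3; do ip link del "h$i"; done

With mcast_flood disabled everywhere and a static MDB entry pointing
only at swp2, any frame for the group observed on h3 indicates the
bridge forwarded outside the MDB filter, which is exactly the
condition the added tcpdump checks on "$nop" assert against.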