[16/18] thunderbolt: Add KUnit tests for DMA tunnels

Message ID: 20210304123125.43630-17-mika.westerberg@linux.intel.com
State: Not Applicable
Delegated to: Netdev Maintainers
Series: thunderbolt: Align with USB4 inter-domain and DROM specs

Checks

Context                         Check    Description
netdev/cover_letter             success
netdev/fixes_present            success
netdev/patch_count              fail     Series longer than 15 patches
netdev/tree_selection           success  Guessed tree name to be net-next
netdev/subject_prefix           success
netdev/cc_maintainers           success  CCed 5 of 5 maintainers
netdev/source_inline            success  Was 0 now: 0
netdev/verify_signedoff         success
netdev/module_param             success  Was 0 now: 0
netdev/build_32bit              success  Errors and warnings before: 0 this patch: 0
netdev/kdoc                     success  Errors and warnings before: 0 this patch: 0
netdev/verify_fixes             success
netdev/checkpatch               warning  CHECK: Comparison to NULL could be written "tunnel"
netdev/build_allmodconfig_warn  success  Errors and warnings before: 0 this patch: 0
netdev/header_inline            success
netdev/stable                   success  Stable not CCed
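
The lone checkpatch note above points at the explicit NULL comparisons in the new assertions (e.g. KUNIT_ASSERT_TRUE(test, tunnel != NULL)); kernel style would drop the comparison. Purely illustrative, not part of the patch:

	KUNIT_ASSERT_TRUE(test, tunnel);	/* rather than: tunnel != NULL */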

Commit Message

Mika Westerberg March 4, 2021, 12:31 p.m. UTC
Add a couple of tests to check DMA tunneling functionality.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
---
 drivers/thunderbolt/test.c | 240 +++++++++++++++++++++++++++++++++++++
 1 file changed, 240 insertions(+)
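
For anyone who wants to exercise the new cases locally, a minimal KUnit setup along the following lines should work. The Kconfig symbols are assumptions on my part (USB4_KUNIT_TEST is what I would expect for the thunderbolt tests); verify them against drivers/thunderbolt/Kconfig:

	# .kunitconfig -- assumed symbols, check drivers/thunderbolt/Kconfig
	CONFIG_KUNIT=y
	CONFIG_PCI=y
	CONFIG_USB4=y
	CONFIG_USB4_KUNIT_TEST=y

	$ ./tools/testing/kunit/kunit.py run

kunit.py builds a UML kernel with that config and reports TAP results for the whole suite, including the five DMA tunnel cases added here.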
Patch

diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index 4e1e7ae2d90d..5ff5a03bc9ce 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -119,6 +119,7 @@  static struct tb_switch *alloc_host(struct kunit *test)
 	sw->ports[7].config.type = TB_TYPE_NHI;
 	sw->ports[7].config.max_in_hop_id = 11;
 	sw->ports[7].config.max_out_hop_id = 11;
+	sw->ports[7].config.nfc_credits = 0x41800000;
 
 	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
 	sw->ports[8].config.max_in_hop_id = 8;
@@ -1594,6 +1595,240 @@  static void tb_test_tunnel_port_on_path(struct kunit *test)
 	tb_tunnel_free(dp_tunnel);
 }
 
+static void tb_test_tunnel_dma(struct kunit *test)
+{
+	struct tb_port *nhi, *port;
+	struct tb_tunnel *tunnel;
+	struct tb_switch *host;
+
+	/*
+	 * Create DMA tunnel from NHI to port 1 and back.
+	 *
+	 *   [Host 1]
+	 *    1 ^ In HopID 1 -> Out HopID 8
+	 *      |
+	 *      v In HopID 8 -> Out HopID 1
+	 * ............ Domain border
+	 *      |
+	 *   [Host 2]
+	 */
+	host = alloc_host(test);
+	nhi = &host->ports[7];
+	port = &host->ports[1];
+
+	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
+	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
+	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+	/* RX path */
+	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
+	/* TX path */
+	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
+
+	tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_dma_rx(struct kunit *test)
+{
+	struct tb_port *nhi, *port;
+	struct tb_tunnel *tunnel;
+	struct tb_switch *host;
+
+	/*
+	 * Create DMA RX tunnel from port 1 to NHI.
+	 *
+	 *   [Host 1]
+	 *    1 ^
+	 *      |
+	 *      | In HopID 15 -> Out HopID 2
+	 * ............ Domain border
+	 *      |
+	 *   [Host 2]
+	 */
+	host = alloc_host(test);
+	nhi = &host->ports[7];
+	port = &host->ports[1];
+
+	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
+	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
+	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1);
+	/* RX path */
+	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
+
+	tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_dma_tx(struct kunit *test)
+{
+	struct tb_port *nhi, *port;
+	struct tb_tunnel *tunnel;
+	struct tb_switch *host;
+
+	/*
+	 * Create DMA TX tunnel from NHI to port 1.
+	 *
+	 *   [Host 1]
+	 *    1 | In HopID 2 -> Out HopID 15
+	 *      |
+	 *      v
+	 * ............ Domain border
+	 *      |
+	 *   [Host 2]
+	 */
+	host = alloc_host(test);
+	nhi = &host->ports[7];
+	port = &host->ports[1];
+
+	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
+	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
+	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1);
+	/* TX path */
+	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
+
+	tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_dma_chain(struct kunit *test)
+{
+	struct tb_switch *host, *dev1, *dev2;
+	struct tb_port *nhi, *port;
+	struct tb_tunnel *tunnel;
+
+	/*
+	 * Create DMA tunnel from NHI to Device #2 port 3 and back.
+	 *
+	 *   [Host 1]
+	 *    1 ^ In HopID 1 -> Out HopID x
+	 *      |
+	 *    1 | In HopID x -> Out HopID 1
+	 *  [Device #1]
+	 *         7 \
+	 *          1 \
+	 *         [Device #2]
+	 *           3 | In HopID x -> Out HopID 8
+	 *             |
+	 *             v In HopID 8 -> Out HopID x
+	 * ............ Domain border
+	 *             |
+	 *          [Host 2]
+	 */
+	host = alloc_host(test);
+	dev1 = alloc_dev_default(test, host, 0x1, true);
+	dev2 = alloc_dev_default(test, dev1, 0x701, true);
+
+	nhi = &host->ports[7];
+	port = &dev2->ports[3];
+	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
+	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
+	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+	/* RX path */
+	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
+			    &dev2->ports[1]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
+			    &dev1->ports[7]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
+			    &dev1->ports[1]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
+			    &host->ports[1]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
+	/* TX path */
+	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
+			    &dev1->ports[1]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
+			    &dev1->ports[7]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
+			    &dev2->ports[1]);
+	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
+	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
+
+	tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_dma_match(struct kunit *test)
+{
+	struct tb_port *nhi, *port;
+	struct tb_tunnel *tunnel;
+	struct tb_switch *host;
+
+	host = alloc_host(test);
+	nhi = &host->ports[7];
+	port = &host->ports[1];
+
+	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
+	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
+	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
+	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
+
+	tb_tunnel_free(tunnel);
+
+	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
+	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
+	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
+	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
+	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
+
+	tb_tunnel_free(tunnel);
+
+	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
+	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
+	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
+	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
+	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
+	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
+
+	tb_tunnel_free(tunnel);
+}
+
 static const u32 root_directory[] = {
 	0x55584401,	/* "UXD" v1 */
 	0x00000018,	/* Root directory length */
@@ -1865,6 +2100,11 @@  static struct kunit_case tb_test_cases[] = {
 	KUNIT_CASE(tb_test_tunnel_dp_max_length),
 	KUNIT_CASE(tb_test_tunnel_port_on_path),
 	KUNIT_CASE(tb_test_tunnel_usb3),
+	KUNIT_CASE(tb_test_tunnel_dma),
+	KUNIT_CASE(tb_test_tunnel_dma_rx),
+	KUNIT_CASE(tb_test_tunnel_dma_tx),
+	KUNIT_CASE(tb_test_tunnel_dma_chain),
+	KUNIT_CASE(tb_test_tunnel_dma_match),
 	KUNIT_CASE(tb_test_property_parse),
 	KUNIT_CASE(tb_test_property_format),
 	KUNIT_CASE(tb_test_property_copy),
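
As the calls above read, the argument order of tb_tunnel_alloc_dma() appears to be (tb, nhi, dst, transmit_path, transmit_ring, receive_path, receive_ring), with -1 meaning "not used". The expectations in tb_test_tunnel_dma_match() then pin down the wildcard semantics of tb_tunnel_match_dma(): -1 matches anything, a non-negative value must equal what the tunnel was allocated with, and specifying a direction the tunnel does not have fails. A minimal sketch of that logic, using hypothetical names rather than the driver's actual implementation:

	/*
	 * Illustrative sketch only; the real tb_tunnel_match_dma() lives in
	 * drivers/thunderbolt/tunnel.c. match_value(), match_dma_sketch()
	 * and struct dma_tunnel_sketch are hypothetical.
	 */
	#include <stdbool.h>

	/* -1 acts as a wildcard; any other value must match exactly */
	static bool match_value(int requested, int configured)
	{
		return requested < 0 || requested == configured;
	}

	struct dma_tunnel_sketch {
		bool has_tx, has_rx;	/* which paths were allocated */
		int tx_path, tx_ring;	/* values passed at alloc time */
		int rx_path, rx_ring;
	};

	static bool match_dma_sketch(const struct dma_tunnel_sketch *t,
				     int transmit_path, int transmit_ring,
				     int receive_path, int receive_ring)
	{
		/* Requesting a direction the tunnel lacks never matches */
		if (!t->has_tx && (transmit_path >= 0 || transmit_ring >= 0))
			return false;
		if (!t->has_rx && (receive_path >= 0 || receive_ring >= 0))
			return false;

		return match_value(transmit_path, t->tx_path) &&
		       match_value(transmit_ring, t->tx_ring) &&
		       match_value(receive_path, t->rx_path) &&
		       match_value(receive_ring, t->rx_ring);
	}

Walking the sketch through the test's tables reproduces every expectation: for the TX-only tunnel, (15, 1, 15, 1) fails because the receive side is requested but absent, and (15, 11, -1, -1) fails because ring 11 does not equal the allocated ring 1, while all-wildcard (-1, -1, -1, -1) always matches.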