
[07/10] thunderbolt: Make it possible to allocate one-directional DMA tunnel

Message ID 20201104140030.6853-8-mika.westerberg@linux.intel.com (mailing list archive)
State Not Applicable
Delegated to: Netdev Maintainers
Series: thunderbolt: Add DMA traffic test driver

Commit Message

Mika Westerberg Nov. 4, 2020, 2 p.m. UTC
With DMA tunnels it is possible that the service using them does not
require bi-directional paths, so make the RX and TX paths optional (at
least one of them must still be set).

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
---
 drivers/thunderbolt/tunnel.c | 50 ++++++++++++++++++++++--------------
 1 file changed, 31 insertions(+), 19 deletions(-)
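
As an illustration of the relaxed interface, a caller that only needs
host-to-peer traffic could now allocate a transmit-only tunnel. This is
a sketch only: the tb_tunnel_alloc_dma() signature comes from the patch
below, but the port variables and the ring/HopID numbers are placeholder
assumptions, not values taken from this series.

	/* Sketch: TX-only DMA tunnel. Passing 0 for the receive ring and
	 * HopID tells tb_tunnel_alloc_dma() to skip the RX path entirely.
	 * Ring 1 and HopID 8 are placeholder values.
	 */
	struct tb_tunnel *tunnel;

	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port,
				     1 /* transmit_ring */,
				     8 /* transmit_path */,
				     0 /* receive_ring */,
				     0 /* receive_path */);
	if (!tunnel)
		return -ENOMEM;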

Patch

diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 829b6ccdd5d4..dcdf9c7a9cae 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -34,9 +34,6 @@ 
 #define TB_DP_AUX_PATH_OUT		1
 #define TB_DP_AUX_PATH_IN		2
 
-#define TB_DMA_PATH_OUT			0
-#define TB_DMA_PATH_IN			1
-
 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
 
 #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
@@ -829,10 +826,10 @@  static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
  * @nhi: Host controller port
  * @dst: Destination null port which the other domain is connected to
  * @transmit_ring: NHI ring number used to send packets towards the
- *		   other domain
+ *		   other domain. Set to %0 if TX path is not needed.
  * @transmit_path: HopID used for transmitting packets
  * @receive_ring: NHI ring number used to receive packets from the
- *		  other domain
+ *		  other domain. Set to %0 if RX path is not needed.
 * @receive_path: HopID used for receiving packets
  *
  * Return: Returns a tb_tunnel on success or NULL on failure.
@@ -843,10 +840,19 @@  struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 				      int receive_path)
 {
 	struct tb_tunnel *tunnel;
+	size_t npaths = 0, i = 0;
 	struct tb_path *path;
 	u32 credits;
 
-	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
+	if (receive_ring)
+		npaths++;
+	if (transmit_ring)
+		npaths++;
+
+	if (WARN_ON(!npaths))
+		return NULL;
+
+	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
 	if (!tunnel)
 		return NULL;
 
@@ -856,22 +862,28 @@  struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 
 	credits = tb_dma_credits(nhi);
 
-	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
-	if (!path) {
-		tb_tunnel_free(tunnel);
-		return NULL;
+	if (receive_ring) {
+		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
+				     "DMA RX");
+		if (!path) {
+			tb_tunnel_free(tunnel);
+			return NULL;
+		}
+		tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
+				 credits);
+		tunnel->paths[i++] = path;
 	}
-	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
-			 credits);
-	tunnel->paths[TB_DMA_PATH_IN] = path;
 
-	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
-	if (!path) {
-		tb_tunnel_free(tunnel);
-		return NULL;
+	if (transmit_ring) {
+		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
+				     "DMA TX");
+		if (!path) {
+			tb_tunnel_free(tunnel);
+			return NULL;
+		}
+		tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
+		tunnel->paths[i++] = path;
 	}
-	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
-	tunnel->paths[TB_DMA_PATH_OUT] = path;
 
 	return tunnel;
 }
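
A side effect worth noting: with the fixed TB_DMA_PATH_OUT/TB_DMA_PATH_IN
indices gone, tunnel->paths[] is now filled in allocation order. When both
directions are requested the RX path lands at index 0 and the TX path at
index 1 (the reverse of the old constants), and a one-directional tunnel
keeps its single path at index 0, so code outside this function should no
longer assume a fixed slot per direction.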