thunderbolt: Make it possible to allocate one-directional DMA tunnel
author	Mika Westerberg <mika.westerberg@linux.intel.com>
Wed, 7 Oct 2020 15:17:12 +0000 (18:17 +0300)
committer	Mika Westerberg <mika.westerberg@linux.intel.com>
Wed, 11 Nov 2020 07:20:16 +0000 (10:20 +0300)
With DMA tunnels it is possible that the service using them does not
require bi-directional paths, so make the RX and TX paths optional (at
least one of them still needs to be set).

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Acked-by: Yehezkel Bernat <YehezkelShB@gmail.com>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
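
A hedged sketch of the caller side after this change: a hypothetical
helper (example_alloc_tx_only() is not part of this patch, and the
port and ring arguments are placeholders) allocating a transmit-only
tunnel by passing 0 for the receive direction.

	#include "tunnel.h"

	static struct tb_tunnel *example_alloc_tx_only(struct tb *tb,
						       struct tb_port *nhi,
						       struct tb_port *dst,
						       int transmit_ring,
						       int transmit_path)
	{
		/*
		 * Pass 0 for the receive ring (and receive HopID) so no
		 * RX tb_path is allocated.  Passing 0 for both directions
		 * trips the WARN_ON() below and returns NULL.
		 */
		return tb_tunnel_alloc_dma(tb, nhi, dst, transmit_ring,
					   transmit_path, 0, 0);
	}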
drivers/thunderbolt/tunnel.c

index 829b6cc..dcdf9c7 100644
@@ -34,9 +34,6 @@
 #define TB_DP_AUX_PATH_OUT             1
 #define TB_DP_AUX_PATH_IN              2
 
-#define TB_DMA_PATH_OUT                        0
-#define TB_DMA_PATH_IN                 1
-
 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
 
 #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
@@ -829,10 +826,10 @@ static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
  * @nhi: Host controller port
  * @dst: Destination null port which the other domain is connected to
  * @transmit_ring: NHI ring number used to send packets towards the
- *                other domain
+ *                other domain. Set to %0 if TX path is not needed.
  * @transmit_path: HopID used for transmitting packets
  * @receive_ring: NHI ring number used to receive packets from the
- *               other domain
+ *               other domain. Set to %0 if RX path is not needed.
 * @receive_path: HopID used for receiving packets
  *
  * Return: Returns a tb_tunnel on success or NULL on failure.
@@ -843,10 +840,19 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
                                      int receive_path)
 {
        struct tb_tunnel *tunnel;
+       size_t npaths = 0, i = 0;
        struct tb_path *path;
        u32 credits;
 
-       tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
+       if (receive_ring)
+               npaths++;
+       if (transmit_ring)
+               npaths++;
+
+       if (WARN_ON(!npaths))
+               return NULL;
+
+       tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
        if (!tunnel)
                return NULL;
 
@@ -856,22 +862,28 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 
        credits = tb_dma_credits(nhi);
 
-       path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
-       if (!path) {
-               tb_tunnel_free(tunnel);
-               return NULL;
+       if (receive_ring) {
+               path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
+                                    "DMA RX");
+               if (!path) {
+                       tb_tunnel_free(tunnel);
+                       return NULL;
+               }
+               tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
+                                credits);
+               tunnel->paths[i++] = path;
        }
-       tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
-                        credits);
-       tunnel->paths[TB_DMA_PATH_IN] = path;
 
-       path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
-       if (!path) {
-               tb_tunnel_free(tunnel);
-               return NULL;
+       if (transmit_ring) {
+               path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
+                                    "DMA TX");
+               if (!path) {
+                       tb_tunnel_free(tunnel);
+                       return NULL;
+               }
+               tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
+               tunnel->paths[i++] = path;
        }
-       tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
-       tunnel->paths[TB_DMA_PATH_OUT] = path;
 
        return tunnel;
 }
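
Since the fixed TB_DMA_PATH_OUT/TB_DMA_PATH_IN slots are gone, code that
touches tunnel->paths can no longer assume which index holds which
direction.  A rough, direction-agnostic sketch (example_count_hops() is
a hypothetical helper, not part of the patch) of walking whatever paths
were actually allocated:

	#include "tunnel.h"

	static int example_count_hops(const struct tb_tunnel *tunnel)
	{
		int hops = 0;
		size_t i;

		/* Only the first npaths entries are populated now. */
		for (i = 0; i < tunnel->npaths; i++) {
			if (tunnel->paths[i])
				hops += tunnel->paths[i]->path_length;
		}

		return hops;
	}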