gve: Batch AQ commands for creating and destroying queues.
author     Sagi Shahar <sagis@google.com>          Fri, 11 Sep 2020 17:38:49 +0000 (10:38 -0700)
committer  David S. Miller <davem@davemloft.net>   Fri, 11 Sep 2020 21:31:54 +0000 (14:31 -0700)
Adds support for batching AQ commands and uses it for creating and
destroying queues.

Reviewed-by: Yangchun Fu <yangchun@google.com>
Signed-off-by: Sagi Shahar <sagis@google.com>
Signed-off-by: David Awogbemila <awogbemila@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/google/gve/gve_adminq.c
drivers/net/ethernet/google/gve/gve_adminq.h
drivers/net/ethernet/google/gve/gve_main.c

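The diff below converts the driver from one execute-and-wait admin queue (AQ) command per queue to a batched flow: commands are written into the AQ ring one after another, the doorbell is rung once, and completion statuses are then scanned from the old tail up to the new head, returning the first error. What follows is a minimal, self-contained userspace sketch of that pattern, not gve driver code; the ring size, the status encoding, and the fake_dev_process() stand-in for the device are illustrative assumptions.

/* Minimal userspace model of the batched admin-queue pattern this patch
 * introduces: queue several commands into a ring, ring the doorbell once,
 * then scan completions from the old tail to the new head and return the
 * first error.  All names and values here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AQ_LEN  16U                     /* ring size, must be a power of two */
#define AQ_MASK (AQ_LEN - 1U)

struct aq_cmd {
    uint32_t opcode;
    uint32_t status;                    /* 0 = pending, 1 = ok, else error */
};

struct aq {
    struct aq_cmd ring[AQ_LEN];
    uint32_t prod_cnt;                  /* head: commands issued by the driver */
    uint32_t event_cnt;                 /* tail: commands completed by the device */
};

/* Stand-in for the device consuming the ring after a doorbell write. */
static void fake_dev_process(struct aq *aq)
{
    while (aq->event_cnt != aq->prod_cnt) {
        struct aq_cmd *cmd = &aq->ring[aq->event_cnt & AQ_MASK];

        cmd->status = (cmd->opcode != 0) ? 1 : 2;   /* opcode 0 "fails" */
        aq->event_cnt++;
    }
}

/* Flush everything queued so far; return the first error seen, if any. */
static int aq_kick_and_wait(struct aq *aq)
{
    uint32_t tail = aq->event_cnt;
    uint32_t head = aq->prod_cnt;
    uint32_t i;

    fake_dev_process(aq);               /* doorbell write + wait, collapsed */

    for (i = tail; i < head; i++) {
        if (aq->ring[i & AQ_MASK].status != 1)
            return -1;                  /* first failure wins */
    }
    return 0;
}

/* Queue one command, flushing first if the ring is about to fill up. */
static int aq_issue_cmd(struct aq *aq, uint32_t opcode)
{
    if (((aq->prod_cnt + 1) & AQ_MASK) == (aq->event_cnt & AQ_MASK)) {
        int err = aq_kick_and_wait(aq);

        if (err)
            return err;
    }

    aq->ring[aq->prod_cnt & AQ_MASK] =
        (struct aq_cmd){ .opcode = opcode, .status = 0 };
    aq->prod_cnt++;
    return 0;
}

int main(void)
{
    struct aq aq;
    uint32_t q;
    int err = 0;

    memset(&aq, 0, sizeof(aq));

    /* Batched "create queue" commands: issue them all, then kick once. */
    for (q = 0; q < 8 && !err; q++)
        err = aq_issue_cmd(&aq, 1 /* e.g. CREATE_TX_QUEUE */);
    if (!err)
        err = aq_kick_and_wait(&aq);

    printf("batched create: %s\n", err ? "failed" : "ok");
    return err ? 1 : 0;
}

The one-slot-free overflow check mirrors the driver's ((adminq_prod_cnt + 1) & adminq_mask) == tail test below: if queuing one more command would catch up with the device's event counter, everything already queued is flushed first, so a single issued command can never overflow the ring.
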
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 078e6e4..6f5ccd5 100644
@@ -135,20 +135,71 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
        }
 }
 
+/* Flushes all AQ commands currently queued and waits for them to complete.
+ * If there are failures, it will return the first error.
+ */
+static int gve_adminq_kick_and_wait(struct gve_priv *priv)
+{
+       u32 tail, head;
+       int i;
+
+       tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+       head = priv->adminq_prod_cnt;
+
+       gve_adminq_kick_cmd(priv, head);
+       if (!gve_adminq_wait_for_cmd(priv, head)) {
+               dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
+               priv->adminq_timeouts++;
+               return -ENOTRECOVERABLE;
+       }
+
+       for (i = tail; i < head; i++) {
+               union gve_adminq_command *cmd;
+               u32 status, err;
+
+               cmd = &priv->adminq[i & priv->adminq_mask];
+               status = be32_to_cpu(READ_ONCE(cmd->status));
+               err = gve_adminq_parse_err(priv, status);
+               if (err)
+                       // Return the first error if we failed.
+                       return err;
+       }
+
+       return 0;
+}
+
 /* This function is not threadsafe - the caller is responsible for any
  * necessary locks.
  */
-int gve_adminq_execute_cmd(struct gve_priv *priv,
-                          union gve_adminq_command *cmd_orig)
+static int gve_adminq_issue_cmd(struct gve_priv *priv,
+                               union gve_adminq_command *cmd_orig)
 {
        union gve_adminq_command *cmd;
-       u32 status = 0;
-       u32 prod_cnt;
        u32 opcode;
+       u32 tail;
+
+       tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+
+       // Check if next command will overflow the buffer.
+       if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
+               int err;
+
+               // Flush existing commands to make room.
+               err = gve_adminq_kick_and_wait(priv);
+               if (err)
+                       return err;
+
+               // Retry.
+               tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+               if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
+                       // This should never happen. We just flushed the
+                       // command queue so there should be enough space.
+                       return -ENOMEM;
+               }
+       }
 
        cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
        priv->adminq_prod_cnt++;
-       prod_cnt = priv->adminq_prod_cnt;
 
        memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
        opcode = be32_to_cpu(READ_ONCE(cmd->opcode));
@@ -191,16 +242,30 @@ int gve_adminq_execute_cmd(struct gve_priv *priv,
                dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
        }
 
-       gve_adminq_kick_cmd(priv, prod_cnt);
-       if (!gve_adminq_wait_for_cmd(priv, prod_cnt)) {
-               dev_err(&priv->pdev->dev, "AQ command timed out, need to reset AQ\n");
-               priv->adminq_timeouts++;
-               return -ENOTRECOVERABLE;
-       }
+       return 0;
+}
 
-       memcpy(cmd_orig, cmd, sizeof(*cmd));
-       status = be32_to_cpu(READ_ONCE(cmd->status));
-       return gve_adminq_parse_err(priv, status);
+/* This function is not threadsafe - the caller is responsible for any
+ * necessary locks.
+ * The caller is also responsible for making sure there are no commands
+ * waiting to be executed.
+ */
+static int gve_adminq_execute_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig)
+{
+       u32 tail, head;
+       int err;
+
+       tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+       head = priv->adminq_prod_cnt;
+       if (tail != head)
+               // This is not a valid path
+               return -EINVAL;
+
+       err = gve_adminq_issue_cmd(priv, cmd_orig);
+       if (err)
+               return err;
+
+       return gve_adminq_kick_and_wait(priv);
 }
 
 /* The device specifies that the management vector can either be the first irq
@@ -245,29 +310,50 @@ int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
        return gve_adminq_execute_cmd(priv, &cmd);
 }
 
-int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
+static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
 {
        struct gve_tx_ring *tx = &priv->tx[queue_index];
        union gve_adminq_command cmd;
+       int err;
 
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
        cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
                .queue_id = cpu_to_be32(queue_index),
                .reserved = 0,
-               .queue_resources_addr = cpu_to_be64(tx->q_resources_bus),
+               .queue_resources_addr =
+                       cpu_to_be64(tx->q_resources_bus),
                .tx_ring_addr = cpu_to_be64(tx->bus),
                .queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id),
                .ntfy_id = cpu_to_be32(tx->ntfy_id),
        };
 
-       return gve_adminq_execute_cmd(priv, &cmd);
+       err = gve_adminq_issue_cmd(priv, &cmd);
+       if (err)
+               return err;
+
+       return 0;
 }
 
-int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < num_queues; i++) {
+               err = gve_adminq_create_tx_queue(priv, i);
+               if (err)
+                       return err;
+       }
+
+       return gve_adminq_kick_and_wait(priv);
+}
+
+static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 {
        struct gve_rx_ring *rx = &priv->rx[queue_index];
        union gve_adminq_command cmd;
+       int err;
 
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
@@ -282,12 +368,31 @@ int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
                .queue_page_list_id = cpu_to_be32(rx->data.qpl->id),
        };
 
-       return gve_adminq_execute_cmd(priv, &cmd);
+       err = gve_adminq_issue_cmd(priv, &cmd);
+       if (err)
+               return err;
+
+       return 0;
 }
 
-int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
+int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < num_queues; i++) {
+               err = gve_adminq_create_rx_queue(priv, i);
+               if (err)
+                       return err;
+       }
+
+       return gve_adminq_kick_and_wait(priv);
+}
+
+static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
 {
        union gve_adminq_command cmd;
+       int err;
 
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
@@ -295,12 +400,31 @@ int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
                .queue_id = cpu_to_be32(queue_index),
        };
 
-       return gve_adminq_execute_cmd(priv, &cmd);
+       err = gve_adminq_issue_cmd(priv, &cmd);
+       if (err)
+               return err;
+
+       return 0;
 }
 
-int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < num_queues; i++) {
+               err = gve_adminq_destroy_tx_queue(priv, i);
+               if (err)
+                       return err;
+       }
+
+       return gve_adminq_kick_and_wait(priv);
+}
+
+static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
 {
        union gve_adminq_command cmd;
+       int err;
 
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
@@ -308,7 +432,25 @@ int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
                .queue_id = cpu_to_be32(queue_index),
        };
 
-       return gve_adminq_execute_cmd(priv, &cmd);
+       err = gve_adminq_issue_cmd(priv, &cmd);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < num_queues; i++) {
+               err = gve_adminq_destroy_rx_queue(priv, i);
+               if (err)
+                       return err;
+       }
+
+       return gve_adminq_kick_and_wait(priv);
 }
 
 int gve_adminq_describe_device(struct gve_priv *priv)
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index a6c8c29..784830f 100644
@@ -238,8 +238,6 @@ static_assert(sizeof(union gve_adminq_command) == 64);
 int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
 void gve_adminq_free(struct device *dev, struct gve_priv *priv);
 void gve_adminq_release(struct gve_priv *priv);
-int gve_adminq_execute_cmd(struct gve_priv *priv,
-                          union gve_adminq_command *cmd_orig);
 int gve_adminq_describe_device(struct gve_priv *priv);
 int gve_adminq_configure_device_resources(struct gve_priv *priv,
                                          dma_addr_t counter_array_bus_addr,
@@ -247,10 +245,10 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
                                          dma_addr_t db_array_bus_addr,
                                          u32 num_ntfy_blks);
 int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
-int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues);
 int gve_adminq_register_page_list(struct gve_priv *priv,
                                  struct gve_queue_page_list *qpl);
 int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 7c5a113..28e5cc5 100644
@@ -450,36 +450,37 @@ static int gve_create_rings(struct gve_priv *priv)
        int err;
        int i;
 
-       for (i = 0; i < priv->tx_cfg.num_queues; i++) {
-               err = gve_adminq_create_tx_queue(priv, i);
-               if (err) {
-                       netif_err(priv, drv, priv->dev, "failed to create tx queue %d\n",
-                                 i);
-                       /* This failure will trigger a reset - no need to clean
-                        * up
-                        */
-                       return err;
-               }
-               netif_dbg(priv, drv, priv->dev, "created tx queue %d\n", i);
+       err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
+       if (err) {
+               netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
+                         priv->tx_cfg.num_queues);
+               /* This failure will trigger a reset - no need to clean
+                * up
+                */
+               return err;
        }
-       for (i = 0; i < priv->rx_cfg.num_queues; i++) {
-               err = gve_adminq_create_rx_queue(priv, i);
-               if (err) {
-                       netif_err(priv, drv, priv->dev, "failed to create rx queue %d\n",
-                                 i);
-                       /* This failure will trigger a reset - no need to clean
-                        * up
-                        */
-                       return err;
-               }
-               /* Rx data ring has been prefilled with packet buffers at
-                * queue allocation time.
-                * Write the doorbell to provide descriptor slots and packet
-                * buffers to the NIC.
+       netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
+                 priv->tx_cfg.num_queues);
+
+       err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
+       if (err) {
+               netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
+                         priv->rx_cfg.num_queues);
+               /* This failure will trigger a reset - no need to clean
+                * up
                 */
-               gve_rx_write_doorbell(priv, &priv->rx[i]);
-               netif_dbg(priv, drv, priv->dev, "created rx queue %d\n", i);
+               return err;
        }
+       netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
+                 priv->rx_cfg.num_queues);
+
+       /* Rx data ring has been prefilled with packet buffers at queue
+        * allocation time.
+        * Write the doorbell to provide descriptor slots and packet buffers
+        * to the NIC.
+        */
+       for (i = 0; i < priv->rx_cfg.num_queues; i++)
+               gve_rx_write_doorbell(priv, &priv->rx[i]);
 
        return 0;
 }
@@ -537,34 +538,23 @@ free_tx:
 static int gve_destroy_rings(struct gve_priv *priv)
 {
        int err;
-       int i;
 
-       for (i = 0; i < priv->tx_cfg.num_queues; i++) {
-               err = gve_adminq_destroy_tx_queue(priv, i);
-               if (err) {
-                       netif_err(priv, drv, priv->dev,
-                                 "failed to destroy tx queue %d\n",
-                                 i);
-                       /* This failure will trigger a reset - no need to clean
-                        * up
-                        */
-                       return err;
-               }
-               netif_dbg(priv, drv, priv->dev, "destroyed tx queue %d\n", i);
+       err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
+       if (err) {
+               netif_err(priv, drv, priv->dev,
+                         "failed to destroy tx queues\n");
+               /* This failure will trigger a reset - no need to clean up */
+               return err;
        }
-       for (i = 0; i < priv->rx_cfg.num_queues; i++) {
-               err = gve_adminq_destroy_rx_queue(priv, i);
-               if (err) {
-                       netif_err(priv, drv, priv->dev,
-                                 "failed to destroy rx queue %d\n",
-                                 i);
-                       /* This failure will trigger a reset - no need to clean
-                        * up
-                        */
-                       return err;
-               }
-               netif_dbg(priv, drv, priv->dev, "destroyed rx queue %d\n", i);
+       netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
+       err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
+       if (err) {
+               netif_err(priv, drv, priv->dev,
+                         "failed to destroy rx queues\n");
+               /* This failure will trigger a reset - no need to clean up */
+               return err;
        }
+       netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
        return 0;
 }