cxgb4: add adapter hotplug support for ULDs
author Potnuri Bharat Teja <bharat@chelsio.com>
Thu, 21 May 2020 10:34:29 +0000 (16:04 +0530)
committer David S. Miller <davem@davemloft.net>
Fri, 22 May 2020 23:04:01 +0000 (16:04 -0700)
Upon adapter hotplug, cxgb4 registers ULD devices for all the ULDs that
are already loaded, ensuring that ULDs can enumerate the hotplugged
adapter without reloading the ULD.

Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
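
For readers unfamiliar with the ULD interface, the skeleton below (illustrative only, not part of this patch) shows how an upper-layer driver registers with cxgb4. After this change, the cxgb4_uld_info passed to cxgb4_register_uld() is also cached on uld_list, so cxgb4_uld_enable() can replay the registration for adapters probed after the ULD module has loaded. The "demo_uld" module, the CXGB4_ULD_RDMA slot choice, and fields such as .name, .nrxq, .ntxq, .rxq_size and .rx_handler are assumptions following the usual cxgb4_uld_info layout; only the .add callback is referenced directly in the diff below.

/*
 * Illustrative sketch only -- not part of the patch.  A minimal ULD
 * module registering with cxgb4; field names and callback signatures
 * follow the usual cxgb4_uld_info layout and are assumptions here.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "cxgb4_uld.h"

struct demo_uld_dev {
	const struct cxgb4_lld_info *lld;	/* per-adapter state */
};

static void *demo_uld_add(const struct cxgb4_lld_info *lld)
{
	struct demo_uld_dev *dev;

	/*
	 * Called once per adapter.  With this patch it also runs for
	 * adapters hot-plugged after module load, because
	 * cxgb4_uld_enable() replays every entry on uld_list.
	 */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	dev->lld = lld;
	return dev;
}

static int demo_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	return 0;	/* consume ingress messages in this sketch */
}

static const struct cxgb4_uld_info demo_uld_info = {
	.name		= "demo_uld",
	.nrxq		= 1,
	.ntxq		= 1,
	.rxq_size	= 1024,
	.add		= demo_uld_add,
	.rx_handler	= demo_uld_rx_handler,
};

static int __init demo_uld_init(void)
{
	/* Attaches to every adapter already probed; CXGB4_ULD_RDMA is
	 * used here purely for illustration. */
	cxgb4_register_uld(CXGB4_ULD_RDMA, &demo_uld_info);
	return 0;
}

static void __exit demo_uld_exit(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
}

module_init(demo_uld_init);
module_exit(demo_uld_exit);
MODULE_LICENSE("GPL");

With such a module loaded, inserting a cxgb4 adapter later triggers the same demo_uld_add() path via cxgb4_uld_enable(), whereas previously the module had to be reloaded to see the new adapter.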

diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index fc1405a..5a41801 100644
@@ -60,6 +60,7 @@
 
 #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
 extern struct list_head adapter_list;
+extern struct list_head uld_list;
 extern struct mutex uld_mutex;
 
 /* Suspend an Ethernet Tx queue with fewer available descriptors than this.
@@ -822,6 +823,13 @@ struct sge_uld_txq_info {
        u16 ntxq;               /* # of egress uld queues */
 };
 
+/* struct to maintain ULD list to reallocate ULD resources on hotplug */
+struct cxgb4_uld_list {
+       struct cxgb4_uld_info uld_info;
+       struct list_head list_node;
+       enum cxgb4_uld uld_type;
+};
+
 enum sge_eosw_state {
        CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
        CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d05c237..7a0414f 100644
@@ -180,6 +180,7 @@ static struct dentry *cxgb4_debugfs_root;
 
 LIST_HEAD(adapter_list);
 DEFINE_MUTEX(uld_mutex);
+LIST_HEAD(uld_list);
 
 static int cfg_queues(struct adapter *adap);
 
@@ -6519,11 +6520,8 @@ fw_attach_fail:
        /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
        pdev->needs_freset = 1;
 
-       if (is_uld(adapter)) {
-               mutex_lock(&uld_mutex);
-               list_add_tail(&adapter->list_node, &adapter_list);
-               mutex_unlock(&uld_mutex);
-       }
+       if (is_uld(adapter))
+               cxgb4_uld_enable(adapter);
 
        if (!is_t4(adapter->params.chip))
                cxgb4_ptp_init(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index e65b523..6b1d3df 100644
@@ -681,6 +681,74 @@ static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
 }
 #endif
 
+static void cxgb4_uld_alloc_resources(struct adapter *adap,
+                                     enum cxgb4_uld type,
+                                     const struct cxgb4_uld_info *p)
+{
+       int ret = 0;
+
+       if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+           (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+               return;
+       if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+               return;
+       ret = cfg_queues_uld(adap, type, p);
+       if (ret)
+               goto out;
+       ret = setup_sge_queues_uld(adap, type, p->lro);
+       if (ret)
+               goto free_queues;
+       if (adap->flags & CXGB4_USING_MSIX) {
+               ret = request_msix_queue_irqs_uld(adap, type);
+               if (ret)
+                       goto free_rxq;
+       }
+       if (adap->flags & CXGB4_FULL_INIT_DONE)
+               enable_rx_uld(adap, type);
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+       /* send mbox to enable ktls related settings. */
+       if (type == CXGB4_ULD_CRYPTO &&
+           (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
+               cxgb4_set_ktls_feature(adap, 1);
+#endif
+       if (adap->uld[type].add)
+               goto free_irq;
+       ret = setup_sge_txq_uld(adap, type, p);
+       if (ret)
+               goto free_irq;
+       adap->uld[type] = *p;
+       ret = uld_attach(adap, type);
+       if (ret)
+               goto free_txq;
+       return;
+free_txq:
+       release_sge_txq_uld(adap, type);
+free_irq:
+       if (adap->flags & CXGB4_FULL_INIT_DONE)
+               quiesce_rx_uld(adap, type);
+       if (adap->flags & CXGB4_USING_MSIX)
+               free_msix_queue_irqs_uld(adap, type);
+free_rxq:
+       free_sge_queues_uld(adap, type);
+free_queues:
+       free_queues_uld(adap, type);
+out:
+       dev_warn(adap->pdev_dev,
+                "ULD registration failed for uld type %d\n", type);
+}
+
+void cxgb4_uld_enable(struct adapter *adap)
+{
+       struct cxgb4_uld_list *uld_entry;
+
+       mutex_lock(&uld_mutex);
+       list_add_tail(&adap->list_node, &adapter_list);
+       list_for_each_entry(uld_entry, &uld_list, list_node)
+               cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
+                                         &uld_entry->uld_info);
+       mutex_unlock(&uld_mutex);
+}
+
 /* cxgb4_register_uld - register an upper-layer driver
  * @type: the ULD type
  * @p: the ULD methods
@@ -691,63 +759,23 @@ static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
 void cxgb4_register_uld(enum cxgb4_uld type,
                        const struct cxgb4_uld_info *p)
 {
+       struct cxgb4_uld_list *uld_entry;
        struct adapter *adap;
-       int ret = 0;
 
        if (type >= CXGB4_ULD_MAX)
                return;
 
+       uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
+       if (!uld_entry)
+               return;
+
+       memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
        mutex_lock(&uld_mutex);
-       list_for_each_entry(adap, &adapter_list, list_node) {
-               if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
-                   (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
-                       continue;
-               if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
-                       continue;
-               ret = cfg_queues_uld(adap, type, p);
-               if (ret)
-                       goto out;
-               ret = setup_sge_queues_uld(adap, type, p->lro);
-               if (ret)
-                       goto free_queues;
-               if (adap->flags & CXGB4_USING_MSIX) {
-                       ret = request_msix_queue_irqs_uld(adap, type);
-                       if (ret)
-                               goto free_rxq;
-               }
-               if (adap->flags & CXGB4_FULL_INIT_DONE)
-                       enable_rx_uld(adap, type);
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
-               /* send mbox to enable ktls related settings. */
-               if (type == CXGB4_ULD_CRYPTO &&
-                   (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
-                       cxgb4_set_ktls_feature(adap, 1);
-#endif
-               if (adap->uld[type].add)
-                       goto free_irq;
-               ret = setup_sge_txq_uld(adap, type, p);
-               if (ret)
-                       goto free_irq;
-               adap->uld[type] = *p;
-               ret = uld_attach(adap, type);
-               if (ret)
-                       goto free_txq;
-               continue;
-free_txq:
-               release_sge_txq_uld(adap, type);
-free_irq:
-               if (adap->flags & CXGB4_FULL_INIT_DONE)
-                       quiesce_rx_uld(adap, type);
-               if (adap->flags & CXGB4_USING_MSIX)
-                       free_msix_queue_irqs_uld(adap, type);
-free_rxq:
-               free_sge_queues_uld(adap, type);
-free_queues:
-               free_queues_uld(adap, type);
-out:
-               dev_warn(adap->pdev_dev,
-                        "ULD registration failed for uld type %d\n", type);
-       }
+       list_for_each_entry(adap, &adapter_list, list_node)
+               cxgb4_uld_alloc_resources(adap, type, p);
+
+       uld_entry->uld_type = type;
+       list_add_tail(&uld_entry->list_node, &uld_list);
        mutex_unlock(&uld_mutex);
        return;
 }
@@ -761,6 +789,7 @@ EXPORT_SYMBOL(cxgb4_register_uld);
  */
 int cxgb4_unregister_uld(enum cxgb4_uld type)
 {
+       struct cxgb4_uld_list *uld_entry, *tmp;
        struct adapter *adap;
 
        if (type >= CXGB4_ULD_MAX)
@@ -783,6 +812,13 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
                        cxgb4_set_ktls_feature(adap, 0);
 #endif
        }
+
+       list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
+               if (uld_entry->uld_type == type) {
+                       list_del(&uld_entry->list_node);
+                       kfree(uld_entry);
+               }
+       }
        mutex_unlock(&uld_mutex);
 
        return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 1679678..085fa14 100644
@@ -327,6 +327,7 @@ enum cxgb4_control {
        CXGB4_CONTROL_DB_DROP,
 };
 
+struct adapter;
 struct pci_dev;
 struct l2t_data;
 struct net_device;
@@ -465,6 +466,7 @@ struct cxgb4_uld_info {
        int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
 };
 
+void cxgb4_uld_enable(struct adapter *adap);
 void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
 int cxgb4_unregister_uld(enum cxgb4_uld type);
 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);