Crypto: chelsio - Fix a deadlock between rtnl_lock and uld_mutex
authorAyush Sawal <ayush.sawal@chelsio.com>
Mon, 30 Mar 2020 15:18:53 +0000 (20:48 +0530)
committerDavid S. Miller <davem@davemloft.net>
Mon, 30 Mar 2020 17:33:23 +0000 (10:33 -0700)
The two locks are acquired in conflicting (AB-BA) order by two paths
during driver registration, producing a deadlock:
(uld_mutex), at: cxgb4_register_uld.part.14+0x49/0xd60 [cxgb4]
(rtnl_mutex), at: rtnetlink_rcv_msg+0x2db/0x400
(uld_mutex), at: cxgb_up+0x3a/0x7b0 [cxgb4]
(rtnl_mutex), at: chcr_add_xfrmops+0x83/0xa0 [chcr] (stuck here)

To avoid this, the netdev features are now updated after the
cxgb4_register_uld() call has completed, so rtnl_lock is never taken
while uld_mutex is held.

Fixes: 6dad4e8ab3ec6 ("chcr: Add support for Inline IPSec")

Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/crypto/chelsio/chcr_core.c
drivers/crypto/chelsio/chcr_ipsec.c

index f149953..dfb53e7 100644 (file)
@@ -33,6 +33,10 @@ static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
 static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
 static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
 
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+static void update_netdev_features(void);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
 static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
        [CPL_FW6_PLD] = cpl_fw6_pld_handler,
 #ifdef CONFIG_CHELSIO_TLS_DEVICE
@@ -202,10 +206,6 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
        }
        u_ctx->lldi = *lld;
        chcr_dev_init(u_ctx);
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
-       if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
-               chcr_add_xfrmops(lld);
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
 
 #ifdef CONFIG_CHELSIO_TLS_DEVICE
        if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
@@ -297,6 +297,24 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
        return ret;
 }
 
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+static void update_netdev_features(void)
+{
+       struct uld_ctx *u_ctx, *tmp;
+
+       mutex_lock(&drv_data.drv_mutex);
+       list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+               if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
+                       chcr_add_xfrmops(&u_ctx->lldi);
+       }
+       list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+               if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
+                       chcr_add_xfrmops(&u_ctx->lldi);
+       }
+       mutex_unlock(&drv_data.drv_mutex);
+}
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
 static int __init chcr_crypto_init(void)
 {
        INIT_LIST_HEAD(&drv_data.act_dev);
@@ -306,6 +324,12 @@ static int __init chcr_crypto_init(void)
        drv_data.last_dev = NULL;
        cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
 
+       #ifdef CONFIG_CHELSIO_IPSEC_INLINE
+       rtnl_lock();
+       update_netdev_features();
+       rtnl_unlock();
+       #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
        return 0;
 }
 
index 9da0f93..9fd3b9d 100644 (file)
@@ -99,9 +99,7 @@ void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
                netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
                netdev->hw_enc_features |= NETIF_F_HW_ESP;
                netdev->features |= NETIF_F_HW_ESP;
-               rtnl_lock();
                netdev_change_features(netdev);
-               rtnl_unlock();
        }
 }