wifi: ath12k: Fix memory leak in rx_desc and tx_desc
author Rajat Soni <quic_rajson@quicinc.com>
Tue, 22 Aug 2023 13:42:23 +0000 (16:42 +0300)
committer Kalle Valo <quic_kvalo@quicinc.com>
Wed, 23 Aug 2023 14:17:44 +0000 (17:17 +0300)
Currently, when ath12k_dp_cc_desc_init() is called, we allocate
memory for rx_descs and tx_descs. In ath12k_dp_cc_cleanup(), the
rx_descs and tx_descs memory is not freed during descriptor cleanup.

This causes a memory leak. The allocated memory should be freed in
ath12k_dp_cc_cleanup().

In ath12k_dp_cc_desc_init(), we can save the base addresses of
rx_descs and tx_descs. In ath12k_dp_cc_cleanup(), we can then free
the rx_descs and tx_descs memory using those base addresses.

Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.0.1-00029-QCAHKSWPL_SILICONZ-1

Signed-off-by: Rajat Soni <quic_rajson@quicinc.com>
Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
Link: https://lore.kernel.org/r/20230718053510.30894-1-quic_rajson@quicinc.com
drivers/net/wireless/ath/ath12k/dp.c
drivers/net/wireless/ath/ath12k/dp.h

drivers/net/wireless/ath/ath12k/dp.c
index ae1645d..f933896 100644
@@ -1129,6 +1129,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
        struct ath12k_dp *dp = &ab->dp;
        struct sk_buff *skb;
        int i;
+       u32 pool_id, tx_spt_page;
 
        if (!dp->spt_info)
                return;
@@ -1148,6 +1149,14 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
                dev_kfree_skb_any(skb);
        }
 
+       for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+               if (!dp->spt_info->rxbaddr[i])
+                       continue;
+
+               kfree(dp->spt_info->rxbaddr[i]);
+               dp->spt_info->rxbaddr[i] = NULL;
+       }
+
        spin_unlock_bh(&dp->rx_desc_lock);
 
        /* TX Descriptor cleanup */
@@ -1170,6 +1179,21 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
                spin_unlock_bh(&dp->tx_desc_lock[i]);
        }
 
+       for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
+               spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+
+               for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
+                       tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+                       if (!dp->spt_info->txbaddr[tx_spt_page])
+                               continue;
+
+                       kfree(dp->spt_info->txbaddr[tx_spt_page]);
+                       dp->spt_info->txbaddr[tx_spt_page] = NULL;
+               }
+
+               spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+       }
+
        /* unmap SPT pages */
        for (i = 0; i < dp->num_spt_pages; i++) {
                if (!dp->spt_info[i].vaddr)
@@ -1343,6 +1367,8 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
                        return -ENOMEM;
                }
 
+               dp->spt_info->rxbaddr[i] = &rx_descs[0];
+
                for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
                        rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
                        rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
@@ -1368,8 +1394,10 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
                                return -ENOMEM;
                        }
 
+                       tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+                       dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];
+
                        for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
-                               tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
                                ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
                                tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
                                tx_descs[j].pool_id = pool_id;

drivers/net/wireless/ath/ath12k/dp.h
index 6e7b728..61f7654 100644
@@ -289,6 +289,8 @@ struct ath12k_tx_desc_info {
 struct ath12k_spt_info {
        dma_addr_t paddr;
        u64 *vaddr;
+       struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES];
+       struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES];
 };
 
 struct ath12k_reo_queue_ref {