	struct ath12k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	int i;
+	u32 pool_id, tx_spt_page;

	if (!dp->spt_info)
		return;

		dev_kfree_skb_any(skb);
	}
+	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+		if (!dp->spt_info->rxbaddr[i])
+			continue;
+
+		kfree(dp->spt_info->rxbaddr[i]);
+		dp->spt_info->rxbaddr[i] = NULL;
+	}
+
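For context on the hunk above: the driver now remembers the base address of each per-page RX descriptor array in the new rxbaddr[] slots, and these kfree() calls are what was previously missing from this cleanup path. Below is a minimal standalone sketch of the record-at-alloc / free-at-cleanup pairing, with libc calloc/free standing in for kcalloc/kfree and all names and sizes purely illustrative:

#include <stdlib.h>

#define NUM_RX_PAGES      4   /* stands in for ATH12K_NUM_RX_SPT_PAGES */
#define ENTRIES_PER_PAGE  8   /* stands in for ATH12K_MAX_SPT_ENTRIES */

struct rx_desc {
	unsigned int cookie;
};

/* One base pointer per descriptor page: this is what rxbaddr[] adds. */
static struct rx_desc *rxbaddr[NUM_RX_PAGES];

static int desc_init(void)
{
	int i;

	for (i = 0; i < NUM_RX_PAGES; i++) {
		struct rx_desc *descs = calloc(ENTRIES_PER_PAGE, sizeof(*descs));

		if (!descs)
			return -1;              /* caller runs cleanup() below */

		rxbaddr[i] = &descs[0];         /* remember the allocation base */
	}
	return 0;
}

static void cleanup(void)
{
	int i;

	for (i = 0; i < NUM_RX_PAGES; i++) {
		if (!rxbaddr[i])                /* page never allocated (partial init) */
			continue;

		free(rxbaddr[i]);
		rxbaddr[i] = NULL;              /* make a second cleanup pass harmless */
	}
}

int main(void)
{
	int ret = desc_init();

	cleanup();                              /* safe after full or partial init */
	return ret ? 1 : 0;
}

Clearing each slot right after freeing it keeps the cleanup idempotent, so running it again, or after a partially failed init, cannot double-free.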
	spin_unlock_bh(&dp->rx_desc_lock);

	/* TX Descriptor cleanup */

		spin_unlock_bh(&dp->tx_desc_lock[i]);
	}
+	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
+		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+
+		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
+			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+			if (!dp->spt_info->txbaddr[tx_spt_page])
+				continue;
+
+			kfree(dp->spt_info->txbaddr[tx_spt_page]);
+			dp->spt_info->txbaddr[tx_spt_page] = NULL;
+		}
+
+		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+	}
+
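The TX side mirrors the RX loop, with each pool freed under its own tx_desc_lock and the global page index rebuilt from the (pool, page-within-pool) pair. A tiny standalone example of that flattening, with made-up sizes (the values here are placeholders, not the real ATH12K_* constants):

#include <stdio.h>

#define HW_MAX_QUEUES         3   /* stands in for ATH12K_HW_MAX_QUEUES */
#define TX_SPT_PAGES_PER_POOL 2   /* stands in for ATH12K_TX_SPT_PAGES_PER_POOL */

int main(void)
{
	unsigned int pool_id, i;

	for (pool_id = 0; pool_id < HW_MAX_QUEUES; pool_id++) {
		for (i = 0; i < TX_SPT_PAGES_PER_POOL; i++) {
			/* Flatten (pool, page-in-pool) into one global TX page
			 * index, the same arithmetic used in the hunk above. */
			unsigned int tx_spt_page = i + pool_id * TX_SPT_PAGES_PER_POOL;

			printf("pool %u, page %u -> tx_spt_page %u\n",
			       pool_id, i, tx_spt_page);
		}
	}
	return 0;
}

Each pool therefore owns a contiguous slice of txbaddr[], which is presumably why holding only that pool's lock around its part of the free loop is sufficient. (The unprefixed context lines that follow jump from the cleanup path into the descriptor-initialization path; the stray return -ENOMEM lines there are the tails of that function's allocation-failure branches, not part of the cleanup loops.)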
	/* unmap SPT pages */
	for (i = 0; i < dp->num_spt_pages; i++) {
		if (!dp->spt_info[i].vaddr)
			continue;

			return -ENOMEM;
		}
+		dp->spt_info->rxbaddr[i] = &rx_descs[0];
+
		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;

				return -ENOMEM;
			}
+			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+			dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];
+
			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
-				tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
				ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
				tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
				tx_descs[j].pool_id = pool_id;
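
Two details in this last hunk are easy to miss. tx_spt_page depends only on pool_id and i, so it is hoisted out of the per-entry loop and computed once, next to the new txbaddr[] assignment. And TX descriptor pages sit after the RX pages in the ppt_idx space, hence the ATH12K_NUM_RX_SPT_PAGES offset before the cookie is generated. A schematic worked example follows; the 16/16-bit split in cookie_gen() is an assumption for illustration only and is not claimed to match the real ath12k_dp_cc_cookie_gen():

#include <stdio.h>

#define NUM_RX_SPT_PAGES      4   /* stands in for ATH12K_NUM_RX_SPT_PAGES */
#define TX_SPT_PAGES_PER_POOL 2   /* stands in for ATH12K_TX_SPT_PAGES_PER_POOL */

/* Illustrative packing only: page index in the high half, entry index in
 * the low half. */
static unsigned int cookie_gen(unsigned int ppt_idx, unsigned int spt_idx)
{
	return (ppt_idx << 16) | spt_idx;
}

int main(void)
{
	unsigned int pool_id = 1, i = 0, j = 5;
	unsigned int tx_spt_page = i + pool_id * TX_SPT_PAGES_PER_POOL;  /* = 2 */
	/* TX pages are addressed after all RX pages. */
	unsigned int ppt_idx = NUM_RX_SPT_PAGES + tx_spt_page;           /* = 6 */

	printf("desc_id for pool %u, page %u, entry %u = 0x%08x\n",
	       pool_id, i, j, cookie_gen(ppt_idx, j));                   /* 0x00060005 */
	return 0;
}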