iwlwifi: pcie: map only used part of RX buffers
authorJohannes Berg <johannes.berg@intel.com>
Wed, 2 Oct 2019 09:33:46 +0000 (11:33 +0200)
committerLuca Coelho <luciano.coelho@intel.com>
Mon, 23 Dec 2019 09:54:31 +0000 (11:54 +0200)
We don't need to map *everything* of the RX buffers, we won't use
that much; map only the part we're going to use. This saves some
IOMMU space (if applicable, and if the IOMMU can deal with that) and
also prepares a bit for mapping partial pages for 2K buffers later.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c

index 8cadad7..872cda2 100644 (file)
@@ -358,6 +358,24 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
        }
 }
 
+static inline int
+iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
+{
+       switch (rb_size) {
+       case IWL_AMSDU_2K:
+               return 2 * 1024;
+       case IWL_AMSDU_4K:
+               return 4 * 1024;
+       case IWL_AMSDU_8K:
+               return 8 * 1024;
+       case IWL_AMSDU_12K:
+               return 12 * 1024;
+       default:
+               WARN_ON(1);
+               return 0;
+       }
+}
+
 struct iwl_hcmd_names {
        u8 cmd_id;
        const char *const cmd_name;
index c45df9a..c7d094c 100644 (file)
@@ -491,6 +491,7 @@ struct cont_rec {
  * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
  *     frame.
  * @rx_page_order: page order for receive buffer size
+ * @rx_buf_bytes: RX buffer (RB) size in bytes
  * @reg_lock: protect hw register access
  * @mutex: to protect stop_device / start_fw / start_hw
  * @cmd_in_flight: true when we have a host command in flight
@@ -581,6 +582,7 @@ struct iwl_trans_pcie {
        bool sw_csum_tx;
        bool pcie_dbg_dumped_once;
        u32 rx_page_order;
+       u32 rx_buf_bytes;
 
        /*protect hw register */
        spinlock_t reg_lock;
index f03d3cf..20f9338 100644 (file)
@@ -485,7 +485,7 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
                /* Get physical address of the RB */
                rxb->page_dma =
                        dma_map_page(trans->dev, page, 0,
-                                    PAGE_SIZE << trans_pcie->rx_page_order,
+                                    trans_pcie->rx_buf_bytes,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        rxb->page = NULL;
@@ -514,8 +514,7 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
                if (!trans_pcie->rx_pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
-                              PAGE_SIZE << trans_pcie->rx_page_order,
-                              DMA_FROM_DEVICE);
+                              trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
                __free_pages(trans_pcie->rx_pool[i].page,
                             trans_pcie->rx_page_order);
                trans_pcie->rx_pool[i].page = NULL;
@@ -575,8 +574,8 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
 
                        /* Get physical address of the RB */
                        rxb->page_dma = dma_map_page(trans->dev, page, 0,
-                                       PAGE_SIZE << trans_pcie->rx_page_order,
-                                       DMA_FROM_DEVICE);
+                                                    trans_pcie->rx_buf_bytes,
+                                                    DMA_FROM_DEVICE);
                        if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                                rxb->page = NULL;
                                __free_pages(page, trans_pcie->rx_page_order);
@@ -1248,7 +1247,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
        bool page_stolen = false;
-       int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+       int max_len = trans_pcie->rx_buf_bytes;
        u32 offset = 0;
 
        if (WARN_ON(!rxb))
@@ -1369,7 +1368,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
        if (rxb->page != NULL) {
                rxb->page_dma =
                        dma_map_page(trans->dev, rxb->page, 0,
-                                    PAGE_SIZE << trans_pcie->rx_page_order,
+                                    trans_pcie->rx_buf_bytes,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        /*
index af9bc6b..d7617eb 100644 (file)
@@ -1915,6 +1915,8 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
        trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
        trans_pcie->rx_page_order =
                iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+       trans_pcie->rx_buf_bytes =
+               iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
 
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
        trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -2933,7 +2935,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
                                   int allocated_rb_nums)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+       int max_len = trans_pcie->rx_buf_bytes;
        /* Dump RBs is supported only for pre-9000 devices (1 queue) */
        struct iwl_rxq *rxq = &trans_pcie->rxq[0];
        u32 i, r, j, rb_len = 0;