if (!trans->txqs.bc_pool)
return NULL;
}
+
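+ /* Set up the TFD geometry: TFH (gen2) TFDs carry 64-bit DMA addresses
+ * and more TBs than the legacy 36-bit format.
+ */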
+ if (trans->trans_cfg->use_tfh) {
+ trans->txqs.tfd.addr_size = 64;
+ trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
+ trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
+ } else {
+ trans->txqs.tfd.addr_size = 36;
+ trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
+ trans->txqs.tfd.size = sizeof(struct iwl_tfd);
+ }
+ trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
+
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
*/
#define IWL_MAX_CMD_TBS_PER_TFD 2
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
+
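For reference, the budget above works out as follows. This is an illustrative sketch only, not kernel code: the EXAMPLE_* names are hypothetical, and the TB counts are assumed to match IWL_NUM_OF_TBS (20) and IWL_TFH_NUM_TBS (25) from iwl-fh.h.

	/* Hypothetical sketch of the per-format TB budget. */
	#define EXAMPLE_LEGACY_NUM_TBS	20	/* assumed IWL_NUM_OF_TBS */
	#define EXAMPLE_TFH_NUM_TBS	25	/* assumed IWL_TFH_NUM_TBS */

	/* 2 TBs for the TX command and header, 1 kept for possible data in
	 * the skb head, the remaining TBs carry page fragments.
	 */
	#define EXAMPLE_MAX_FRAGS(num_tbs)	((num_tbs) - 3)

	_Static_assert(EXAMPLE_MAX_FRAGS(EXAMPLE_LEGACY_NUM_TBS) == 17,
		       "legacy TFDs leave 17 TBs for frags");
	_Static_assert(EXAMPLE_MAX_FRAGS(EXAMPLE_TFH_NUM_TBS) == 22,
		       "gen2 TFH TFDs leave 22 TBs for frags");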
/**
* enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
unsigned int wdg_timeout;
} cmd;
+ struct {
+ u8 max_tbs; /* max number of TBs in a single TFD */
+ u16 size; /* TFD size in bytes */
+ u8 addr_size; /* TB DMA address width in bits (36 or 64) */
+ } tfd;
};
/**
#include "iwl-op-mode.h"
#include "iwl-drv.h"
-/* We need 2 entries for the TX command and header, and another one might
- * be needed for potential data in the SKB's head. The remaining ones can
- * be used for frags.
- */
-#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
-
/*
* RX related structures and functions
*/
u8 def_rx_queue;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
- u8 max_tbs;
- u16 tfd_size;
u16 num_rx_bufs;
enum iwl_amsdu_size rx_buf_size;
static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
struct iwl_txq *txq, int idx)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
if (trans->trans_cfg->use_tfh)
idx = iwl_pcie_get_cmd_index(txq, idx);
- return txq->tfds + trans_pcie->tfd_size * idx;
+ return txq->tfds + trans->txqs.tfd.size * idx;
}
static inline const char *queue_name(struct device *dev,
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 cmdlen = 0;
int i;
- for (i = 0; i < trans_pcie->max_tbs; i++)
+ for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
return cmdlen;
data = (void *)dump_data->data;
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
- u16 tfd_size = trans_pcie->tfd_size;
+ u16 tfd_size = trans->txqs.tfd.size;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
txcmd = (void *)data->data;
trans_pcie->def_rx_queue = 0;
- if (cfg_trans->use_tfh) {
- addr_size = 64;
- trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
- trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
- } else {
- addr_size = 36;
- trans_pcie->max_tbs = IWL_NUM_OF_TBS;
- trans_pcie->tfd_size = sizeof(struct iwl_tfd);
- }
- trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
-
pci_set_master(pdev);
+ addr_size = trans->txqs.tfd.addr_size;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
if (!ret)
ret = pci_set_consistent_dma_mask(pdev,
struct iwl_cmd_meta *meta,
struct iwl_tfh_tfd *tfd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i, num_tbs;
/* Sanity check on number of chunks */
num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
- if (num_tbs > trans_pcie->max_tbs) {
+ if (num_tbs > trans->txqs.tfd.max_tbs) {
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
return;
}
struct iwl_tfh_tfd *tfd, dma_addr_t addr,
u16 len)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
struct iwl_tfh_tb *tb;
tb = &tfd->tbs[idx];
/* Each TFD can point to a maximum max_tbs Tx buffers */
- if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
+ if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans_pcie->max_tbs);
+ trans->txqs.tfd.max_tbs);
return -EINVAL;
}
return -EINVAL;
if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
__skb_linearize(skb))
return -ENOMEM;
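Both TX paths (gen2 above, legacy further below) apply the same budget when deciding whether an skb must be linearized before it can be mapped. A minimal stand-alone sketch of that decision, using hypothetical example_* names and no kernel dependencies:

	#include <stdbool.h>

	/* True when the skb's page fragments will not fit in the TFD's TB
	 * budget (max_tbs minus the 3 reserved entries) and the skb has to
	 * be linearized before mapping.
	 */
	static bool example_needs_linearize(bool nonlinear, unsigned int nr_frags,
					    unsigned int max_tbs)
	{
		unsigned int max_frags = max_tbs - 3;	/* cf. IWL_TRANS_MAX_FRAGS */

		return nonlinear && nr_frags > max_frags;
	}

With max_tbs of 25, for example, a nonlinear skb carrying 23 or more fragments would be linearized first.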
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct device *dev = trans->dev;
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans_pcie->tfd_size * txq->n_window,
+ trans->txqs.tfd.size * txq->n_window,
txq->tfds, txq->dma_addr);
dma_free_coherent(dev,
sizeof(*txq->first_tb_bufs) * txq->n_window,
struct iwl_cmd_meta *meta,
struct iwl_txq *txq, int index)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i, num_tbs;
void *tfd = iwl_pcie_get_tfd(trans, txq, index);
/* Sanity check on number of chunks */
num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
- if (num_tbs > trans_pcie->max_tbs) {
+ if (num_tbs > trans->txqs.tfd.max_tbs) {
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
/* @todo issue fatal error, it is quite serious situation */
return;
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
dma_addr_t addr, u16 len, bool reset)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
void *tfd;
u32 num_tbs;
- tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
+ tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
if (reset)
- memset(tfd, 0, trans_pcie->tfd_size);
+ memset(tfd, 0, trans->txqs.tfd.size);
num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
/* Each TFD can point to a maximum max_tbs Tx buffers */
- if (num_tbs >= trans_pcie->max_tbs) {
+ if (num_tbs >= trans->txqs.tfd.max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans_pcie->max_tbs);
+ trans->txqs.tfd.max_tbs);
return -EINVAL;
}
int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = trans_pcie->tfd_size *
+ size_t tfd_sz = trans->txqs.tfd.size *
trans->trans_cfg->base_params->max_tfd_queue_size;
size_t tb0_buf_sz;
int i;
return -EINVAL;
if (trans->trans_cfg->use_tfh)
- tfd_sz = trans_pcie->tfd_size * slots_num;
+ tfd_sz = trans->txqs.tfd.size * slots_num;
timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
txq->trans = trans;
*/
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
struct device *dev = trans->dev;
int i;
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans_pcie->tfd_size *
+ trans->txqs.tfd.size *
trans->trans_cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
- trans_pcie->tfd_size,
+ trans->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
}
if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
__skb_linearize(skb))
return -ENOMEM;
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_pcie_get_tfd(trans, txq,
txq->write_ptr),
- trans_pcie->tfd_size,
+ trans->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);