struct iwl_tx_queue *txq,
u16 byte_cnt)
{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ struct iwl_trans *trans = trans(priv);
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
int write_ptr = txq->q.write_ptr;
int txq_id = txq->q.id;
u8 sec_ctl = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
+ scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+
WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
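For orientation, the unchanged tail of this helper composes the entry and stores it through the pointer that now comes from the transport. A rough sketch of that tail (recalled from the driver, not shown in these hunks; the duplicate-window constant and the exact packing are approximate, and the security-overhead adjustment of len is omitted):

	/* sketch: pack 12-bit length with 4-bit station id */
	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	/* mirror into the wrap-around duplicate window so the scheduler
	 * can keep reading past the end of the ring (constant name
	 * recalled from the driver headers, not from these hunks) */
	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
			bc_ent;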
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
struct iwl_tx_queue *txq)
{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ struct iwl_trans *trans = trans(priv);
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id = txq->q.id;
int read_ptr = txq->q.read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
+ scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
if (txq_id != priv->shrd->cmd_queue)
u32 tbl_dw;
u16 scd_q2ratid;
+ struct iwl_trans *trans = trans(priv);
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
- tbl_dw_addr = priv->scd_base_addr +
+ tbl_dw_addr = trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
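The read-modify-write that this value feeds is untouched by the patch; only the base address it is computed from moves to the transport. Roughly, as a sketch recalled from the surrounding driver code rather than from these hunks, the RA/TID value lands in one half-word of the translation-table entry depending on queue parity:

	/* sketch: parity-based half-word packing, not shown in these hunks */
	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);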
unsigned long flags;
struct iwl_tid_data *tid_data;
+ struct iwl_trans *trans = trans(priv);
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
if (WARN_ON(sta_id == IWL_INVALID_STATION))
return;
if (WARN_ON(tid >= MAX_TID_COUNT))
iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
/* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(priv, priv->scd_base_addr +
+ iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
sizeof(u32),
((frame_limit <<
static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
{
int txq_id;
+ struct iwl_trans *trans = trans(priv);
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
/* Tx queues */
if (priv->txq) {
iwlagn_free_dma_ptr(priv, &priv->kw);
- iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
+ iwlagn_free_dma_ptr(priv, &trans_pcie->scd_bc_tbls);
}
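iwlagn_alloc_dma_ptr()/iwlagn_free_dma_ptr() are thin wrappers around the coherent DMA API, so passing &trans_pcie->scd_bc_tbls instead of &priv->scd_bc_tbls only changes where the descriptor lives, not how the buffer is managed. A minimal sketch of what the call sites here assume (signatures and the priv->bus->dev device pointer are inferred from this era of the driver, treat as approximate):

static int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
				struct iwl_dma_ptr *ptr, size_t size)
{
	/* one coherent buffer; ptr->dma is what the scheduler is given.
	 * priv->bus->dev is an assumption about the device pointer. */
	ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwlagn_free_dma_ptr(struct iwl_priv *priv,
				struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}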
/**
{
int ret;
int txq_id, slots_num;
+ struct iwl_trans *trans = trans(priv);
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
/* It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
goto error;
}
- ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+ ret = iwlagn_alloc_dma_ptr(priv, &trans_pcie->scd_bc_tbls,
hw_params(priv).scd_bc_tbls_size);
if (ret) {
IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
{
const struct queue_to_fifo_ac *queue_to_fifo;
struct iwl_rxon_context *ctx;
+ struct iwl_trans *trans = trans(priv);
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
u32 a;
unsigned long flags;
int i, chan;
u32 reg_val;
- spin_lock_irqsave(&priv->shrd->lock, flags);
+ spin_lock_irqsave(&trans->shrd->lock, flags);
- priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
+ trans_pcie->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
+ a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
/* reset context data memory */
- for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
+ for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
a += 4)
iwl_write_targ_mem(priv, a, 0);
/* reset tx status memory */
- for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
+ for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
a += 4)
iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
+ for (; a < trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
a += 4)
iwl_write_targ_mem(priv, a, 0);
iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
+ trans_pcie->scd_bc_tbls.dma >> 10);
/* Enable DMA channel */
for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
for (i = 0; i < hw_params(priv).max_txq_num; i++) {
iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(priv, priv->scd_base_addr +
+ iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(priv, priv->scd_base_addr +
+ iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(i) +
sizeof(u32),
((SCD_WIN_SIZE <<
}
iwl_write_prph(priv, SCD_INTERRUPT_MASK,
- IWL_MASK(0, hw_params(priv).max_txq_num));
+ IWL_MASK(0, hw_params(trans).max_txq_num));
/* Activate all Tx DMA/FIFO channels */
iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
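A closing note on the data structures this patch assumes. The block programmed into SCD_DRAM_BASE_ADDR above is the byte-count table array that the first two hunks now reach through the transport. A sketch of the pieces involved (only the member names used in the hunks are taken from the patch itself; the layouts, the duplicate-window size and the accessor are reconstructed for illustration):

struct iwl_dma_ptr {
	dma_addr_t dma;		/* bus address, handed to the scheduler */
	void *addr;		/* CPU virtual address */
	size_t size;
};

/* one byte-count table per TX queue; each __le16 packs the 12-bit
 * frame length with the 4-bit station id written by the helpers above */
struct iwlagn_scd_bc_tbl {
	__le16 tfd_offset[TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP];
} __packed;

/* PCIe-transport private data now owns the scheduler state */
struct iwl_trans_pcie {
	/* ... existing members ... */
	struct iwl_dma_ptr scd_bc_tbls;	/* was priv->scd_bc_tbls */
	u32 scd_base_addr;		/* was priv->scd_base_addr */
};

/* illustrative accessor; the real macro lives in the transport header */
#define IWL_TRANS_GET_PCIE_TRANS(_trans) \
	((struct iwl_trans_pcie *)((_trans)->trans_specific))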