ret = iwl_sta_rx_agg_stop(priv, sta, tid);
break;
case IEEE80211_AMPDU_TX_START:
- if (!priv->trans->ops->tx_agg_setup)
+ if (!priv->trans->ops->txq_enable)
break;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
break;
fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
- iwl_trans_tx_agg_setup(priv->trans, q, fifo,
- sta_priv->sta_id, tid,
- buf_size, ssn);
+ iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
+ buf_size, ssn);
/*
* If the limit is 0, then it wasn't initialised yet,
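The call site above reaches the transport only after two guards pass: the transport must actually implement txq_enable, and the user must not have disabled HT Tx aggregation through the disable_11n module parameter. A minimal userspace sketch of that double guard, not iwlwifi code; the demo_* names and the feature-bit value are invented:

/*
 * Sketch of the guards taken before starting Tx aggregation.
 */
#include <stdbool.h>

#define DEMO_DISABLE_HT_TXAGG	0x2	/* one bit in a "disable 11n" mask */

struct demo_trans;

struct demo_ops {
	/* left NULL by transports that cannot enable AMPDU Tx queues */
	void (*txq_enable)(struct demo_trans *trans, int queue);
};

struct demo_trans {
	const struct demo_ops *ops;
	unsigned int disable_11n;	/* stands in for the module parameter */
};

static bool demo_may_start_tx_agg(struct demo_trans *trans)
{
	if (!trans->ops->txq_enable)	/* transport lacks the op */
		return false;
	if (trans->disable_11n & DEMO_DISABLE_HT_TXAGG)	/* user kill switch */
		return false;
	return true;
}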
* Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
* Must be atomic
- * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
+ * @txq_enable: setup a tx queue for AMPDU - will be called once the HW is
* ready and a successful ADDBA response has been received.
* May sleep
 * @txq_disable: de-configure a Tx queue that was set up to send AMPDUs
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs);
- void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo,
- int sta_id, int tid, int frame_limit, u16 ssn);
+ void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn);
void (*txq_disable)(struct iwl_trans *trans, int queue);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry *dir);
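The ops structure is the transport layer's vtable: each transport fills in the function pointers it supports, and the rename simply pairs txq_enable with the existing txq_disable. A hedged sketch of binding an implementation to such a table with designated initializers, mirroring the PCIe ops update further down; all demo_* names are invented:

struct demo_trans;

struct demo_ops {
	void (*txq_enable)(struct demo_trans *trans, int queue, int fifo);
	void (*txq_disable)(struct demo_trans *trans, int queue);
};

static void demo_pcie_txq_enable(struct demo_trans *trans, int queue, int fifo)
{
	/* hardware-specific queue setup would go here */
}

static void demo_pcie_txq_disable(struct demo_trans *trans, int queue)
{
	/* hardware-specific queue teardown would go here */
}

static const struct demo_ops demo_pcie_ops = {
	.txq_enable	= demo_pcie_txq_enable,
	.txq_disable	= demo_pcie_txq_disable,
};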
trans->ops->txq_disable(trans, queue);
}
-static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue,
- int fifo, int sta_id, int tid,
- int frame_limit, u16 ssn)
+static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
+ int fifo, int sta_id, int tid,
+ int frame_limit, u16 ssn)
{
might_sleep();
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"%s bad state = %d", __func__, trans->state);
- trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid,
+ trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
frame_limit, ssn);
}
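The inline wrapper enforces the documented contract at runtime: might_sleep() flags callers that are in atomic context, and WARN_ONCE() complains if the op is invoked before the firmware is alive. A userspace analogue of that assert-then-dispatch pattern, with fprintf() standing in for WARN_ONCE() and invented demo_* names:

#include <stdio.h>

enum demo_state { DEMO_TRANS_NO_FW, DEMO_TRANS_FW_ALIVE };

struct demo_trans {
	enum demo_state state;
	void (*txq_enable)(struct demo_trans *trans, int queue);
};

static void demo_trans_txq_enable(struct demo_trans *trans, int queue)
{
	/* the kernel version also calls might_sleep() at this point */
	if (trans->state != DEMO_TRANS_FW_ALIVE)
		fprintf(stderr, "%s bad state = %d\n", __func__, trans->state);
	trans->txq_enable(trans, queue);
}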
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
int tx_fifo_id, bool active);
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
- int sta_id, int tid, int frame_limit, u16 ssn);
+void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
+ int fifo, int sta_id, int tid,
+ int frame_limit, u16 ssn);
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn);
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->scd_bc_tbls.dma >> 10);
- iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
- SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
- iwl_write_prph(trans, SCD_AGGR_SEL, 0);
-
- /* initiate the queues */
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- iwl_trans_set_wr_ptrs(trans, i, 0);
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- }
-
for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
int fifo = trans_pcie->setup_q_to_fifo[i];
- set_bit(i, trans_pcie->queue_used);
-
- iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
- fifo, true);
+ __iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
+ IWL_TID_NON_QOS,
+ SCD_FRAME_LIMIT, 0);
}
/* Activate all Tx DMA/FIFO channels */
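With the per-queue SCD context writes folded into __iwl_trans_pcie_txq_enable(), the startup loop above marks default (non-aggregation) queues by passing sentinel values for the station and TID. A small sketch of that sentinel-argument pattern; DEMO_INVALID_STATION is invented, standing in for IWL_INVALID_STATION:

#define DEMO_INVALID_STATION	(-1)

static void demo_txq_enable(int txq_id, int sta_id, int tid)
{
	/* common setup shared by default and aggregation queues ... */

	if (sta_id != DEMO_INVALID_STATION) {
		/* aggregation-only setup: map (sta_id, tid) to txq_id */
	}
}

static void demo_tx_start(int num_queues)
{
	int i;

	/* default queues carry no station mapping, hence the sentinel */
	for (i = 0; i < num_queues; i++)
		demo_txq_enable(i, DEMO_INVALID_STATION, 0);
}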
.reclaim = iwl_trans_pcie_reclaim,
.txq_disable = iwl_trans_pcie_txq_disable,
- .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
+ .txq_enable = iwl_trans_pcie_txq_enable,
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
- int sta_id, int tid, int frame_limit, u16 ssn)
+void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
+ int fifo, int sta_id, int tid,
+ int frame_limit, u16 ssn)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- u16 ra_tid = BUILD_RAxTID(sta_id, tid);
+
+ lockdep_assert_held(&trans_pcie->irq_lock);
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
/* Stop this Tx queue before configuring it */
iwlagn_tx_queue_stop_scheduler(trans, txq_id);
- /* Map receiver-address / traffic-ID to this queue */
- iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
+ /* Set this queue as a chain-building queue unless it is CMD queue */
+ if (txq_id != trans_pcie->cmd_queue)
+ iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
+
+	/* If this queue is mapped to a certain station, it is an AGG queue */
+ if (sta_id != IWL_INVALID_STATION) {
+ u16 ra_tid = BUILD_RAxTID(sta_id, tid);
- /* Set this queue as a chain-building queue */
- iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
+ /* Map receiver-address / traffic-ID to this queue */
+ iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
- /* enable aggregations for the queue */
- iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ /* enable aggregations for the queue */
+ iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ }
/* Place first TFD at index corresponding to start sequence number.
 * Assumes that ssn is valid (!= 0xFFF) */
/* Set up Tx window size and frame limit for this queue */
iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
+ iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
fifo, true);
+}
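The two iwl_write_targ_mem() calls above pack the Tx window size and frame limit into a 32-bit SCD context word using the usual shift-and-mask idiom. A worked userspace example of that packing; the DEMO_* positions and masks are invented, not the real SCD_QUEUE_CTX_REG2 layout:

#include <stdint.h>

/* invented field layout, for illustration only */
#define DEMO_WIN_SIZE_POS	0
#define DEMO_WIN_SIZE_MSK	0x0000007f
#define DEMO_FRAME_LIMIT_POS	16
#define DEMO_FRAME_LIMIT_MSK	0x007f0000

static uint32_t demo_pack_ctx_reg2(uint32_t win_size, uint32_t frame_limit)
{
	/* shift each field to its position, mask off anything oversized */
	return ((win_size << DEMO_WIN_SIZE_POS) & DEMO_WIN_SIZE_MSK) |
	       ((frame_limit << DEMO_FRAME_LIMIT_POS) & DEMO_FRAME_LIMIT_MSK);
}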
+
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+ __iwl_trans_pcie_txq_enable(trans, txq_id, fifo, sta_id,
+ tid, frame_limit, ssn);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
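Splitting the function into a bare __iwl_trans_pcie_txq_enable(), which lockdep-asserts that irq_lock is already held, and a plain-named wrapper that takes the lock follows a common kernel naming convention. A compact pthread sketch of the same split, with invented demo_* names:

#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* bare worker: caller must hold demo_lock (the kernel version asserts
 * this with lockdep_assert_held()) */
static void __demo_txq_enable(int txq_id)
{
	/* queue configuration, safe only while demo_lock is held */
}

/* public entry point: takes the lock, then calls the bare worker */
static void demo_txq_enable(int txq_id)
{
	pthread_mutex_lock(&demo_lock);
	__demo_txq_enable(txq_id);
	pthread_mutex_unlock(&demo_lock);
}

This is what lets the tx_start path above call the bare variant for every queue while it already holds irq_lock, without any recursive locking.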