* iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
* this one. The op_mode must not configure the HCMD queue. The scheduler
* configuration may be %NULL, in which case the hardware will not be
- * configured. May sleep.
+ * configured. If true is returned, the op_mode must increment the
+ * sequence number of the packets routed to this queue, to work around
+ * a hardware scheduler bug. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
* @txq_set_shared_mode: change Tx queue shared/unshared marking
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs);
- void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
+ bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int queue_wdg_timeout);
void (*txq_disable)(struct iwl_trans *trans, int queue,
trans->ops->txq_disable(trans, queue, configure_scd);
}
-static inline void
+static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int queue_wdg_timeout)
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
+ return false;
}
- trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
+ return trans->ops->txq_enable(trans, queue, ssn,
+ cfg, queue_wdg_timeout);
}
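/*
 * Illustrative only, not part of this patch: a minimal sketch of how an
 * op_mode caller is expected to consume the new bool return value of
 * iwl_trans_txq_enable_cfg() (the real handling is in
 * iwl_mvm_sta_alloc_queue() further below). The helper name and the
 * next_seq_ctrl parameter are placeholders for illustration.
 */
static void example_enable_queue(struct iwl_trans *trans, int queue, u16 ssn,
				 const struct iwl_trans_txq_scd_cfg *cfg,
				 unsigned int wdg_timeout, u16 *next_seq_ctrl)
{
	/* If the transport had to bump the SCD write pointer, keep our
	 * sequence numbering in sync so the scheduler does not get stuck. */
	if (iwl_trans_txq_enable_cfg(trans, queue, ssn, cfg, wdg_timeout))
		*next_seq_ctrl += 0x10;	/* seq_ctrl holds the SN in bits 4-15 */
}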
static inline void
}
/* hw scheduler queue config */
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
bool using_inactive_queue = false, same_sta = false;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
- bool shared_queue = false;
+ bool shared_queue = false, inc_ssn;
int ssn;
unsigned long tfd_queue_mask;
int ret;
}
ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
- wdg_timeout);
+ inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
+ ssn, &cfg, wdg_timeout);
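+ /*
+  * The transport had to bump the SCD write pointer by one (see the
+  * scheduler bug workaround in iwl_trans_pcie_txq_enable()), so advance
+  * the SSN and the packet's sequence number to match; seq_ctrl holds
+  * the SN in bits 4-15, hence the 0x10 step.
+  */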
+ if (inc_ssn) {
+ ssn = (ssn + 1) & IEEE80211_SN_MASK;
+ le16_add_cpu(&hdr->seq_ctrl, 0x10);
+ }
/*
* Mark queue as shared in transport if shared
iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
spin_lock_bh(&mvmsta->lock);
+ /*
+ * This looks racy, but it is not. We have only one packet for
+ * this ra/tid in our Tx path since we stop the Qdisc when we
+ * need to allocate a new TFD queue.
+ */
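+ /* Keep the station's next sequence number in sync with the header
+  * we just bumped above. */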
+ if (inc_ssn)
+ mvmsta->tid_data[tid].seq_number += 0x10;
mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
return queue;
}
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = cfg->frame_limit,
+ .sta_id = cfg->sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = cfg->fifo,
+ .aggregate = cfg->aggregate,
+ .tid = cfg->tid,
+ };
+ bool inc_ssn;
+
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
+ return false;
/* Send the enabling command if we need to */
- if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
- cfg->sta_id, cfg->tid)) {
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_ENABLE_QUEUE,
- .window = cfg->frame_limit,
- .sta_id = cfg->sta_id,
- .ssn = cpu_to_le16(ssn),
- .tx_fifo = cfg->fifo,
- .aggregate = cfg->aggregate,
- .tid = cfg->tid,
- };
-
- iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
- wdg_timeout);
- WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
- sizeof(struct iwl_scd_txq_cfg_cmd),
- &cmd),
- "Failed to configure queue %d on FIFO %d\n", queue,
- cfg->fifo);
- }
+ if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+ cfg->sta_id, cfg->tid))
+ return false;
+
+ inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+ NULL, wdg_timeout);
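+ /*
+  * The transport bumped the write pointer to work around the scheduler
+  * bug; keep the SSN reported to the firmware in sync with it.
+  */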
+ if (inc_ssn)
+ le16_add_cpu(&cmd.ssn, 1);
+
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+ "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+
+ return inc_ssn;
}
int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
-void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
+bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
* combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
-void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
+bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
int fifo = -1;
+ bool scd_bug = false;
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
ssn = txq->read_ptr;
}
+ } else {
+ /*
+ * If we need to move the SCD write pointer by steps of
+ * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this by bumping the
+ * SSN here and let the op_mode know by returning true.
+ * Do this only when cfg is NULL, since this trick can only be
+ * used with DQA enabled, which is true for mvm only, and mvm
+ * never passes a cfg pointer in that case.
+ * This is really ugly, but it is the easiest way out of this
+ * sad hardware issue.
+ * The bug has been fixed on 9000-series devices and up.
+ */
+ scd_bug = !trans->cfg->mq_rx_supported &&
+ !((ssn - txq->write_ptr) & 0x3f) &&
+ (ssn != txq->write_ptr);
+ if (scd_bug)
+ ssn++;
}
/* Place first TFD at index corresponding to start sequence number.
"Activate queue %d WrPtr: %d\n",
txq_id, ssn & 0xff);
}
+
+ return scd_bug;
}
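/*
 * Illustrative only, not part of this patch: the workaround triggers
 * exactly when the requested SSN and the current write pointer are a
 * non-zero multiple of 0x40 apart (0x40, 0x80 or 0xc0 per the comment
 * above), and only on pre-9000 devices (!mq_rx_supported). A
 * self-contained sketch of the predicate, with a hypothetical name:
 */
static bool example_scd_write_ptr_would_stick(u16 ssn, u16 write_ptr)
{
	/* multiple of 0x40 apart, but not the same slot */
	return !((ssn - write_ptr) & 0x3f) && ssn != write_ptr;
}
/*
 * e.g. example_scd_write_ptr_would_stick(0x40, 0) is true and the SSN
 * gets bumped, while (0x41, 0) and (0, 0) are false.
 */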
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,