The queues and all the related logic belong in the transport layer.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
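For orientation, the resulting call chain for allocating an aggregation
queue is sketched below; every name is taken from this patch, only the
surrounding code is elided:

    /* iwl-agn-tx.c: the upper layer no longer picks a Tx queue itself */
    ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid,
                                 sta_id, tid, ssn);

    /* iwl-trans.h: thin inline wrapper dispatching through the ops table */
    static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
                                             enum iwl_rxon_context_id ctx,
                                             int sta_id, int tid, u16 *ssn)
    {
            return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn);
    }

    /* iwl-trans-pcie.c: the PCIe transport supplies the implementation */
    .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,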
#include "iwl-agn.h"
#include "iwl-trans.h"
-static inline int get_ac_from_tid(u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return tid_to_ac[tid];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
- int tid)
-{
- if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWLAGN_FIRST_AMPDU_QUEUE +
- hw_params(priv).num_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
- IWLAGN_FIRST_AMPDU_QUEUE +
- hw_params(priv).num_ampdu_queues - 1);
- return -EINVAL;
- }
-
- /* Modify device's station table to Tx this TID */
- return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
-}
-
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
struct ieee80211_tx_info *info,
__le16 fc, __le32 *tx_flags)
return -1;
}
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
- if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
- return txq_id;
- return -1;
-}
-
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
int sta_id;
- int txq_id;
int ret;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
sta->addr, tid);
return -ENXIO;
}
- txq_id = iwlagn_txq_ctx_activate_free(priv);
- if (txq_id == -1) {
- IWL_ERR(priv, "No free aggregation queue available\n");
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- tid_data = &priv->shrd->tid_data[sta_id][tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
- iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-
- ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
+ ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
if (ret)
return ret;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- tid_data = &priv->shrd->tid_data[sta_id][tid];
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- } else {
- IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
- tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id,
+ tid, ssn);
+
return ret;
}
return cpu_to_le32(res);
}
+void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv, u8 ctx,
+ u8 sta_id, u8 tid)
+{
+ struct ieee80211_vif *vif = priv->contexts[ctx].vif;
+ u8 *addr = priv->stations[sta_id].sta.sta.addr;
+
+ ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+}
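This helper is the bridge back from the transport to mac80211: the
transport layer only knows the (ctx, sta_id, tid) indices, so the AGN
layer resolves them to the vif pointer and station MAC address that
ieee80211_start_tx_ba_cb_irqsafe() expects. Caller-side, as used by the
PCIe transport later in this patch:

    /* transport side: the HW queue for this TID is already empty */
    tid_data->agg.state = IWL_AGG_ON;
    iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);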
IEEE80211_AC_VO
};
+static inline int get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
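get_ac_from_tid() is a plain table lookup. A self-contained userspace
sketch of the mapping it performs is shown below; the table mirrors the
usual 802.11 UP-to-AC assignment that tid_to_ac encodes, and the AC_*
values are stand-ins for the mac80211 IEEE80211_AC_* constants:

    #include <stdio.h>

    enum { AC_VO, AC_VI, AC_BE, AC_BK };    /* mac80211 orders ACs VO..BK */

    static const int tid_to_ac[] = {
            AC_BE, AC_BK, AC_BK, AC_BE,     /* TIDs 0-3 */
            AC_VI, AC_VI, AC_VO, AC_VO,     /* TIDs 4-7 */
    };

    static int get_ac_from_tid(unsigned int tid)
    {
            if (tid < sizeof(tid_to_ac) / sizeof(tid_to_ac[0]))
                    return tid_to_ac[tid];
            return -1;      /* stands in for -EINVAL: TIDs 8-15 unsupported */
    }

    int main(void)
    {
            printf("TID 6 -> AC %d (VO)\n", get_ac_from_tid(6));
            printf("TID 9 -> %d (unsupported)\n", get_ac_from_tid(9));
            return 0;
    }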
enum iwl_rxon_context_id {
IWL_RXON_CTX_BSS,
IWL_RXON_CTX_PAN,
struct iwl_cfg *cfg);
void __devexit iwl_remove(struct iwl_priv * priv);
+void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv, u8 ctx,
+ u8 sta_id, u8 tid);
+
/*****************************************************
* DRIVER STATUS FUNCTIONS
******************************************************/
void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
int tx_fifo_id, int scd_retry);
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid, u16 *ssn);
void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv,
enum iwl_rxon_context_id ctx,
int sta_id, int tid, int frame_limit);
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <net/mac80211.h>
#include "iwl-agn.h"
#include "iwl-dev.h"
spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
+{
+ int txq_id;
+
+ for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id,
+ &priv(trans)->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
+}
+
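The allocator relies on test_and_set_bit() so that two racing callers can
never claim the same queue. A self-contained userspace sketch of the same
pattern, with the GCC builtin __atomic_fetch_or() standing in for the
kernel primitive and 0x7f pre-marking queues 0-6 (the EDCA, command and
reserved queues named in the comment above):

    #include <stdio.h>

    static unsigned long txq_ctx_active_msk; /* bit N set => Tx queue N in use */

    static int txq_ctx_activate_free(int max_txq_num)
    {
            for (int txq_id = 0; txq_id < max_txq_num; txq_id++) {
                    unsigned long bit = 1UL << txq_id;

                    /* atomically set the bit; claim the queue iff it was clear */
                    if (!(__atomic_fetch_or(&txq_ctx_active_msk, bit,
                                            __ATOMIC_SEQ_CST) & bit))
                            return txq_id;
            }
            return -1;                       /* no free aggregation queue */
    }

    int main(void)
    {
            txq_ctx_active_msk = 0x7f;       /* queues 0-6 already active */
            printf("first free queue: %d\n", txq_ctx_activate_free(20)); /* 7 */
            return 0;
    }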
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id,
+ int tid, u16 *ssn)
+{
+ struct iwl_tid_data *tid_data;
+ unsigned long flags;
+ u16 txq_id;
+ struct iwl_priv *priv = priv(trans);
+
+ txq_id = iwlagn_txq_ctx_activate_free(trans);
+ if (txq_id == -1) {
+ IWL_ERR(trans, "No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
+ iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
+
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(trans, "HW queue is empty\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
+ } else {
+ IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
+ " queue\n", tid_data->tfds_in_queue);
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+
+ return 0;
+}
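The tail of iwl_trans_pcie_tx_agg_alloc() implements a deferred ADDBA
start. A minimal userspace sketch of that decision, where agg_alloc_tail
and frames_in_flight are hypothetical names used only for illustration and
the enum mirrors the driver's aggregation states:

    #include <stdio.h>

    enum agg_state { IWL_AGG_OFF, IWL_AGG_ON, IWL_EMPTYING_HW_QUEUE_ADDBA };

    /* frames_in_flight stands in for tid_data->tfds_in_queue */
    static enum agg_state agg_alloc_tail(int frames_in_flight)
    {
            if (frames_in_flight == 0)
                    return IWL_AGG_ON;   /* fire the mac80211 callback now */
            /* defer: the reclaim path starts the session once it drains */
            return IWL_EMPTYING_HW_QUEUE_ADDBA;
    }

    int main(void)
    {
            printf("empty queue -> %d (IWL_AGG_ON)\n", agg_alloc_tail(0));
            printf("5 in flight -> %d (EMPTYING_HW_QUEUE_ADDBA)\n",
                   agg_alloc_tail(5));
            return 0;
    }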
int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id)
{
struct iwl_trans *trans = trans(priv);
.reclaim = iwl_trans_pcie_reclaim,
.txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
+ .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
.txq_agg_setup = iwl_trans_pcie_txq_agg_setup,
.kick_nic = iwl_trans_pcie_kick_nic,
* @send_cmd_pdu:send a host command: flags can be CMD_*
* @tx: send an skb
* @reclaim: free packet until ssn. Returns a list of freed packets.
+ * @tx_agg_alloc: allocate resources for a TX BA session
* @txq_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
* ready and a successful ADDBA response has been received.
* @txq_agg_disable: de-configure a Tx queue to send AMPDUs
u32 status, struct sk_buff_head *skbs);
int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id);
+ int (*tx_agg_alloc)(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx, int sta_id, int tid,
+ u16 *ssn);
void (*txq_agg_setup)(struct iwl_priv *priv,
enum iwl_rxon_context_id ctx, int sta_id,
int tid, int frame_limit);
return trans->ops->txq_agg_disable(priv(trans), txq_id);
}
+static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
+ enum iwl_rxon_context_id ctx,
+ int sta_id, int tid, u16 *ssn)
+{
+ return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn);
+}
+
static inline void iwl_trans_txq_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx,
int sta_id, int tid,