struct ath_tx {
u16 seq_no;
u32 txqsetup;
- int hwq_map[ATH9K_WME_AC_VO+1];
+ int hwq_map[WME_NUM_AC];
spinlock_t txbuflock;
struct list_head txbuf;
struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
-int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
-
void ath_start_rfkill_poll(struct ath_softc *sc);
extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
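The hwq_map[] array is now sized and indexed by the shared WME access-category constants instead of the driver-private ATH9K_WME_AC_* enum. A minimal sketch of the definitions this relies on (the names come from this patch; the numeric values are an assumption and only need to stay consistent between the ath_tx_setup() producers and the hwq_map[] readers):

/* Assumed WME access category indices (ath9k common header style) */
#define WME_AC_BE   0   /* best effort */
#define WME_AC_BK   1   /* background  */
#define WME_AC_VI   2   /* video       */
#define WME_AC_VO   3   /* voice       */
#define WME_NUM_AC  4   /* sizes hwq_map[] above */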
qi.tqi_cwmax = 0;
} else {
/* Adhoc mode; important thing is to use 2x cwmin. */
- qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA,
- ATH9K_WME_AC_BE);
+ qnum = sc->tx.hwq_map[WME_AC_BE];
ath9k_hw_get_txq_props(ah, qnum, &qi_be);
qi.tqi_aifs = qi_be.tqi_aifs;
qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
do { \
len += snprintf(buf + len, size - len, \
"%s%13u%11u%10u%10u\n", str, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BE]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BK]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VI]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VO]].elem); \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BE]].elem, \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BK]].elem, \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VI]].elem, \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VO]].elem); \
} while(0)
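With hwq_map[] indexed directly by WME_AC_*, each expansion of this macro prints one per-queue counter column for BE, BK, VI and VO. The macro's #define line sits outside the shown context; assuming it takes (str, elem) as its body suggests, a call from read_file_xmit() might look like the following (the macro name PR and the stat field names are assumptions for illustration):

/* Hypothetical invocations; only the per-AC hwq_map[] indexing is from the patch */
PR("MPDUs Queued:    ", queued);
PR("MPDUs Completed: ", completed);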
static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
r = ath_init_btcoex_timer(sc);
if (r)
return -1;
- qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+ qnum = sc->tx.hwq_map[WME_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
break;
sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
+ if (!ath_tx_setup(sc, WME_AC_BK)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BK traffic\n");
goto err;
}
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
+ if (!ath_tx_setup(sc, WME_AC_BE)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BE traffic\n");
goto err;
}
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
+ if (!ath_tx_setup(sc, WME_AC_VI)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VI traffic\n");
goto err;
}
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
+ if (!ath_tx_setup(sc, WME_AC_VO)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VO traffic\n");
goto err;
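These ath_tx_setup() calls are what populate hwq_map[] in the first place, one hardware data queue per access category, which is what makes the direct hwq_map[WME_AC_*] lookups elsewhere in this patch safe. A sketch of what the helper is assumed to do (ath_txq_setup() as the underlying allocator and the exact error handling are assumptions; axq_qnum is the queue-number field used later in this section):

/* Sketch: allocate a data queue for one AC and record its hardware
 * queue number at the matching hwq_map[] index.
 */
int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map))
		return 0;

	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype); /* assumed allocator */
	if (!txq)
		return 0;

	sc->tx.hwq_map[haltype] = txq->axq_qnum;
	return 1;
}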
#define ATH9K_NUM_TX_QUEUES 10
-enum ath9k_tx_queue_subtype {
- ATH9K_WME_AC_BK = 0,
- ATH9K_WME_AC_BE,
- ATH9K_WME_AC_VI,
- ATH9K_WME_AC_VO,
- ATH9K_WME_UPSD
-};
+/* Used as a queue subtype instead of a WMM AC; keeps the value (4) it had
+ * as the last member of the removed enum */
+#define ATH9K_WME_UPSD 4
enum ath9k_tx_queue_flags {
TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
struct ath9k_tx_queue_info {
u32 tqi_ver;
enum ath9k_tx_queue tqi_type;
- enum ath9k_tx_queue_subtype tqi_subtype;
+ int tqi_subtype;
enum ath9k_tx_queue_flags tqi_qflags;
u32 tqi_priority;
u32 tqi_aifs;
switch (queue) {
case 0:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
+ qnum = sc->tx.hwq_map[WME_AC_VO];
break;
case 1:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
+ qnum = sc->tx.hwq_map[WME_AC_VI];
break;
case 2:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
+ qnum = sc->tx.hwq_map[WME_AC_BE];
break;
case 3:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
+ qnum = sc->tx.hwq_map[WME_AC_BK];
break;
default:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
+ qnum = sc->tx.hwq_map[WME_AC_BE];
break;
}
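This switch and the one that follows are inverse mappings: mac80211 queue numbers 0..3 correspond to VO, VI, BE and BK respectively, and hwq_map[] then translates the AC index into the actual hardware queue. An equivalent table-driven sketch (the array is hypothetical, added only to make the mapping explicit):

/* Hypothetical table form of the switch above:
 * mac80211 queue number -> WME AC index for hwq_map[].
 */
static const int mac80211_queue_to_ac[] = {
	WME_AC_VO,	/* queue 0 */
	WME_AC_VI,	/* queue 1 */
	WME_AC_BE,	/* queue 2 */
	WME_AC_BK,	/* queue 3 */
};

/* qnum = sc->tx.hwq_map[queue < 4 ? mac80211_queue_to_ac[queue] : WME_AC_BE]; */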
int qnum;
switch (queue) {
- case ATH9K_WME_AC_VO:
+ case WME_AC_VO:
qnum = 0;
break;
- case ATH9K_WME_AC_VI:
+ case WME_AC_VI:
qnum = 1;
break;
- case ATH9K_WME_AC_BE:
+ case WME_AC_BE:
qnum = 2;
break;
- case ATH9K_WME_AC_BK:
+ case WME_AC_BK:
qnum = 3;
break;
default:
ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
- if ((qnum == sc->tx.hwq_map[ATH9K_WME_AC_BE]) && !ret)
+ if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret)
ath_beaconq_config(sc);
mutex_unlock(&sc->mutex);
info->control.rates[1].idx = -1;
memset(&txctl, 0, sizeof(struct ath_tx_control));
- txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
+ txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]];
txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
return &sc->tx.txq[qnum];
}
-int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
-{
- int qnum;
-
- switch (qtype) {
- case ATH9K_TX_QUEUE_DATA:
- if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "HAL AC %u out of range, max %zu!\n",
- haltype, ARRAY_SIZE(sc->tx.hwq_map));
- return -1;
- }
- qnum = sc->tx.hwq_map[haltype];
- break;
- case ATH9K_TX_QUEUE_BEACON:
- qnum = sc->beacon.beaconq;
- break;
- case ATH9K_TX_QUEUE_CAB:
- qnum = sc->beacon.cabq->axq_qnum;
- break;
- default:
- qnum = -1;
- }
- return qnum;
-}
-
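With ath_tx_get_qnum() removed, its three cases survive as direct field accesses at the call sites; everything below is taken from the function body above, shown for the data-queue case that dominates this patch:

/* before */
qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);

/* after: data queues read hwq_map[] directly; beacon and CAB queues
 * keep using sc->beacon.beaconq and sc->beacon.cabq->axq_qnum.
 */
qnum = sc->tx.hwq_map[WME_AC_BE];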
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *qinfo)
{
for (acno = 0, ac = &an->ac[acno];
acno < WME_NUM_AC; acno++, ac++) {
ac->sched = false;
+ ac->qnum = sc->tx.hwq_map[acno];
INIT_LIST_HEAD(&ac->tid_q);
-
- switch (acno) {
- case WME_AC_BE:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
- break;
- case WME_AC_BK:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
- break;
- case WME_AC_VI:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
- break;
- case WME_AC_VO:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
- break;
- }
}
}