sc->tx99_power = MAX_RATE_POWER + 1;
init_waitqueue_head(&sc->tx_wait);
sc->cur_chan = &sc->chanctx[0];
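+ /* Without channel contexts, the default context keeps using
+ * hardware queues 0-3.
+ */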
+ if (!ath9k_use_chanctx)
+ sc->cur_chan->hw_queue_base = 0;
if (!pdata || pdata->use_eeprom) {
ah->ah_flags |= AH_USE_EEPROM;
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_RC_TABLE |
+ IEEE80211_HW_QUEUE_CONTROL |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
if (ath9k_ps_enable)
hw->flags |= IEEE80211_HW_SUPPORTS_PS;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
- hw->queues = 4;
+ /* Allow 4 AC queues for each of the 2 channel contexts,
+ * plus 1 CAB queue and 1 offchannel tx queue
+ */
+ hw->queues = 10;
+ /* last queue for offchannel */
+ hw->offchannel_tx_hw_queue = hw->queues - 1;
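+ /* Queue (hw->queues - 2) is reserved for CAB/multicast traffic
+ * and is assigned as vif->cab_queue for AP interfaces.
+ */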
hw->max_rates = 4;
hw->max_listen_interval = 1;
hw->max_rate_tries = 10;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
unsigned long flags;
+ int i;
if (ath_startrecv(sc) != 0) {
ath_err(common, "Unable to restart recv logic\n");
ath9k_hw_set_interrupts(ah);
ath9k_hw_enable_interrupts(ah);
- ieee80211_wake_queues(sc->hw);
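+ /* With hardware queue control, wake only the queues that belong
+ * to the currently active channel context (or the offchannel
+ * queue) instead of waking every mac80211 queue.
+ */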
+ if (!ath9k_use_chanctx) {
+ ieee80211_wake_queues(sc->hw);
+ } else {
+ if (sc->cur_chan == &sc->offchannel.chan) {
+ ieee80211_wake_queue(sc->hw,
+ sc->hw->offchannel_tx_hw_queue);
+ } else {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ ieee80211_wake_queue(sc->hw,
+ sc->cur_chan->hw_queue_base + i);
+ }
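+ /* In AP mode the CAB queue (hw->queues - 2) must be woken as well */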
+ if (ah->opmode == NL80211_IFTYPE_AP)
+ ieee80211_wake_queue(sc->hw, sc->hw->queues - 2);
+ }
ath9k_p2p_ps_timer(sc);
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_node *an = &avp->mcast_node;
+ int i;
mutex_lock(&sc->mutex);
avp->chanctx = sc->cur_chan;
list_add_tail(&avp->list, &avp->chanctx->vifs);
}
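+ /* Default mapping: AC i uses hw queue i (queue base 0); the mapping
+ * is updated when the vif gets assigned to a channel context.
+ */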
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ vif->hw_queue[i] = i;
+ if (vif->type == NL80211_IFTYPE_AP)
+ vif->cab_queue = hw->queues - 2;
+ else
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
an->sc = sc;
an->sta = NULL;
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (void *)vif->drv_priv;
+ int i;
mutex_lock(&sc->mutex);
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_assign_slot(sc, vif);
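+ /* (Re)initialize the per-AC queue mapping and CAB queue for this vif */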
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ vif->hw_queue[i] = i;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ vif->cab_queue = hw->queues - 2;
+ else
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+
ath9k_calculate_summary_state(sc, avp->chanctx);
mutex_unlock(&sc->mutex);
struct ath_common *common = ath9k_hw_common(ah);
int timeout = HZ / 5; /* 200 ms */
bool drain_txq;
+ int i;
cancel_delayed_work_sync(&sc->tx_complete_work);
ath_reset(sc);
ath9k_ps_restore(sc);
- ieee80211_wake_queues(hw);
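+ /* Wake only the AC queues of the current channel context */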
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ ieee80211_wake_queue(sc->hw,
+ sc->cur_chan->hw_queue_base + i);
+ }
}
ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
{
struct ath_softc *sc = hw->priv;
struct ath_chanctx *ctx, **ptr;
+ int pos;
mutex_lock(&sc->mutex);
ptr = (void *) conf->drv_priv;
*ptr = ctx;
ctx->assigned = true;
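+ /* Give each channel context its own block of IEEE80211_NUM_ACS
+ * hardware queues, based on its slot in sc->chanctx[].
+ */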
+ pos = ctx - &sc->chanctx[0];
+ ctx->hw_queue_base = pos * IEEE80211_NUM_ACS;
ath_chanctx_set_channel(sc, ctx, &conf->def);
mutex_unlock(&sc->mutex);
return 0;
mutex_lock(&sc->mutex);
ctx->assigned = false;
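+ /* The context no longer owns a hw queue block */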
+ ctx->hw_queue_base = -1;
ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN);
mutex_unlock(&sc->mutex);
}
struct ath_softc *sc = hw->priv;
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_chanctx *ctx = ath_chanctx_get(conf);
+ int i;
mutex_lock(&sc->mutex);
avp->chanctx = ctx;
list_add_tail(&avp->list, &ctx->vifs);
ath9k_calculate_summary_state(sc, ctx);
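+ /* Point the vif's AC queues at this context's hw queue block */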
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ vif->hw_queue[i] = ctx->hw_queue_base + i;
mutex_unlock(&sc->mutex);
return 0;
struct ath_softc *sc = hw->priv;
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_chanctx *ctx = ath_chanctx_get(conf);
+ int ac;
mutex_lock(&sc->mutex);
avp->chanctx = NULL;
list_del(&avp->list);
ath9k_calculate_summary_state(sc, ctx);
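+ /* The vif has no channel context, so it has no valid hw queues */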
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
mutex_unlock(&sc->mutex);
}
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
struct sk_buff *skb)
{
- int q;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int q, hw_queue;
q = skb_get_queue_mapping(skb);
if (txq == sc->tx.uapsdq)
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;
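+ /* Never wake the CAB or offchannel hw queues here; for frames on
+ * those queues (>= hw->queues - 2) fall back to the skb's AC
+ * queue number instead.
+ */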
+ hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? q : info->hw_queue;
if (txq->stopped &&
txq->pending_frames < sc->tx.txq_max_pending[q]) {
- ieee80211_wake_queue(sc->hw, q);
+ ieee80211_wake_queue(sc->hw, hw_queue);
txq->stopped = false;
}
}
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf;
bool queue;
- int q;
+ int q, hw_queue;
int ret;
if (vif)
*/
q = skb_get_queue_mapping(skb);
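+ /* As in ath_txq_skb_done(), frames on the CAB/offchannel hw queues
+ * must not stop those queues; use the skb's AC queue number instead.
+ */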
+ hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? q : info->hw_queue;
ath_txq_lock(sc, txq);
if (txq == sc->tx.txq_map[q] &&
++txq->pending_frames > sc->tx.txq_max_pending[q] &&
!txq->stopped) {
- ieee80211_stop_queue(sc->hw, q);
+ ieee80211_stop_queue(sc->hw, hw_queue);
txq->stopped = true;
}