#include "util.h"
#include "testmode.h"
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048
#define MT_SKB_HEAD_LEN 128

#define MT_MAX_NON_AQL_PKT 16
#define MT_TXQ_FREE_THR 32
+
+#define MT76_TOKEN_FREE_THR 64
struct mt76_dev;
struct mt76_phy;
u32 drv_flags;
u32 survey_flags;
u16 txwi_size;
+ u16 token_size;
u8 mcs_rates;
void (*update_survey)(struct mt76_dev *dev);
struct mt76_power_limits *dest,
s8 target_power);
+struct mt76_txwi_cache *
+mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
+int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
+void mt76_token_init(struct mt76_dev *dev);
+void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
+
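+/* Locked wrapper around __mt76_set_tx_blocked() */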
+static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
+{
+ spin_lock_bh(&dev->token_lock);
+ __mt76_set_tx_blocked(dev, blocked);
+ spin_unlock_bh(&dev->token_lock);
+}
+
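+/* Reserve an idr slot mapping a token id to @*ptxwi; returns the new
+ * token id or a negative error code. Does not touch token_count.
+ */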
+static inline int
+mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
+{
+ int token;
+
+ spin_lock_bh(&dev->token_lock);
+ token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
+ GFP_ATOMIC);
+ spin_unlock_bh(&dev->token_lock);
+
+ return token;
+}
+
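+/* Remove @token from the idr and return the txwi cache entry it was
+ * mapped to, or NULL if the token is not in use.
+ */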
+static inline struct mt76_txwi_cache *
+mt76_token_put(struct mt76_dev *dev, int token)
+{
+ struct mt76_txwi_cache *txwi;
+
+ spin_lock_bh(&dev->token_lock);
+ txwi = idr_remove(&dev->token, token);
+ spin_unlock_bh(&dev->token_lock);
+
+ return txwi;
+}
#endif
u8 wcid;
trace_mac_tx_free(dev, token);
-
- spin_lock_bh(&mdev->token_lock);
- txwi = idr_remove(&mdev->token, token);
- spin_unlock_bh(&mdev->token_lock);
-
+ txwi = mt76_token_put(mdev, token);
if (!txwi)
return;
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7615_TOKEN_SIZE,
.tx_prepare_skb = mt7615_tx_prepare_skb,
.tx_complete_skb = mt7615_tx_complete_skb,
.rx_skb = mt7615_queue_rx_skb,
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->mcu_work, mt7615_pci_init_work);
- spin_lock_init(&dev->mt76.token_lock);
- idr_init(&dev->mt76.token);
+ mt76_token_init(&dev->mt76);
ret = mt7615_eeprom_init(dev, addr);
if (ret < 0)
token = le16_to_cpu(txp->hw.msdu_id[0]) &
~MT_MSDU_ID_VALID;
- spin_lock_bh(&mdev->token_lock);
- t = idr_remove(&mdev->token, token);
- spin_unlock_bh(&mdev->token_lock);
+ t = mt76_token_put(mdev, token);
e->skb = t ? t->skb : NULL;
}
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
- spin_lock_bh(&mdev->token_lock);
- id = idr_alloc(&mdev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
- spin_unlock_bh(&mdev->token_lock);
+ id = mt76_token_get(mdev, &t);
if (id < 0)
return id;
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->init_work, mt7915_init_work);
- spin_lock_init(&dev->mt76.token_lock);
- idr_init(&dev->mt76.token);
+ mt76_token_init(&dev->mt76);
dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5));
mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
}
-static void
-mt7915_set_tx_blocked(struct mt7915_dev *dev, bool blocked)
-{
- struct mt76_phy *mphy = &dev->mphy, *mphy2 = dev->mt76.phy2;
- struct mt76_queue *q, *q2 = NULL;
-
- q = mphy->q_tx[0];
- if (blocked == q->blocked)
- return;
-
- q->blocked = blocked;
- if (mphy2) {
- q2 = mphy2->q_tx[0];
- q2->blocked = blocked;
- }
-
- if (!blocked)
- mt76_worker_schedule(&dev->mt76.tx_worker);
-}
-
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
- spin_lock_bh(&mdev->token_lock);
- id = idr_alloc(&mdev->token, t, 0, MT7915_TOKEN_SIZE, GFP_ATOMIC);
- if (id >= 0)
- mdev->token_count++;
-
- if (mdev->token_count >= MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR)
- mt7915_set_tx_blocked(dev, true);
- spin_unlock_bh(&mdev->token_lock);
-
+ id = mt76_token_consume(mdev, &t);
if (id < 0)
return id;
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
stat = FIELD_GET(MT_TX_FREE_STATUS, info);
- spin_lock_bh(&mdev->token_lock);
- txwi = idr_remove(&mdev->token, msdu);
- if (txwi)
- mdev->token_count--;
- if (mdev->token_count < MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR &&
- dev->mphy.q_tx[0]->blocked)
- wake = true;
- spin_unlock_bh(&mdev->token_lock);
-
+ txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
mt7915_mac_sta_poll(dev);
- if (wake) {
- spin_lock_bh(&mdev->token_lock);
- mt7915_set_tx_blocked(dev, false);
- spin_unlock_bh(&mdev->token_lock);
- }
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
mt76_worker_schedule(&dev->mt76.tx_worker);
struct mt7915_txp *txp;
txp = mt7915_txwi_to_txp(mdev, e->txwi);
-
- spin_lock_bh(&mdev->token_lock);
- t = idr_remove(&mdev->token, le16_to_cpu(txp->token));
- spin_unlock_bh(&mdev->token_lock);
+ t = mt76_token_put(mdev, le16_to_cpu(txp->token));
e->skb = t ? t->skb : NULL;
}
#define MT7915_EEPROM_SIZE 3584
#define MT7915_TOKEN_SIZE 8192
-#define MT7915_TOKEN_FREE_THR 64
#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7915_TOKEN_SIZE,
.tx_prepare_skb = mt7915_tx_prepare_skb,
.tx_complete_skb = mt7915_tx_complete_skb,
.rx_skb = mt7915_queue_rx_skb,
{
int ret, idx;
- spin_lock_init(&dev->mt76.token_lock);
- idr_init(&dev->mt76.token);
-
+ mt76_token_init(&dev->mt76);
ret = mt7921_dma_init(dev);
if (ret)
return ret;
}
}
-static void mt7921_set_tx_blocked(struct mt7921_dev *dev, bool blocked)
-{
- struct mt76_phy *mphy = &dev->mphy;
- struct mt76_queue *q;
-
- q = mphy->q_tx[0];
- if (blocked == q->blocked)
- return;
-
- q->blocked = blocked;
- if (!blocked)
- mt76_worker_schedule(&dev->mt76.tx_worker);
-}
-
int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
- spin_lock_bh(&mdev->token_lock);
- id = idr_alloc(&mdev->token, t, 0, MT7921_TOKEN_SIZE, GFP_ATOMIC);
- if (id >= 0)
- mdev->token_count++;
-
- if (mdev->token_count >= MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR)
- mt7921_set_tx_blocked(dev, true);
- spin_unlock_bh(&mdev->token_lock);
-
+ id = mt76_token_consume(mdev, &t);
if (id < 0)
return id;
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
stat = FIELD_GET(MT_TX_FREE_STATUS, info);
- spin_lock_bh(&mdev->token_lock);
- txwi = idr_remove(&mdev->token, msdu);
- if (txwi)
- mdev->token_count--;
- if (mdev->token_count < MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR &&
- dev->mphy.q_tx[0]->blocked)
- wake = true;
- spin_unlock_bh(&mdev->token_lock);
-
+ txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
mt76_put_txwi(mdev, txwi);
}
- if (wake) {
- spin_lock_bh(&mdev->token_lock);
- mt7921_set_tx_blocked(dev, false);
- spin_unlock_bh(&mdev->token_lock);
- }
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
napi_consume_skb(skb, 1);
u16 token;
txp = mt7921_txwi_to_txp(mdev, e->txwi);
-
token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
- spin_lock_bh(&mdev->token_lock);
- t = idr_remove(&mdev->token, token);
- spin_unlock_bh(&mdev->token_lock);
+ t = mt76_token_put(mdev, token);
e->skb = t ? t->skb : NULL;
}
#define MT7921_EEPROM_SIZE 3584
#define MT7921_TOKEN_SIZE 8192
-#define MT7921_TOKEN_FREE_THR 64
#define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921_tx_prepare_skb,
.tx_complete_skb = mt7921_tx_complete_skb,
.rx_skb = mt7921_queue_rx_skb,
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
+
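+/* Initialize the token idr and the lock protecting it */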
+void mt76_token_init(struct mt76_dev *dev)
+{
+ spin_lock_init(&dev->token_lock);
+ idr_init(&dev->token);
+}
+EXPORT_SYMBOL_GPL(mt76_token_init);
+
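+/* Block or unblock the primary tx queue of both phys; called with
+ * token_lock held.
+ */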
+void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
+{
+ struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2;
+ struct mt76_queue *q, *q2 = NULL;
+
+ q = phy->q_tx[0];
+ if (blocked == q->blocked)
+ return;
+
+ q->blocked = blocked;
+ if (phy2) {
+ q2 = phy2->q_tx[0];
+ q2->blocked = blocked;
+ }
+
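+ /* restart tx scheduling once the queues are unblocked */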
+ if (!blocked)
+ mt76_worker_schedule(&dev->tx_worker);
+}
+EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);
+
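+/* Allocate a token for @*ptxwi and block tx scheduling once fewer than
+ * MT76_TOKEN_FREE_THR free tokens remain.
+ */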
+int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
+{
+ int token;
+
+ spin_lock_bh(&dev->token_lock);
+
+ token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
+ GFP_ATOMIC);
+ if (token >= 0)
+ dev->token_count++;
+
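+ /* stop tx scheduling while hardware tokens are running low */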
+ if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
+ __mt76_set_tx_blocked(dev, true);
+
+ spin_unlock_bh(&dev->token_lock);
+
+ return token;
+}
+EXPORT_SYMBOL_GPL(mt76_token_consume);
+
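+/* Release @token and return the txwi cache entry it was mapped to;
+ * @wake is set when tx scheduling can be restarted.
+ */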
+struct mt76_txwi_cache *
+mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
+{
+ struct mt76_txwi_cache *txwi;
+
+ spin_lock_bh(&dev->token_lock);
+
+ txwi = idr_remove(&dev->token, token);
+ if (txwi)
+ dev->token_count--;
+
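+ /* tell the caller to unblock the tx queues once enough tokens are free */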
+ if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
+ dev->phy.q_tx[0]->blocked)
+ *wake = true;
+
+ spin_unlock_bh(&dev->token_lock);
+
+ return txwi;
+}
+EXPORT_SYMBOL_GPL(mt76_token_release);