1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2021 MediaTek Inc. */
/* Build the hardware TX path (HW TXP) descriptor that follows the TXD:
 * record the token id and the scatter-gather buffer address/length pairs
 * for every fragment after buf[0].
 * NOTE(review): several lines are elided in this view (the odd/even
 * fragment selection between buf0/buf1 and the loop-exit condition are
 * not visible) — confirm against the full source.
 */
9 mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
10 void *txp_ptr, u32 id)
12 struct mt7921_hw_txp *txp = txp_ptr;
13 struct mt7921_txp_ptr *ptr = &txp->ptr[0];
/* buf[0] carries the TXD + txp header, not payload, so skip it */
14 int i, nbuf = tx_info->nbuf - 1;
16 tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
/* token id is stored in the descriptor so completion can find the skb */
19 txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
21 for (i = 0; i < nbuf; i++) {
22 u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
23 u32 addr = tx_info->buf[i + 1].addr;
/* hardware uses MT_TXD_LEN_LAST to mark the final fragment */
26 len |= MT_TXD_LEN_LAST;
29 ptr->buf1 = cpu_to_le32(addr);
30 ptr->len1 = cpu_to_le16(len);
33 ptr->buf0 = cpu_to_le32(addr);
34 ptr->len0 = cpu_to_le16(len);
/* mt76 .tx_prepare_skb hook for the PCIe (mt7921e) path: write the TXWI,
 * consume a token id for the frame and append the HW TXP descriptor.
 * NOTE(review): early-return/error lines and some braces are elided in
 * this view — the bodies of the guard conditions are not visible.
 */
39 int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
40 enum mt76_txq_id qid, struct mt76_wcid *wcid,
41 struct ieee80211_sta *sta,
42 struct mt76_tx_info *tx_info)
44 struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
45 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
46 struct ieee80211_key_conf *key = info->control.hw_key;
47 struct mt76_txwi_cache *t;
48 struct mt7921_txp_common *txp;
50 u8 *txwi = (u8 *)txwi_ptr;
/* reject frames too short to carry an Ethernet header */
52 if (unlikely(tx_info->skb->len <= ETH_HLEN))
/* presumably the no-wcid fallback: use the global wcid — elided
 * condition not visible here, confirm against the full source */
56 wcid = &dev->mt76.global_wcid;
/* the txwi cache entry lives immediately after the TXWI in the buffer */
58 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
59 t->skb = tx_info->skb;
61 id = mt76_token_consume(mdev, &t);
66 struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
/* rate-limit TX status requests to at most one per 250 ms per station */
68 if (time_after(jiffies, msta->last_txs + HZ / 4)) {
69 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
70 msta->last_txs = jiffies;
74 pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
75 mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
/* the common TXP header follows the TXD; zero it before filling */
78 txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
79 memset(txp, 0, sizeof(struct mt7921_txp_common));
80 mt7921_write_hw_txp(dev, tx_info, txp, id);
/* real skb is tracked via the token id; hand the DMA layer a dummy
 * marker so completion knows to look the skb up by token */
82 tx_info->skb = DMA_DUMMY_DATA;
/* DMA-unmap every fragment recorded in a frame's HW TXP descriptor.
 * NOTE(review): the early-break on the "last" flag and the buf1
 * validity check are elided in this view — confirm the loop-exit
 * behavior against the full source.
 */
88 mt7921_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
90 struct mt7921_txp_common *txp;
93 txp = mt7921_txwi_to_txp(dev, t);
/* each ptr entry carries up to two buffers (buf0/len0 and buf1/len1) */
95 for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
96 struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
100 len = le16_to_cpu(ptr->len0);
/* MT_TXD_LEN_LAST flags the final fragment; strip it before unmap */
101 last = len & MT_TXD_LEN_LAST;
102 len &= MT_TXD_LEN_MASK;
103 dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
108 len = le16_to_cpu(ptr->len1);
109 last = len & MT_TXD_LEN_LAST;
110 len &= MT_TXD_LEN_MASK;
111 dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
/* Release one txwi cache entry: unmap its DMA buffers, run per-station
 * TX bookkeeping, complete the skb back to mac80211 (optionally onto
 * free_list for batched freeing) and return the txwi to the pool.
 * NOTE(review): the sta/!sta branch structure is partly elided here.
 */
119 mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
120 struct ieee80211_sta *sta, bool clear_status,
121 struct list_head *free_list)
123 struct mt76_dev *mdev = &dev->mt76;
127 mt7921_txp_skb_unmap(mdev, t);
131 txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
133 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
/* EAPOL frames are excluded from the aggregation/BA accounting */
135 if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
136 mt7921_tx_check_aggr(sta, txwi);
138 wcid_idx = wcid->idx;
/* no station context: recover the wcid index from the TXWI itself */
140 wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
143 __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
147 mt76_put_txwi(mdev, t);
/* Handle a TXRX_NOTIFY "tx free" event from the firmware: release every
 * reported MSDU token, complete the associated skbs and kick the TX
 * scheduler so blocked queues can make progress again.
 * NOTE(review): several guard lines (NULL checks, returns, closing
 * braces) are elided in this view.
 */
151 mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
153 struct mt7921_tx_free *free = (struct mt7921_tx_free *)data;
154 struct mt76_dev *mdev = &dev->mt76;
155 struct mt76_txwi_cache *txwi;
156 struct ieee80211_sta *sta = NULL;
157 struct sk_buff *skb, *tmp;
/* end of the event payload, used for bounds checking below */
158 void *end = data + len;
159 LIST_HEAD(free_list);
163 /* clean DMA queues and unmap buffers first */
164 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
165 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
/* validate the advertised MSDU count against the actual payload size */
167 count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
168 if (WARN_ON_ONCE((void *)&free->info[count] > end))
171 for (i = 0; i < count; i++) {
172 u32 msdu, info = le32_to_cpu(free->info[i]);
175 /* 1'b1: new wcid pair.
176 * 1'b0: msdu_id with the same 'wcid pair' as above.
178 if (info & MT_TX_FREE_PAIR) {
179 struct mt7921_sta *msta;
180 struct mt76_wcid *wcid;
/* switch to the new station context for the following msdu entries */
184 idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
185 wcid = rcu_dereference(dev->mt76.wcid[idx]);
186 sta = wcid_to_sta(wcid);
/* queue the station for a later status poll (once per station) */
190 msta = container_of(wcid, struct mt7921_sta, wcid);
191 spin_lock_bh(&dev->sta_poll_lock);
192 if (list_empty(&msta->poll_list))
193 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
194 spin_unlock_bh(&dev->sta_poll_lock);
198 msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
199 stat = FIELD_GET(MT_TX_FREE_STATUS, info);
/* token -> txwi cache entry; wake reports if the token pool unblocked */
201 txwi = mt76_token_release(mdev, msdu, &wake);
205 mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
209 mt76_set_tx_blocked(&dev->mt76, false);
/* free the batched completion skbs outside the hot loop */
211 list_for_each_entry_safe(skb, tmp, &free_list, list) {
212 skb_list_del_init(skb);
213 napi_consume_skb(skb, 1);
217 mt7921_mac_sta_poll(dev);
220 mt76_worker_schedule(&dev->mt76.tx_worker);
/* Early RX-descriptor classifier: consume TXRX_NOTIFY (tx free) and TXS
 * (tx status) records in-line instead of queueing them as frames.
 * NOTE(review): the switch statement, the other case labels and the
 * return values are elided in this view.
 */
223 bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
225 struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
226 __le32 *rxd = (__le32 *)data;
/* end of the dword payload, for bounds-checking the TXS walk below */
227 __le32 *end = (__le32 *)&rxd[len / 4];
228 enum rx_pkt_type type;
230 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
233 case PKT_TYPE_TXRX_NOTIFY:
234 mt7921e_mac_tx_free(dev, data, len);
/* TXS records are 8 dwords each, starting after a 2-dword header */
237 for (rxd += 2; rxd + 8 <= end; rxd += 8)
238 mt7921_mac_add_txs(dev, rxd);
/* RX skb dispatch for the PCIe path: handle tx-free notifications here
 * and pass every other packet type to the common mt7921 RX handler.
 * NOTE(review): the skb parameter line, switch statement and remaining
 * cases are elided in this view.
 */
245 void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
248 struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
249 __le32 *rxd = (__le32 *)skb->data;
250 enum rx_pkt_type type;
252 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
255 case PKT_TYPE_TXRX_NOTIFY:
/* tx-free events are consumed here; the skb itself carries no frame */
256 mt7921e_mac_tx_free(dev, skb->data, skb->len);
257 napi_consume_skb(skb, 1);
/* everything else goes to the common mt7921 RX path */
260 mt7921_queue_rx_skb(mdev, q, skb);
/* DMA TX-completion callback: the queue entry holds DMA_DUMMY_DATA, so
 * recover the real skb from the token stored in the TXP before
 * completing it to mac80211.
 * NOTE(review): the guard around the bare dev_kfree_skb_any() call
 * (presumably the !e->txwi case) is elided in this view.
 */
265 void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
268 dev_kfree_skb_any(e->skb);
273 if (e->skb == DMA_DUMMY_DATA) {
274 struct mt76_txwi_cache *t;
275 struct mt7921_txp_common *txp;
/* msdu_id[0] was written as (token | MT_MSDU_ID_VALID); mask the flag
 * back off to recover the token (see mt7921_write_hw_txp) */
278 txp = mt7921_txwi_to_txp(mdev, e->txwi);
279 token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
280 t = mt76_token_put(mdev, token);
281 e->skb = t ? t->skb : NULL;
285 mt76_tx_complete_skb(mdev, e->wcid, e->skb);
/* Drop every outstanding TX token under token_lock, freeing each txwi
 * entry, then destroy the idr. Used on reset/teardown paths where all
 * in-flight tokens become stale.
 */
288 void mt7921_tx_token_put(struct mt7921_dev *dev)
290 struct mt76_txwi_cache *txwi;
293 spin_lock_bh(&dev->mt76.token_lock);
294 idr_for_each_entry(&dev->mt76.token, txwi, id) {
/* no sta context, no status clear, no batched free list here */
295 mt7921_txwi_free(dev, txwi, NULL, false, NULL);
296 dev->mt76.token_count--;
298 spin_unlock_bh(&dev->mt76.token_lock);
299 idr_destroy(&dev->mt76.token);
/* Full-chip recovery for the PCIe path: quiesce TX/NAPI and DMA, drop
 * stale tokens, reset WPDMA, re-run the firmware bring-up sequence and
 * restart the PHY.
 * NOTE(review): the error-handling gotos/returns after each err
 * assignment are elided, and the function continues past the end of
 * this view — confirm the cleanup order against the full source.
 */
302 int mt7921e_mac_reset(struct mt7921_dev *dev)
306 mt7921e_mcu_drv_pmctrl(dev);
308 mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
/* mask all interrupts while the reset is in flight */
310 mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
311 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
313 set_bit(MT76_RESET, &dev->mphy.state);
314 set_bit(MT76_MCU_RESET, &dev->mphy.state);
/* unblock anyone waiting on MCU responses and drop queued responses */
315 wake_up(&dev->mt76.mcu.wait);
316 skb_queue_purge(&dev->mt76.mcu.res_q);
318 mt76_txq_schedule_all(&dev->mphy);
320 mt76_worker_disable(&dev->mt76.tx_worker);
321 napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
322 napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
323 napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
324 napi_disable(&dev->mt76.tx_napi);
/* all in-flight tokens are stale after reset: drop them, fresh idr */
326 mt7921_tx_token_put(dev);
327 idr_init(&dev->mt76.token);
329 mt7921_wpdma_reset(dev, true);
332 mt76_for_each_q_rx(&dev->mt76, i) {
333 napi_enable(&dev->mt76.napi[i]);
334 napi_schedule(&dev->mt76.napi[i]);
338 dev->fw_assert = false;
339 clear_bit(MT76_MCU_RESET, &dev->mphy.state);
/* re-enable interrupts before talking to the firmware again */
341 mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
342 MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
344 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
/* staged bring-up: driver ownership, firmware, eeprom, MAC, PHY start */
346 err = mt7921e_driver_own(dev);
350 err = mt7921_run_firmware(dev);
354 err = mt7921_mcu_set_eeprom(dev);
358 err = mt7921_mac_init(dev);
362 err = __mt7921_start(&dev->phy);
364 clear_bit(MT76_RESET, &dev->mphy.state);
367 napi_enable(&dev->mt76.tx_napi);
368 napi_schedule(&dev->mt76.tx_napi);
371 mt76_worker_enable(&dev->mt76.tx_worker);