1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2019 MediaTek Inc.
4 * Author: Ryder Lee <ryder.lee@mediatek.com>
5 * Roy Luo <royluo@google.com>
6 * Felix Fietkau <nbd@nbd.name>
7 * Lorenzo Bianconi <lorenzo@kernel.org>
10 #include <linux/devcoredump.h>
11 #include <linux/etherdevice.h>
12 #include <linux/timekeeping.h>
16 #include "mt7615_trace.h"
/* Convert a raw RCPI field from the RX vector into dBm:
 * RCPI is encoded as 2 * (rssi + 110), i.e. (rcpi - 220) / 2 = rssi - 110.
 * NOTE(review): exact offset semantics per MT7615 RXV layout — confirm
 * against the datasheet / mt7615 register definitions.
 */
20 #define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
/* ETSI DFS radar detection parameters: pulse thresholds plus per-pattern
 * radar signatures (indices 5-12 visible here; other entries and the
 * field names of struct mt7615_dfs_radar_spec are elided in this view).
 */
22 static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
23 .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
25 [5] = { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 },
26 [6] = { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 },
27 [7] = { 1, 0, 15, 32, 28, 0, 27, 240, 445, 1, 1 },
28 [8] = { 1, 0, 12, 32, 28, 0, 42, 240, 510, 1, 1 },
29 [9] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
30 [10] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
31 [11] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 18, 32, 28 },
32 [12] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 27, 32, 24 },
/* FCC DFS radar detection parameters; same pulse thresholds as ETSI,
 * with FCC-specific pattern entries 0-4.
 */
36 static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
37 .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
39 [0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 },
40 [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
41 [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
42 [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
43 [4] = { 1, 0, 9, 255, 28, 0, 13, 323, 343, 1, 32 },
/* JP (Japan) DFS radar detection parameters: FCC-like patterns 0-4 plus
 * JP-specific long-interval patterns 13-14.
 */
47 static const struct mt7615_dfs_radar_spec jp_radar_specs = {
48 .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
50 [0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 },
51 [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
52 [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
53 [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
54 [4] = { 1, 0, 9, 32, 28, 0, 13, 323, 343, 1, 32 },
55 [13] = { 1, 0, 8, 32, 28, 0, 14, 3836, 3856, 1, 1 },
56 [14] = { 1, 0, 8, 32, 28, 0, 14, 3990, 4010, 1, 1 },
/* Look up the mt76_wcid for a received frame by hardware WLAN index.
 * Bounds-checks idx against the WTBL size, then RCU-dereferences the
 * per-index wcid table. The final visible path returns the wcid of the
 * station's owning vif (used as a fallback; the intermediate branches
 * deciding between the direct wcid and the vif wcid are elided here —
 * confirm against the full file). Must be called under RCU read lock
 * (implied by rcu_dereference()).
 */
60 static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
63 struct mt7615_sta *sta;
64 struct mt76_wcid *wcid;
66 if (idx >= MT7615_WTBL_SIZE)
69 wcid = rcu_dereference(dev->mt76.wcid[idx]);
76 sta = container_of(wcid, struct mt7615_sta, wcid);
80 return &sta->vif->sta.wcid;
/* Reset software and hardware statistics counters for both bands.
 * The MIB registers read here (TX_AGG_CNT, SDR9/36/37) are
 * clear-on-read, so the dummy mt76_rr() calls discard stale values.
 * Also zeroes the software aggregation stats, restarts the survey
 * timestamp(s), and pulses the RX-time clear bits in the RMAC MIB
 * control registers.
 */
83 void mt7615_mac_reset_counters(struct mt7615_dev *dev)
87 for (i = 0; i < 4; i++) {
		/* band 0 and band 1 aggregation counters; clear-on-read */
88 mt76_rr(dev, MT_TX_AGG_CNT(0, i));
89 mt76_rr(dev, MT_TX_AGG_CNT(1, i));
92 memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
93 dev->mt76.phy.survey_time = ktime_get_boottime();
	/* second PHY, if present (guard elided in this view) */
95 dev->mt76.phy2->survey_time = ktime_get_boottime();
97 /* reset airtime counters */
98 mt76_rr(dev, MT_MIB_SDR9(0));
99 mt76_rr(dev, MT_MIB_SDR9(1));
101 mt76_rr(dev, MT_MIB_SDR36(0));
102 mt76_rr(dev, MT_MIB_SDR36(1));
104 mt76_rr(dev, MT_MIB_SDR37(0));
105 mt76_rr(dev, MT_MIB_SDR37(1));
107 mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
108 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
/* Program MAC timing parameters (CCK/OFDM timeouts, IFS, slot time and
 * CF-End rate) for one PHY. Because the two PHYs share some timing
 * registers, the effective coverage class is the max of both PHYs'
 * values, and the sibling PHY's TX/RX is disabled via MT_ARB_SCR while
 * the registers are rewritten, then re-enabled at the end.
 * No-op unless the PHY is in RUNNING state.
 */
111 void mt7615_mac_set_timing(struct mt7615_phy *phy)
113 s16 coverage_class = phy->coverage_class;
114 struct mt7615_dev *dev = phy->dev;
115 bool ext_phy = phy != &dev->phy;
	/* base PLCP/CCA timeout values before the coverage-class offset */
117 u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
118 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
119 u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
120 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
122 bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
124 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
	/* take the larger coverage class of the two PHYs (shared regs) */
133 coverage_class = max_t(s16, dev->phy.coverage_class,
	/* quiesce the other band while timing registers are updated */
135 mt76_set(dev, MT_ARB_SCR,
136 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
138 struct mt7615_phy *phy_ext = mt7615_ext_phy(dev);
141 coverage_class = max_t(s16, phy_ext->coverage_class,
143 mt76_set(dev, MT_ARB_SCR,
144 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
	/* 3 us of extra air-propagation time per coverage class unit */
148 offset = 3 * coverage_class;
149 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
150 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
151 mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset);
152 mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset);
154 mt76_wr(dev, MT_TMAC_ICR(ext_phy),
155 FIELD_PREP(MT_IFS_EIFS, 360) |
156 FIELD_PREP(MT_IFS_RIFS, 2) |
157 FIELD_PREP(MT_IFS_SIFS, sifs) |
158 FIELD_PREP(MT_IFS_SLOT, phy->slottime));
	/* short slot or 5 GHz -> OFDM CF-End rate, else 11b */
160 if (phy->slottime < 20 || is_5ghz)
161 val = MT7615_CFEND_RATE_DEFAULT;
163 val = MT7615_CFEND_RATE_11B;
165 mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
	/* re-enable the band that was quiesced above */
167 mt76_clear(dev, MT_ARB_SCR,
168 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
170 mt76_clear(dev, MT_ARB_SCR,
171 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
/* Fill status->freq/band for a received frame. While not scanning
 * (HW scan, scheduled scan, or remain-on-channel), the current channel
 * definition is authoritative; otherwise the band/frequency is derived
 * from the channel number reported by the hardware (chfreq <= 14 means
 * 2.4 GHz).
 */
176 mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy,
177 struct mt76_rx_status *status, u8 chfreq)
179 if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
180 !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
181 !test_bit(MT76_STATE_ROC, &mphy->state)) {
182 status->freq = mphy->chandef.chan->center_freq;
183 status->band = mphy->chandef.chan->band;
187 status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
188 status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
/* Record per-packet RX vector data (frequency offset, RCPI, in-band and
 * wide-band RSSI) into the testmode state. Compiled away unless
 * CONFIG_NL80211_TESTMODE is enabled.
 */
191 static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
193 #ifdef CONFIG_NL80211_TESTMODE
194 u32 rxv1 = le32_to_cpu(rxv[0]);
195 u32 rxv3 = le32_to_cpu(rxv[2]);
196 u32 rxv4 = le32_to_cpu(rxv[3]);
197 u32 rxv5 = le32_to_cpu(rxv[4]);
198 u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1);
199 u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1);
200 s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5);
	/* bandwidth-dependent scale: BW index maps to 20/40/80 kHz * 1000
	 * via BIT(cbw + 1); masked to 4 bits so out-of-range cbw yields 0 */
201 u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000;
	/* scale raw FOE to a frequency offset; >> 15 removes the
	 * fixed-point fraction */
212 foe = (foe * foe_const) >> 15;
215 phy->test.last_freq_offset = foe;
216 phy->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4);
217 phy->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
218 phy->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
219 phy->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
220 phy->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
221 phy->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
/* Parse the hardware RX descriptor (RXD) at the head of an incoming
 * skb and populate the mt76_rx_status in skb->cb: WCID/station lookup,
 * checksum/FCS/MIC flags, decryption state, optional descriptor groups
 * (frame control / timestamp+A-MPDU / RX vector with rate, bandwidth,
 * GI, LDPC, STBC and per-chain signal), PHY selection for dual-band
 * devices, and finally A-MSDU and QoS metadata. The descriptor words
 * are stripped from the skb before return.
 *
 * Returns 0 on success or a negative errno (error paths elided in this
 * view). Many intermediate lines are elided; several rxd advances
 * between groups are not visible here.
 */
225 static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
227 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
228 struct mt76_phy *mphy = &dev->mt76.phy;
229 struct mt7615_phy *phy = &dev->phy;
230 struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
231 struct ieee80211_supported_band *sband;
232 struct ieee80211_hdr *hdr;
233 __le32 *rxd = (__le32 *)skb->data;
234 u32 rxd0 = le32_to_cpu(rxd[0]);
235 u32 rxd1 = le32_to_cpu(rxd[1]);
236 u32 rxd2 = le32_to_cpu(rxd[2]);
237 u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
238 bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false;
241 u8 chfreq, amsdu_info, qos_ctl = 0;
245 memset(status, 0, sizeof(*status));
	/* decide which PHY received this frame from the HW channel field */
247 chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
250 else if (phy2->chfreq == phy->chfreq)
252 else if (phy->chfreq == chfreq)
254 else if (phy2->chfreq == chfreq)
259 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
262 unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
263 idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
264 hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS;
265 status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
268 struct mt7615_sta *msta;
	/* queue the station for airtime polling if not already queued */
270 msta = container_of(status->wcid, struct mt7615_sta, wcid);
271 spin_lock_bh(&dev->sta_poll_lock);
272 if (list_empty(&msta->poll_list))
273 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
274 spin_unlock_bh(&dev->sta_poll_lock);
	/* both IP and TCP/UDP checksums verified by hardware */
277 if ((rxd0 & csum_mask) == csum_mask)
278 skb->ip_summed = CHECKSUM_UNNECESSARY;
280 if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
281 status->flag |= RX_FLAG_FAILED_FCS_CRC;
283 if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
284 status->flag |= RX_FLAG_MMIC_ERROR;
	/* encrypted and neither a cipher-length nor cipher mismatch error:
	 * hardware already decrypted and stripped IV/MIC */
286 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
287 !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
288 status->flag |= RX_FLAG_DECRYPTED;
289 status->flag |= RX_FLAG_IV_STRIPPED;
290 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
293 remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
295 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
	/* optional descriptor group 4: translated frame control / QoS /
	 * sequence control (rxd has been advanced past the base RXD by
	 * elided code) */
299 if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
300 u32 v0 = le32_to_cpu(rxd[0]);
301 u32 v2 = le32_to_cpu(rxd[2]);
303 fc = cpu_to_le16(FIELD_GET(MT_RXD4_FRAME_CONTROL, v0));
304 qos_ctl = FIELD_GET(MT_RXD6_QOS_CTL, v2);
305 seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, v2);
	/* descriptor must not extend past the buffer */
308 if ((u8 *)rxd - skb->data >= skb->len)
	/* group 1: IV (byte-reversed) for PN validation; fragments need
	 * a reconstructed CCMP header */
312 if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
313 u8 *data = (u8 *)rxd;
315 if (status->flag & RX_FLAG_DECRYPTED) {
316 status->iv[0] = data[5];
317 status->iv[1] = data[4];
318 status->iv[2] = data[3];
319 status->iv[3] = data[2];
320 status->iv[4] = data[1];
321 status->iv[5] = data[0];
323 insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
326 if ((u8 *)rxd - skb->data >= skb->len)
	/* group 2: hardware timestamp and A-MPDU reference tracking */
330 if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
331 status->timestamp = le32_to_cpu(rxd[0]);
332 status->flag |= RX_FLAG_MACTIME_START;
334 if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
335 MT_RXD2_NORMAL_NON_AMPDU))) {
336 status->flag |= RX_FLAG_AMPDU_DETAILS;
338 /* all subframes of an A-MPDU have the same timestamp */
339 if (phy->rx_ampdu_ts != status->timestamp) {
			/* skip ampdu_ref == 0 (reserved value) */
340 if (!++phy->ampdu_ref)
343 phy->rx_ampdu_ts = status->timestamp;
345 status->ampdu_ref = phy->ampdu_ref;
349 if ((u8 *)rxd - skb->data >= skb->len)
	/* group 3 (first pass): disambiguate which PHY received the frame
	 * when both are on the same channel and no WCID matched, using the
	 * per-chain noise bytes in RXV word 5 */
353 if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
354 u32 rxdg5 = le32_to_cpu(rxd[5]);
357 * If both PHYs are on the same channel and we don't have a WCID,
358 * we need to figure out which PHY this packet was received on.
359 * On the primary PHY, the noise value for the chains belonging to the
360 * second PHY will be set to the noise value of the last packet from
364 int first_chain = ffs(phy2->mt76->chainmask) - 1;
366 phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
370 if (phy_idx == 1 && phy2) {
371 mphy = dev->mt76.phy2;
373 status->ext_phy = true;
	/* without firmware offload the channel must match the configured
	 * one (scan frames are handled by the test above) */
376 if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
379 mt7615_get_status_freq_info(dev, mphy, status, chfreq);
380 if (status->band == NL80211_BAND_5GHZ)
381 sband = &mphy->sband_5g.sband;
383 sband = &mphy->sband_2g.sband;
385 if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
388 if (!sband->channels)
	/* group 3 (second pass): decode the RX vector into rate info */
391 if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
392 u32 rxdg0 = le32_to_cpu(rxd[0]);
393 u32 rxdg1 = le32_to_cpu(rxd[1]);
394 u32 rxdg3 = le32_to_cpu(rxd[3]);
395 u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
398 i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
399 switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
400 case MT_PHY_TYPE_CCK:
		/* fallthrough into OFDM index lookup (elided lines set cck) */
403 case MT_PHY_TYPE_OFDM:
404 i = mt76_get_rate(&dev->mt76, sband, i, cck);
406 case MT_PHY_TYPE_HT_GF:
408 status->encoding = RX_ENC_HT;
412 case MT_PHY_TYPE_VHT:
413 status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
414 status->encoding = RX_ENC_VHT;
419 status->rate_idx = i;
421 switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
425 status->bw = RATE_INFO_BW_40;
428 status->bw = RATE_INFO_BW_80;
431 status->bw = RATE_INFO_BW_160;
437 if (rxdg0 & MT_RXV1_HT_SHORT_GI)
438 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
439 if (rxdg0 & MT_RXV1_HT_AD_CODE)
440 status->enc_flags |= RX_ENC_FLAG_LDPC;
442 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	/* per-chain RSSI from RCPI fields; overall signal = strongest chain */
444 status->chains = mphy->antenna_mask;
445 status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
446 status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
447 status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
448 status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
449 status->signal = status->chain_signal[0];
451 for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
452 if (!(status->chains & BIT(i)))
455 status->signal = max(status->signal,
456 status->chain_signal[i]);
459 mt7615_mac_fill_tm_rx(mphy->priv, rxd);
462 if ((u8 *)rxd - skb->data >= skb->len)
	/* strip descriptor (and optional 2-byte header pad) from payload */
466 skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
468 amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1);
469 status->amsdu = !!amsdu_info;
471 status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME;
472 status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME;
	/* make room (2 bytes) after the 802.11 header, presumably for the
	 * CCMP header insertion below — surrounding condition elided */
474 memmove(skb->data + 2, skb->data,
475 ieee80211_get_hdrlen_from_skb(skb));
480 if (insert_ccmp_hdr && !hdr_trans) {
481 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
483 mt76_insert_ccmp_hdr(skb, key_id);
	/* non-translated path: read QoS/seq from the actual 802.11 header */
487 hdr = (struct ieee80211_hdr *)skb->data;
488 fc = hdr->frame_control;
489 if (ieee80211_is_data_qos(fc)) {
490 seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
491 qos_ctl = *ieee80211_get_qos_ctl(hdr);
	/* header-translated frames are delivered as 802.3 */
494 status->flag |= RX_FLAG_8023;
497 if (!status->wcid || !ieee80211_is_data_qos(fc))
500 status->aggr = unicast &&
501 !ieee80211_is_qos_nullfunc(fc);
502 status->qos_ctl = qos_ctl;
503 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
/* mt76 callback invoked when a station's power-save state changes
 * (body elided in this view).
 */
508 void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
511 EXPORT_SYMBOL_GPL(mt7615_sta_ps);
/* Encode a mac80211 ieee80211_tx_rate into the hardware TX rate value:
 * PHY mode, NSS and rate index packed via MT_TX_RATE_* fields, with an
 * STBC flag for single-stream STBC. Handles VHT (MCS + NSS + BW flags),
 * HT/HT-greenfield (MCS index encodes NSS in bits 3+), and legacy
 * rates looked up from the band's bitrate table (short-preamble
 * hw value when requested). The bandwidth result and final return are
 * elided in this view.
 */
514 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
515 struct mt76_phy *mphy,
516 const struct ieee80211_tx_rate *rate,
519 u8 phy, nss, rate_idx;
524 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
525 rate_idx = ieee80211_rate_get_vht_mcs(rate);
526 nss = ieee80211_rate_get_vht_nss(rate);
527 phy = MT_PHY_TYPE_VHT;
528 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
530 else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
532 else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
534 } else if (rate->flags & IEEE80211_TX_RC_MCS) {
535 rate_idx = rate->idx;
	/* HT MCS: one spatial stream per 8 MCS indices */
536 nss = 1 + (rate->idx >> 3);
537 phy = MT_PHY_TYPE_HT;
538 if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
539 phy = MT_PHY_TYPE_HT_GF;
540 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
	/* legacy (CCK/OFDM) rate from the band's bitrate table */
543 const struct ieee80211_rate *r;
544 int band = mphy->chandef.chan->band;
548 r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx];
549 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
550 val = r->hw_value_short;
555 rate_idx = val & 0xff;
	/* single-stream STBC: transmit on 2 chains (nss bump elided) */
558 if (stbc && nss == 1) {
560 rateval |= MT_TX_RATE_STBC;
563 rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
564 FIELD_PREP(MT_TX_RATE_MODE, phy) |
565 FIELD_PREP(MT_TX_RATE_NSS, nss - 1));
/* Build the hardware TX descriptor (TXWI) for an outgoing frame.
 * Fills txwi[0..8]: byte count and queue index, WLAN index and header
 * format, frame type/subtype and multicast flag, protection and
 * sequence-number fields, status-report request (PID), optional fixed
 * rate (from info->control.rates[0]), and retry count. The interface:
 * wcid/sta identify the destination, pid tags the frame for TX status
 * reporting, key is consulted for BIP-protected multicast management
 * frames, beacon selects the beacon queue and firmware packet format.
 * Several assignments between the visible lines are elided.
 */
570 int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
571 struct sk_buff *skb, struct mt76_wcid *wcid,
572 struct ieee80211_sta *sta, int pid,
573 struct ieee80211_key_conf *key, bool beacon)
575 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
576 u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
577 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
578 struct ieee80211_tx_rate *rate = &info->control.rates[0];
579 bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
580 bool multicast = is_multicast_ether_addr(hdr->addr1);
581 struct ieee80211_vif *vif = info->control.vif;
582 bool is_mmio = mt76_is_mmio(&dev->mt76);
	/* USB devices use a shorter TXD */
583 u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE;
584 struct mt76_phy *mphy = &dev->mphy;
585 __le16 fc = hdr->frame_control;
590 struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
592 omac_idx = mvif->omac_idx;
593 wmm_idx = mvif->wmm_idx;
597 struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
	/* default retry budget from the station's rate configuration */
599 tx_count = msta->rate_count;
602 if (ext_phy && dev->mt76.phy2)
603 mphy = dev->mt76.phy2;
605 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
606 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
	/* queue selection: beacon, management/PSD, or WMM data queue */
609 p_fmt = MT_TX_TYPE_FW;
610 q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
611 } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
612 p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
613 q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
615 p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
616 q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
617 mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
620 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
621 FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
622 FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
623 txwi[0] = cpu_to_le32(val);
625 val = MT_TXD1_LONG_FORMAT |
626 FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
627 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
628 FIELD_PREP(MT_TXD1_HDR_INFO,
629 ieee80211_get_hdrlen_from_skb(skb) / 2) |
630 FIELD_PREP(MT_TXD1_TID,
631 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
632 FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
633 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
634 txwi[1] = cpu_to_le32(val);
636 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
637 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
638 FIELD_PREP(MT_TXD2_MULTICAST, multicast);
	/* BIP-protected multicast mgmt frames: let hardware protect them */
640 if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
641 key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
645 txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
650 txwi[2] = cpu_to_le32(val);
652 if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
653 txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
	/* mac80211-selected fixed rate (not a rate-control probe) */
658 if (rate->idx >= 0 && rate->count &&
659 !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
660 bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
662 u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc,
665 txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
667 val = MT_TXD6_FIXED_BW |
668 FIELD_PREP(MT_TXD6_BW, bw) |
669 FIELD_PREP(MT_TXD6_TX_RATE, rateval);
670 txwi[6] |= cpu_to_le32(val);
672 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
673 txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
675 if (info->flags & IEEE80211_TX_CTL_LDPC)
676 txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);
	/* legacy fixed rates can't be block-acked */
678 if (!(rate->flags & (IEEE80211_TX_RC_MCS |
679 IEEE80211_TX_RC_VHT_MCS)))
680 txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
682 tx_count = rate->count;
	/* request TX status reports for everything except beacons */
685 if (!ieee80211_is_beacon(fc)) {
686 struct ieee80211_hw *hw = mt76_hw(dev);
688 val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid);
689 if (!ieee80211_hw_check(hw, SUPPORTS_PS))
690 val |= MT_TXD5_SW_POWER_MGMT;
691 txwi[5] = cpu_to_le32(val);
694 /* use maximum tx count for beacons */
698 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	/* injected frames carry their own sequence numbers */
699 if (info->flags & IEEE80211_TX_CTL_INJECTED) {
700 seqno = le16_to_cpu(hdr->seq_ctrl);
702 if (ieee80211_is_back_req(hdr->frame_control)) {
703 struct ieee80211_bar *bar;
705 bar = (struct ieee80211_bar *)skb->data;
706 seqno = le16_to_cpu(bar->start_seq_num);
709 val |= MT_TXD3_SN_VALID |
710 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
713 txwi[3] |= cpu_to_le32(val);
715 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
716 txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
718 txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
719 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) |
720 FIELD_PREP(MT_TXD7_SPE_IDX, 0x18);
	/* USB-only extra descriptor word (guard elided in this view) */
722 txwi[8] = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
723 FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
727 EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi);
/* DMA-unmap every buffer referenced by a firmware-style TX packet
 * descriptor after transmission completes.
 */
730 mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
734 for (i = 0; i < txp->nbuf; i++)
735 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
736 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
/* DMA-unmap the buffers of a hardware-style TX packet descriptor.
 * Each ptr entry holds two (buf, len) pairs; the length field embeds a
 * "last buffer" flag whose bit position differs on MT7663
 * (MT_TXD_LEN_LAST) vs MT7615 (MT_TXD_LEN_MSDU_LAST). The early-exit
 * on the last flag is elided in this view.
 */
740 mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
745 last_mask = is_mt7663(dev) ? MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST;
747 for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
748 struct mt7615_txp_ptr *ptr = &txp->ptr[i];
	/* first (buf0, len0) pair */
752 len = le16_to_cpu(ptr->len0);
753 last = len & last_mask;
754 len &= MT_TXD_LEN_MASK;
755 dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
	/* second (buf1, len1) pair */
760 len = le16_to_cpu(ptr->len1);
761 last = len & last_mask;
762 len &= MT_TXD_LEN_MASK;
763 dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
/* Unmap a completed TX frame's DMA buffers, dispatching to the
 * firmware- or hardware-TXP variant (the selection condition between
 * the two calls is elided in this view).
 */
770 void mt7615_txp_skb_unmap(struct mt76_dev *dev,
771 struct mt76_txwi_cache *t)
773 struct mt7615_txp_common *txp;
775 txp = mt7615_txwi_to_txp(dev, t);
777 mt7615_txp_skb_unmap_fw(dev, &txp->fw);
779 mt7615_txp_skb_unmap_hw(dev, &txp->hw);
781 EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap);
/* Trigger a WTBL (wireless table) update operation for entry idx with
 * the given operation mask, then poll until the hardware clears the
 * busy bit. Returns true on success, false on poll timeout (timeout
 * arguments elided in this view).
 */
783 bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
785 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
786 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
788 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
/* Report per-station airtime to mac80211. Drains dev->sta_poll_list
 * under the lock into a local list, then for each queued station reads
 * the per-AC TX/RX airtime counters from its WTBL entry (starting at
 * word 19, two u32s per AC), computes the delta since the last poll,
 * and feeds it to ieee80211_sta_register_airtime(). When a counter is
 * near overflow (bit 30 set) the hardware counters are cleared via a
 * WTBL ADM_COUNT_CLEAR update and the cached values reset.
 */
792 void mt7615_mac_sta_poll(struct mt7615_dev *dev)
	/* map mac80211 AC to a representative TID for airtime reporting */
794 static const u8 ac_to_tid[4] = {
795 [IEEE80211_AC_BE] = 0,
796 [IEEE80211_AC_BK] = 1,
797 [IEEE80211_AC_VI] = 4,
798 [IEEE80211_AC_VO] = 6
	/* hardware queue order differs from mac80211 AC order */
800 static const u8 hw_queue_map[] = {
801 [IEEE80211_AC_BK] = 0,
802 [IEEE80211_AC_BE] = 1,
803 [IEEE80211_AC_VI] = 2,
804 [IEEE80211_AC_VO] = 3,
806 struct ieee80211_sta *sta;
807 struct mt7615_sta *msta;
808 u32 addr, tx_time[4], rx_time[4];
809 struct list_head sta_poll_list;
	/* take a private snapshot so the lock is not held while polling */
812 INIT_LIST_HEAD(&sta_poll_list);
813 spin_lock_bh(&dev->sta_poll_lock);
814 list_splice_init(&dev->sta_poll_list, &sta_poll_list);
815 spin_unlock_bh(&dev->sta_poll_lock);
817 while (!list_empty(&sta_poll_list)) {
820 msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
822 list_del_init(&msta->poll_list);
	/* airtime counters live at WTBL word 19 onward */
824 addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;
826 for (i = 0; i < 4; i++, addr += 8) {
827 u32 tx_last = msta->airtime_ac[i];
828 u32 rx_last = msta->airtime_ac[i + 4];
830 msta->airtime_ac[i] = mt76_rr(dev, addr);
831 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
832 tx_time[i] = msta->airtime_ac[i] - tx_last;
833 rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
		/* approaching overflow: clear the hardware counters */
835 if ((tx_last | rx_last) & BIT(30))
840 mt7615_mac_wtbl_update(dev, msta->wcid.idx,
841 MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
842 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
848 sta = container_of((void *)msta, struct ieee80211_sta,
850 for (i = 0; i < 4; i++) {
851 u32 tx_cur = tx_time[i];
852 u32 rx_cur = rx_time[hw_queue_map[i]];
853 u8 tid = ac_to_tid[i];
855 if (!tx_cur && !rx_cur)
858 ieee80211_sta_register_airtime(sta, tid, tx_cur,
863 EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll);
/* Build a mt7615_rate_desc from a station's rate table. Pads the rate
 * array out to 4 entries, stores it into the inactive of the two
 * rateset slots (selected by the TSF parity bit so TX status can tell
 * which set a frame used), normalizes GI flags and de-duplicates
 * adjacent MCS entries, then encodes each rate (and the optional probe
 * rate) into hardware rate values via mt7615_mac_tx_rate_val().
 */
866 mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta,
867 struct ieee80211_tx_rate *probe_rate,
868 struct ieee80211_tx_rate *rates,
869 struct mt7615_rate_desc *rd)
871 struct mt7615_dev *dev = phy->dev;
872 struct mt76_phy *mphy = phy->mt76;
873 struct ieee80211_tx_rate *ref;
874 bool rateset, stbc = false;
875 int n_rates = sta->n_rates;
	/* pad unused slots with the last valid rate */
879 for (i = n_rates; i < 4; i++)
880 rates[i] = rates[n_rates - 1];
	/* write into the currently-unused rateset slot */
882 rateset = !(sta->rate_set_tsf & BIT(0));
883 memcpy(sta->rateset[rateset].rates, rates,
884 sizeof(sta->rateset[rateset].rates));
886 sta->rateset[rateset].probe_rate = *probe_rate;
887 ref = &sta->rateset[rateset].probe_rate;
889 sta->rateset[rateset].probe_rate.idx = -1;
890 ref = &sta->rateset[rateset].rates[0];
893 rates = sta->rateset[rateset].rates;
894 for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
896 * We don't support switching between short and long GI
897 * within the rate set. For accurate tx status reporting, we
898 * need to make sure that flags match.
899 * For improved performance, avoid duplicate entries by
900 * decrementing the MCS index if necessary
902 if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
903 rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;
905 for (j = 0; j < i; j++) {
906 if (rates[i].idx != rates[j].idx)
908 if ((rates[i].flags ^ rates[j].flags) &
909 (IEEE80211_TX_RC_40_MHZ_WIDTH |
910 IEEE80211_TX_RC_80_MHZ_WIDTH |
911 IEEE80211_TX_RC_160_MHZ_WIDTH))
	/* encode each slot to a hardware rate value */
921 rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
925 rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
	/* no probe rate: fall back to the primary rate */
932 rd->probe_val = rd->val[0];
935 rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
941 rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
947 rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
951 rd->rateset = rateset;
/* Non-MMIO (USB/SDIO) path for rate updates: register access cannot be
 * done in atomic context, so build a rate descriptor, queue it on
 * dev->wrd_head and defer the WTBL write to dev->rate_work. Skips the
 * update if work is already pending; allocation uses GFP_ATOMIC since
 * this can be called from atomic context.
 */
956 mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
957 struct ieee80211_tx_rate *probe_rate,
958 struct ieee80211_tx_rate *rates)
960 struct mt7615_dev *dev = phy->dev;
961 struct mt7615_wtbl_rate_desc *wrd;
963 if (work_pending(&dev->rate_work))
966 wrd = kzalloc(sizeof(*wrd), GFP_ATOMIC);
971 mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates,
973 list_add_tail(&wrd->node, &dev->wrd_head);
974 queue_work(dev->mt76.wq, &dev->rate_work);
/* Read the 12-bit TX sequence number for (wcid, tid) from the WTBL.
 * The per-TID SN fields are bit-packed starting at WTBL word 11; when
 * a field straddles a 32-bit word boundary the second word is read and
 * the halves are combined (offset computation elided in this view).
 */
979 u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
984 addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4;
987 addr += 4 * (offset / 32);
990 val = mt76_rr(dev, addr);
	/* field crosses into the next word: merge the high bits */
995 val2 = mt76_rr(dev, addr);
996 val |= val2 << (32 - offset);
999 return val & GENMASK(11, 0);
/* Program a station's rate table into the hardware WTBL (MMIO path).
 * Non-MMIO buses defer to the workqueue via
 * mt7615_mac_queue_rate_update(). Builds a rate descriptor, writes the
 * bandwidth/rate fields across WTBL words 5 and 27 and the RIUCR0-3
 * rate-update registers, triggers a RATE_UPDATE + TX_COUNT_CLEAR WTBL
 * operation, and snapshots the TSF (with the rateset slot index in
 * bit 0) so TX status processing can match frames to the rateset that
 * was active when they were queued.
 */
1002 void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
1003 struct ieee80211_tx_rate *probe_rate,
1004 struct ieee80211_tx_rate *rates)
1006 int wcid = sta->wcid.idx, n_rates = sta->n_rates;
1007 struct mt7615_dev *dev = phy->dev;
1008 struct mt7615_rate_desc rd;
1010 u16 idx = sta->vif->mt76.omac_idx;
	/* USB/SDIO: defer to workqueue context */
1012 if (!mt76_is_mmio(&dev->mt76)) {
1013 mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
	/* wait for any in-flight WTBL operation to finish */
1017 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
1020 memset(&rd, 0, sizeof(struct mt7615_rate_desc));
1021 mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd);
1023 addr = mt7615_mac_wtbl_addr(dev, wcid);
1024 w27 = mt76_rr(dev, addr + 27 * 4);
1025 w27 &= ~MT_WTBL_W27_CC_BW_SEL;
1026 w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw);
1028 w5 = mt76_rr(dev, addr + 5 * 4);
1029 w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
1030 MT_WTBL_W5_MPDU_OK_COUNT |
1031 MT_WTBL_W5_MPDU_FAIL_COUNT |
1032 MT_WTBL_W5_RATE_IDX);
1033 w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) |
1034 FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
1035 rd.bw_idx ? rd.bw_idx - 1 : 7);
1037 mt76_wr(dev, MT_WTBL_RIUCR0, w5);
	/* 8 rate slots packed across RIUCR1-3 */
1039 mt76_wr(dev, MT_WTBL_RIUCR1,
1040 FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) |
1041 FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) |
1042 FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1]));
1044 mt76_wr(dev, MT_WTBL_RIUCR2,
1045 FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) |
1046 FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) |
1047 FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) |
1048 FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2]));
1050 mt76_wr(dev, MT_WTBL_RIUCR3,
1051 FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) |
1052 FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) |
1053 FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3]));
1055 mt76_wr(dev, MT_WTBL_UPDATE,
1056 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
1057 MT_WTBL_UPDATE_RATE_UPDATE |
1058 MT_WTBL_UPDATE_TX_COUNT_CLEAR);
1060 mt76_wr(dev, addr + 27 * 4, w27);
	/* latch the current TSF; bit 0 encodes which rateset is active */
1062 idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
1063 addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
1065 mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
1066 sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
1067 sta->rate_set_tsf |= rd.rateset;
1069 if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
1070 mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
	/* retry budget: 2 retries per rate across the configured rates */
1072 sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
1073 sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
1074 sta->rate_probe = !!probe_rate;
1076 EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
/* Write (SET_KEY) or clear key material in a WTBL entry's key region
 * (word 30 onward). TKIP keys are stored with Rx/Tx MIC halves swapped;
 * BIP-CMAC-128 occupies the upper 16 bytes so a pairwise cipher can
 * coexist in the lower 16. On removal, only the portion belonging to
 * the removed cipher is zeroed unless no ciphers remain. Bails out if
 * the key is larger than the staging buffer.
 */
1079 mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
1080 struct ieee80211_key_conf *key,
1081 enum mt7615_cipher_type cipher, u16 cipher_mask,
1082 enum set_key_cmd cmd)
	/* key material region starts at WTBL word 30 */
1084 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
1087 if (key->keylen > sizeof(data))
	/* read-modify-write: preserve the other cipher's key bytes */
1090 mt76_rr_copy(dev, addr, data, sizeof(data));
1091 if (cmd == SET_KEY) {
1092 if (cipher == MT_CIPHER_TKIP) {
1093 /* Rx/Tx MIC keys are swapped */
1094 memcpy(data, key->key, 16);
1095 memcpy(data + 16, key->key + 24, 8);
1096 memcpy(data + 24, key->key + 16, 8);
1098 if (cipher_mask == BIT(cipher))
1099 memcpy(data, key->key, key->keylen);
1100 else if (cipher != MT_CIPHER_BIP_CMAC_128)
1101 memcpy(data, key->key, 16);
1102 if (cipher == MT_CIPHER_BIP_CMAC_128)
1103 memcpy(data + 16, key->key, 16);
	/* DISABLE_KEY path: zero only what belongs to this cipher */
1106 if (cipher == MT_CIPHER_BIP_CMAC_128)
1107 memset(data + 16, 0, 16);
1108 else if (cipher_mask)
1109 memset(data, 0, 16);
1111 memset(data, 0, sizeof(data));
1114 mt76_wr_copy(dev, addr, data, sizeof(data));
/* Update the pairwise-key control bits (words 0-1) of a WTBL entry:
 * RX key valid, integrity-key valid (when a BIP cipher is installed)
 * and the key index. The key index is only rewritten for pairwise
 * ciphers, or for BIP when it is the sole cipher, so installing BIP
 * does not clobber the pairwise key's index. The new words are staged
 * through the RICR registers and committed with an RXINFO_UPDATE WTBL
 * operation; waits for any prior WTBL operation first.
 */
1120 mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
1121 enum mt7615_cipher_type cipher, u16 cipher_mask,
1122 int keyidx, enum set_key_cmd cmd)
1124 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
1126 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
1129 w0 = mt76_rr(dev, addr);
1130 w1 = mt76_rr(dev, addr + 4);
1133 w0 |= MT_WTBL_W0_RX_KEY_VALID;
1135 w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX);
1136 if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128))
1137 w0 |= MT_WTBL_W0_RX_IK_VALID;
1139 w0 &= ~MT_WTBL_W0_RX_IK_VALID;
1141 if (cmd == SET_KEY &&
1142 (cipher != MT_CIPHER_BIP_CMAC_128 ||
1143 cipher_mask == BIT(cipher))) {
1144 w0 &= ~MT_WTBL_W0_KEY_IDX;
1145 w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
1148 mt76_wr(dev, MT_WTBL_RICR0, w0);
1149 mt76_wr(dev, MT_WTBL_RICR1, w1);
1151 if (!mt7615_mac_wtbl_update(dev, wcid->idx,
1152 MT_WTBL_UPDATE_RXINFO_UPDATE))
/* Set or clear the cipher type field (WTBL word 2) for a station.
 * On removal the field is simply cleared. When installing BIP-CMAC-128
 * alongside an existing pairwise cipher, the field is left untouched —
 * the pairwise cipher type must stay programmed.
 */
1159 mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
1160 enum mt7615_cipher_type cipher, u16 cipher_mask,
1161 enum set_key_cmd cmd)
1163 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
1166 mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
1173 if (cipher == MT_CIPHER_BIP_CMAC_128 &&
1174 cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
1177 mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
1178 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
/* Install or remove a key in a station's WTBL entry. Maintains the
 * per-wcid cipher bitmask, then updates the cipher type field, the key
 * material and the pairwise-key control bits in sequence. Caller must
 * hold dev->mt76.lock (see mt7615_mac_wtbl_set_key()). Returns 0 on
 * success or a negative errno from the sub-steps.
 */
1181 int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
1182 struct mt76_wcid *wcid,
1183 struct ieee80211_key_conf *key,
1184 enum set_key_cmd cmd)
1186 enum mt7615_cipher_type cipher;
1187 u16 cipher_mask = wcid->cipher;
1190 cipher = mt7615_mac_get_cipher(key->cipher);
1191 if (cipher == MT_CIPHER_NONE)
1195 cipher_mask |= BIT(cipher);
1197 cipher_mask &= ~BIT(cipher);
1199 mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
1200 err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
1205 err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
1210 wcid->cipher = cipher_mask;
/* Locked wrapper around __mt7615_mac_wtbl_set_key(): serializes WTBL
 * key programming with dev->mt76.lock and returns its result.
 */
1215 int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
1216 struct mt76_wcid *wcid,
1217 struct ieee80211_key_conf *key,
1218 enum set_key_cmd cmd)
1222 spin_lock_bh(&dev->mt76.lock);
1223 err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
1224 spin_unlock_bh(&dev->mt76.lock);
/* Translate a hardware TX status (TXS) report into mac80211
 * ieee80211_tx_info: ACK/A-MPDU flags, per-rate retry counts
 * reconstructed from the total count and last-rate index, and the
 * final rate decoded back from the hardware encoding (CCK/OFDM/HT/VHT).
 * Uses the TXS timestamp vs. the station's rate_set_tsf to identify
 * which of the two ratesets the frame used; a successful probe report
 * re-programs the rates without the probe entry. Return value (whether
 * the status is valid) is produced by elided code.
 */
1229 static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
1230 struct ieee80211_tx_info *info, __le32 *txs_data)
1232 struct ieee80211_supported_band *sband;
1233 struct mt7615_rate_set *rs;
1234 struct mt76_phy *mphy;
1235 int first_idx = 0, last_idx;
1237 bool fixed_rate, ack_timeout;
1238 bool probe, ampdu, cck = false;
1241 u32 final_rate, final_rate_flags, final_nss, txs;
	/* a non-zero count in rates[0] means mac80211 fixed the rate */
1243 fixed_rate = info->status.rates[0].count;
1244 probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1246 txs = le32_to_cpu(txs_data[1]);
1247 ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
1249 txs = le32_to_cpu(txs_data[3]);
1250 count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
1251 last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);
1253 txs = le32_to_cpu(txs_data[0]);
1254 final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
1255 ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
1257 if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
1260 if (txs & MT_TXS0_QUEUE_TIMEOUT)
1264 info->flags |= IEEE80211_TX_STAT_ACK;
1266 info->status.ampdu_len = 1;
1267 info->status.ampdu_ack_len = !!(info->flags &
1268 IEEE80211_TX_STAT_ACK);
1270 if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
1271 info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
	/* derive the first attempted rate slot from total retries */
1273 first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);
1275 if (fixed_rate && !probe) {
1276 info->status.rates[0].count = count;
	/* pick the rateset in effect when the frame was queued: compare
	 * TXS timestamp against the saved TSF, then apply the parity bit */
1281 rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
1282 rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
1283 rate_set_tsf) < 1000000);
1284 rs_idx ^= rate_set_tsf & BIT(0);
1285 rs = &sta->rateset[rs_idx];
	/* probe-rate report: restore normal rates once probing finished */
1287 if (!first_idx && rs->probe_rate.idx >= 0) {
1288 info->status.rates[0] = rs->probe_rate;
1290 spin_lock_bh(&dev->mt76.lock);
1291 if (sta->rate_probe) {
1292 struct mt7615_phy *phy = &dev->phy;
1294 if (sta->wcid.ext_phy && dev->mt76.phy2)
1295 phy = dev->mt76.phy2->priv;
1297 mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
1299 spin_unlock_bh(&dev->mt76.lock);
1301 info->status.rates[0] = rs->rates[first_idx / 2];
1303 info->status.rates[0].count = 0;
	/* distribute the retry count over the attempted rate slots */
1305 for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
1306 struct ieee80211_tx_rate *cur_rate;
1309 cur_rate = &rs->rates[idx / 2];
1310 cur_count = min_t(int, MT7615_RATE_RETRY, count);
1313 if (idx && (cur_rate->idx != info->status.rates[i].idx ||
1314 cur_rate->flags != info->status.rates[i].flags)) {
1316 if (i == ARRAY_SIZE(info->status.rates)) {
1321 info->status.rates[i] = *cur_rate;
1322 info->status.rates[i].count = 0;
1325 info->status.rates[i].count += cur_count;
	/* decode the hardware's final TX rate back to mac80211 form */
1329 final_rate_flags = info->status.rates[i].flags;
1331 switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
1332 case MT_PHY_TYPE_CCK:
1335 case MT_PHY_TYPE_OFDM:
1337 if (sta->wcid.ext_phy && dev->mt76.phy2)
1338 mphy = dev->mt76.phy2;
1340 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
1341 sband = &mphy->sband_5g.sband;
1343 sband = &mphy->sband_2g.sband;
1344 final_rate &= MT_TX_RATE_IDX;
1345 final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
1347 final_rate_flags = 0;
1349 case MT_PHY_TYPE_HT_GF:
1350 case MT_PHY_TYPE_HT:
1351 final_rate_flags |= IEEE80211_TX_RC_MCS;
1352 final_rate &= MT_TX_RATE_IDX;
1353 if (final_rate > 31)
1356 case MT_PHY_TYPE_VHT:
1357 final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);
	/* single-stream STBC reports one extra NSS; undo it */
1359 if ((final_rate & MT_TX_RATE_STBC) && final_nss)
1362 final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
1363 final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
1369 info->status.rates[i].idx = final_rate;
1370 info->status.rates[i].flags = final_rate_flags;
1375 static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
1376 struct mt7615_sta *sta, int pid,
1379 struct mt76_dev *mdev = &dev->mt76;
1380 struct sk_buff_head list;
1381 struct sk_buff *skb;
1383 if (pid < MT_PACKET_ID_FIRST)
1386 trace_mac_txdone(mdev, sta->wcid.idx, pid);
1388 mt76_tx_status_lock(mdev, &list);
1389 skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
1391 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1393 if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
1394 ieee80211_tx_info_clear_status(info);
1395 info->status.rates[0].idx = -1;
1398 mt76_tx_status_skb_done(mdev, skb, &list);
1400 mt76_tx_status_unlock(mdev, &list);
1405 static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
1407 struct ieee80211_tx_info info = {};
1408 struct ieee80211_sta *sta = NULL;
1409 struct mt7615_sta *msta = NULL;
1410 struct mt76_wcid *wcid;
1411 struct mt76_phy *mphy = &dev->mt76.phy;
1412 __le32 *txs_data = data;
1417 txs = le32_to_cpu(txs_data[0]);
1418 pid = FIELD_GET(MT_TXS0_PID, txs);
1419 txs = le32_to_cpu(txs_data[2]);
1420 wcidx = FIELD_GET(MT_TXS2_WCID, txs);
1422 if (pid == MT_PACKET_ID_NO_ACK)
1425 if (wcidx >= MT7615_WTBL_SIZE)
1430 wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
1434 msta = container_of(wcid, struct mt7615_sta, wcid);
1435 sta = wcid_to_sta(wcid);
1437 spin_lock_bh(&dev->sta_poll_lock);
1438 if (list_empty(&msta->poll_list))
1439 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
1440 spin_unlock_bh(&dev->sta_poll_lock);
1442 if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
1445 if (wcidx >= MT7615_WTBL_STA || !sta)
1448 if (wcid->ext_phy && dev->mt76.phy2)
1449 mphy = dev->mt76.phy2;
1451 if (mt7615_fill_txs(dev, msta, &info, txs_data))
1452 ieee80211_tx_status_noskb(mphy->hw, sta, &info);
1459 mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
1461 struct mt76_dev *mdev = &dev->mt76;
1462 struct mt76_txwi_cache *txwi;
1467 trace_mac_tx_free(dev, token);
1468 txwi = mt76_token_put(mdev, token);
1472 txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi);
1473 val = le32_to_cpu(txwi_data[1]);
1474 wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val);
1476 mt7615_txp_skb_unmap(mdev, txwi);
1478 mt76_tx_complete_skb(mdev, wcid, txwi->skb);
1482 mt76_put_txwi(mdev, txwi);
1485 static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
1487 struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
1490 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
1491 if (is_mt7615(&dev->mt76)) {
1492 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
1494 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1495 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
1498 count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
1499 if (is_mt7615(&dev->mt76)) {
1500 __le16 *token = &free->token[0];
1502 for (i = 0; i < count; i++)
1503 mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
1505 __le32 *token = (__le32 *)&free->token[0];
1507 for (i = 0; i < count; i++)
1508 mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
1514 mt7615_mac_sta_poll(dev);
1517 mt76_worker_schedule(&dev->mt76.tx_worker);
1520 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1521 struct sk_buff *skb)
1523 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
1524 __le32 *rxd = (__le32 *)skb->data;
1525 __le32 *end = (__le32 *)&skb->data[skb->len];
1526 enum rx_pkt_type type;
1529 type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
1530 flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
1531 if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
1532 type = PKT_TYPE_NORMAL_MCU;
1536 for (rxd++; rxd + 7 <= end; rxd += 7)
1537 mt7615_mac_add_txs(dev, rxd);
1540 case PKT_TYPE_TXRX_NOTIFY:
1541 mt7615_mac_tx_free(dev, skb);
1543 case PKT_TYPE_RX_EVENT:
1544 mt7615_mcu_rx_event(dev, skb);
1546 case PKT_TYPE_NORMAL_MCU:
1547 case PKT_TYPE_NORMAL:
1548 if (!mt7615_mac_fill_rx(dev, skb)) {
1549 mt76_rx(&dev->mt76, q, skb);
1558 EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb);
1561 mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm)
1563 struct mt7615_dev *dev = phy->dev;
1564 bool ext_phy = phy != &dev->phy;
1566 if (is_mt7663(&dev->mt76)) {
1568 mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy),
1569 MT_WF_PHY_PD_OFDM_MASK(0),
1570 MT_WF_PHY_PD_OFDM(0, val));
1572 mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy),
1573 MT_WF_PHY_PD_CCK_MASK(ext_phy),
1574 MT_WF_PHY_PD_CCK(ext_phy, val));
1579 mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
1580 MT_WF_PHY_PD_OFDM_MASK(ext_phy),
1581 MT_WF_PHY_PD_OFDM(ext_phy, val));
1583 mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
1584 MT_WF_PHY_PD_CCK_MASK(ext_phy),
1585 MT_WF_PHY_PD_CCK(ext_phy, val));
1589 mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
1592 mt7615_mac_set_sensitivity(phy, 0x13c, true);
1594 mt7615_mac_set_sensitivity(phy, 0x92, false);
1596 phy->ofdm_sensitivity = -98;
1597 phy->cck_sensitivity = -110;
1598 phy->last_cca_adj = jiffies;
1601 void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
1603 struct mt7615_dev *dev = phy->dev;
1604 bool ext_phy = phy != &dev->phy;
1607 mt7615_mutex_acquire(dev);
1609 if (phy->scs_en == enable)
1612 if (is_mt7663(&dev->mt76)) {
1613 reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy);
1614 mask = MT_WF_PHY_PD_BLK(0);
1616 reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy);
1617 mask = MT_WF_PHY_PD_BLK(ext_phy);
1621 mt76_set(dev, reg, mask);
1622 if (is_mt7622(&dev->mt76)) {
1623 mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8);
1624 mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7);
1627 mt76_clear(dev, reg, mask);
1630 mt7615_mac_set_default_sensitivity(phy);
1631 phy->scs_en = enable;
1634 mt7615_mutex_release(dev);
1637 void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
1641 if (is_mt7663(&dev->mt76))
1642 reg = MT7663_WF_PHY_R0_PHYMUX_5;
1644 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);
1647 rxtd = MT_WF_PHY_RXTD2(10);
1649 rxtd = MT_WF_PHY_RXTD(12);
1651 mt76_set(dev, rxtd, BIT(18) | BIT(29));
1652 mt76_set(dev, reg, 0x5 << 12);
1655 void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy)
1657 struct mt7615_dev *dev = phy->dev;
1658 bool ext_phy = phy != &dev->phy;
1661 if (is_mt7663(&dev->mt76))
1662 reg = MT7663_WF_PHY_R0_PHYMUX_5;
1664 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);
1666 /* reset PD and MDRDY counters */
1667 mt76_clear(dev, reg, GENMASK(22, 20));
1668 mt76_set(dev, reg, BIT(22) | BIT(20));
1672 mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
1673 u32 rts_err_rate, bool ofdm)
1675 struct mt7615_dev *dev = phy->dev;
1676 int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
1677 bool ext_phy = phy != &dev->phy;
1678 u16 def_th = ofdm ? -98 : -110;
1679 bool update = false;
1683 sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity;
1684 signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy);
1686 mt7615_mac_set_default_sensitivity(phy);
1690 signal = min(signal, -72);
1691 if (false_cca > 500) {
1692 if (rts_err_rate > MT_FRAC(40, 100))
1695 /* decrease coverage */
1696 if (*sensitivity == def_th && signal > -90) {
1699 } else if (*sensitivity + 2 < signal) {
1703 } else if ((false_cca > 0 && false_cca < 50) ||
1704 rts_err_rate > MT_FRAC(60, 100)) {
1705 /* increase coverage */
1706 if (*sensitivity - 2 >= def_th) {
1712 if (*sensitivity > signal) {
1713 *sensitivity = signal;
1718 u16 val = ofdm ? *sensitivity * 2 + 512 : *sensitivity + 256;
1720 mt7615_mac_set_sensitivity(phy, val, ofdm);
1721 phy->last_cca_adj = jiffies;
1726 mt7615_mac_scs_check(struct mt7615_phy *phy)
1728 struct mt7615_dev *dev = phy->dev;
1729 struct mib_stats *mib = &phy->mib;
1730 u32 val, rts_err_rate = 0;
1731 u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
1732 bool ext_phy = phy != &dev->phy;
1737 if (is_mt7663(&dev->mt76))
1738 val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
1740 val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
1741 pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
1742 pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);
1744 if (is_mt7663(&dev->mt76))
1745 val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
1747 val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
1748 mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
1749 mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);
1751 phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
1752 phy->false_cca_cck = pd_cck - mdrdy_cck;
1753 mt7615_mac_cca_stats_reset(phy);
1755 if (mib->rts_cnt + mib->rts_retries_cnt)
1756 rts_err_rate = MT_FRAC(mib->rts_retries_cnt,
1757 mib->rts_cnt + mib->rts_retries_cnt);
1760 mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false);
1762 mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true);
1764 if (time_after(jiffies, phy->last_cca_adj + 10 * HZ))
1765 mt7615_mac_set_default_sensitivity(phy);
1769 mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
1771 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
1772 u32 reg, val, sum = 0, n = 0;
1775 if (is_mt7663(&dev->mt76))
1776 reg = MT7663_WF_PHY_RXTD(20);
1778 reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);
1780 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
1781 val = mt76_rr(dev, reg);
1782 sum += val * nf_power[i];
1793 mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
1795 struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
1796 struct mt7615_phy *phy = mphy->priv;
1797 struct mt76_channel_state *state;
1798 u64 busy_time, tx_time, rx_time, obss_time;
1799 u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
1802 busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
1803 MT_MIB_SDR9_BUSY_MASK);
1804 tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
1805 MT_MIB_SDR36_TXTIME_MASK);
1806 rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
1807 MT_MIB_SDR37_RXTIME_MASK);
1808 obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);
1810 nf = mt7615_phy_get_nf(dev, idx);
1812 phy->noise = nf << 4;
1814 phy->noise += nf - (phy->noise >> 4);
1816 state = mphy->chan_state;
1817 state->cc_busy += busy_time;
1818 state->cc_tx += tx_time;
1819 state->cc_rx += rx_time + obss_time;
1820 state->cc_bss_rx += rx_time;
1821 state->noise = -(phy->noise >> 4);
1824 static void __mt7615_update_channel(struct mt7615_dev *dev)
1826 struct mt76_dev *mdev = &dev->mt76;
1828 mt7615_phy_update_channel(&mdev->phy, 0);
1830 mt7615_phy_update_channel(mdev->phy2, 1);
1832 /* reset obss airtime */
1833 mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
1836 void mt7615_update_channel(struct mt76_dev *mdev)
1838 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
1840 if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
1843 __mt7615_update_channel(dev);
1844 mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
1846 EXPORT_SYMBOL_GPL(mt7615_update_channel);
1848 static void mt7615_update_survey(struct mt7615_dev *dev)
1850 struct mt76_dev *mdev = &dev->mt76;
1853 __mt7615_update_channel(dev);
1854 cur_time = ktime_get_boottime();
1856 mt76_update_survey_active_time(&mdev->phy, cur_time);
1858 mt76_update_survey_active_time(mdev->phy2, cur_time);
1862 mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
1864 struct mt7615_dev *dev = phy->dev;
1865 struct mib_stats *mib = &phy->mib;
1866 bool ext_phy = phy != &dev->phy;
1870 mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
1871 MT_MIB_SDR3_FCS_ERR_MASK);
1873 val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
1874 MT_MIB_AMPDU_MPDU_COUNT);
1876 val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy),
1877 MT_MIB_AMPDU_ACK_COUNT);
1878 mib->aggr_per = 1000 * (val - val2) / val;
1881 aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
1882 for (i = 0; i < 4; i++) {
1883 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
1884 mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
1885 mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
1888 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
1889 mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
1890 mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
1893 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
1894 dev->mt76.aggr_stats[aggr++] += val & 0xffff;
1895 dev->mt76.aggr_stats[aggr++] += val >> 16;
1899 void mt7615_pm_wake_work(struct work_struct *work)
1901 struct mt7615_dev *dev;
1902 struct mt76_phy *mphy;
1904 dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
1906 mphy = dev->phy.mt76;
1908 if (!mt7615_mcu_set_drv_ctrl(dev)) {
1911 mt76_for_each_q_rx(&dev->mt76, i)
1912 napi_schedule(&dev->mt76.napi[i]);
1913 mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
1914 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
1915 if (test_bit(MT76_STATE_RUNNING, &mphy->state))
1916 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
1917 MT7615_WATCHDOG_TIME);
1920 ieee80211_wake_queues(mphy->hw);
1921 wake_up(&dev->pm.wait);
1924 void mt7615_pm_power_save_work(struct work_struct *work)
1926 struct mt7615_dev *dev;
1927 unsigned long delta;
1929 dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
1932 delta = dev->pm.idle_timeout;
1933 if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
1934 test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
1937 if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
1938 delta = dev->pm.last_activity + delta - jiffies;
1942 if (!mt7615_mcu_set_fw_ctrl(dev))
1945 queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
1948 void mt7615_mac_work(struct work_struct *work)
1950 struct mt7615_phy *phy;
1951 struct mt76_phy *mphy;
1953 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
1957 mt7615_mutex_acquire(phy->dev);
1959 mt7615_update_survey(phy->dev);
1960 if (++mphy->mac_work_count == 5) {
1961 mphy->mac_work_count = 0;
1963 mt7615_mac_update_mib_stats(phy);
1964 mt7615_mac_scs_check(phy);
1967 mt7615_mutex_release(phy->dev);
1969 mt76_tx_status_check(mphy->dev, NULL, false);
1970 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
1971 MT7615_WATCHDOG_TIME);
1974 void mt7615_tx_token_put(struct mt7615_dev *dev)
1976 struct mt76_txwi_cache *txwi;
1979 spin_lock_bh(&dev->mt76.token_lock);
1980 idr_for_each_entry(&dev->mt76.token, txwi, id) {
1981 mt7615_txp_skb_unmap(&dev->mt76, txwi);
1983 struct ieee80211_hw *hw;
1985 hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
1986 ieee80211_free_txskb(hw, txwi->skb);
1988 mt76_put_txwi(&dev->mt76, txwi);
1990 spin_unlock_bh(&dev->mt76.token_lock);
1991 idr_destroy(&dev->mt76.token);
1993 EXPORT_SYMBOL_GPL(mt7615_tx_token_put);
1995 static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
1997 struct mt7615_dev *dev = phy->dev;
1999 if (phy->rdd_state & BIT(0))
2000 mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
2001 if (phy->rdd_state & BIT(1))
2002 mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
2005 static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
2009 err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
2013 return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
2017 static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
2019 struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
2020 struct mt7615_dev *dev = phy->dev;
2021 bool ext_phy = phy != &dev->phy;
2025 err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
2029 err = mt7615_dfs_start_rdd(dev, ext_phy);
2033 phy->rdd_state |= BIT(ext_phy);
2035 if (chandef->width == NL80211_CHAN_WIDTH_160 ||
2036 chandef->width == NL80211_CHAN_WIDTH_80P80) {
2037 err = mt7615_dfs_start_rdd(dev, 1);
2041 phy->rdd_state |= BIT(1);
2048 mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
2050 const struct mt7615_dfs_radar_spec *radar_specs;
2051 struct mt7615_dev *dev = phy->dev;
2054 switch (dev->mt76.region) {
2055 case NL80211_DFS_FCC:
2056 radar_specs = &fcc_radar_specs;
2057 err = mt7615_mcu_set_fcc5_lpn(dev, 8);
2061 case NL80211_DFS_ETSI:
2062 radar_specs = &etsi_radar_specs;
2064 case NL80211_DFS_JP:
2065 radar_specs = &jp_radar_specs;
2071 for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
2072 err = mt7615_mcu_set_radar_th(dev, i,
2073 &radar_specs->radar_pattern[i]);
2078 return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
2081 int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
2083 struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
2084 struct mt7615_dev *dev = phy->dev;
2085 bool ext_phy = phy != &dev->phy;
2088 if (is_mt7663(&dev->mt76))
2091 if (dev->mt76.region == NL80211_DFS_UNSET) {
2092 phy->dfs_state = -1;
2099 if (test_bit(MT76_SCANNING, &phy->mt76->state))
2102 if (phy->dfs_state == chandef->chan->dfs_state)
2105 err = mt7615_dfs_init_radar_specs(phy);
2107 phy->dfs_state = -1;
2111 phy->dfs_state = chandef->chan->dfs_state;
2113 if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
2114 if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
2115 return mt7615_dfs_start_radar_detector(phy);
2117 return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
2122 err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
2126 mt7615_dfs_stop_radar_detector(phy);
2130 int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
2131 struct ieee80211_vif *vif,
2134 struct mt7615_dev *dev = phy->dev;
2135 bool ext_phy = phy != &dev->phy;
2138 if (!mt7615_firmware_offload(dev))
2141 switch (vif->type) {
2142 case NL80211_IFTYPE_MONITOR:
2144 case NL80211_IFTYPE_MESH_POINT:
2145 case NL80211_IFTYPE_ADHOC:
2146 case NL80211_IFTYPE_AP:
2148 phy->n_beacon_vif++;
2150 phy->n_beacon_vif--;
2156 err = mt7615_mcu_set_bss_pm(dev, vif, !phy->n_beacon_vif);
2160 if (phy->n_beacon_vif) {
2161 vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
2162 mt76_clear(dev, MT_WF_RFCR(ext_phy),
2163 MT_WF_RFCR_DROP_OTHER_BEACON);
2165 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
2166 mt76_set(dev, MT_WF_RFCR(ext_phy),
2167 MT_WF_RFCR_DROP_OTHER_BEACON);
2173 void mt7615_coredump_work(struct work_struct *work)
2175 struct mt7615_dev *dev;
2178 dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
2179 coredump.work.work);
2181 if (time_is_after_jiffies(dev->coredump.last_activity +
2182 4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
2183 queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
2184 MT76_CONNAC_COREDUMP_TIMEOUT);
2188 dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
2192 struct sk_buff *skb;
2194 spin_lock_bh(&dev->mt76.lock);
2195 skb = __skb_dequeue(&dev->coredump.msg_list);
2196 spin_unlock_bh(&dev->mt76.lock);
2201 skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
2202 if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
2207 memcpy(data, skb->data, skb->len);
2212 dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,