/* SPDX-License-Identifier: ISC */
/* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
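/* Compat: kernels that still ship the misspelled NOMIMAL_* HE PHY cap
 * names do not provide the correctly spelled NOMINAL_* names used here,
 * so define them locally.
 */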
#ifdef IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US
#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US		0x0
#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US		0x1
#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US		0x2
#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED	0x3
#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK		0xc0
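/* Illustrative only: the padding value is read from HE PHY capability
 * byte 9 with the mask above, e.g. (he_cap standing in for an assumed
 * struct ieee80211_sta_he_cap pointer):
 *
 *	u8 pad = u8_get_bits(he_cap->he_cap_elem.phy_cap_info[9],
 *			     IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
 */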
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		256

#define MT_MAX_NON_AQL_PKT	16
#define MT_TXQ_FREE_THR		32

#define MT76_TOKEN_FREE_THR	64
struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);

	enum mt76_bus_type type;
};
#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
enum mt76_cipher_type {
	MT_CIPHER_TKIP_NO_MIC,
	MT_CIPHER_BIP_CMAC_128,
enum mt76_dfs_state {
	MT_DFS_STATE_UNKNOWN,
	MT_DFS_STATE_DISABLED,
struct mt76_queue_buf {

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];

struct mt76_queue_entry {
	struct mt76_txwi_cache *txwi;
struct mt76_queue_regs {
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t cleanup_lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
struct mt76_mcu_ops {
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, int *seq);
	int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
				  struct sk_buff *skb, int seq);
	u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
	void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev,
		    int (*poll)(struct napi_struct *napi, int budget));

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
			   bool flush);

	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};
enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_HDR_TRANS,

#define MT76_N_WCIDS 544
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY		BIT(3)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)
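/* Illustrative only: a fixed-rate tx_info word would be packed from the
 * fields above, with rate and nss standing in for locally computed
 * values:
 *
 *	u32 tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rate) |
 *		      FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
 *		      MT_WCID_TX_INFO_SET;
 */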
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	atomic_t non_aql_packets;

	struct ewma_signal rssi;

	struct rate_info rate;

	u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];

	struct list_head list;
struct mt76_txwi_cache {
	struct list_head list;

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	struct delayed_work reorder_work;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};
#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2
#define MT_PACKET_ID_HAS_RATE		BIT(7)
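/* Packet IDs 0 and 1 are reserved markers (no ACK expected / no status
 * skb tracked); status-tracked packets start at MT_PACKET_ID_FIRST.
 * Bit 7 flags a packet ID that carries an encoded rate rather than
 * referring to a queued status skb.
 */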
/* This is the timer for deciding when to give up waiting for the TXS
 * callback, with the starting time being the time at which the DMA_DONE
 * callback was seen (so we know the packet was processed then; it should
 * not take long after that for the firmware to send the TXS callback if
 * it is going to do so).
 */
#define MT_TX_STATUS_SKB_TIMEOUT	(HZ / 4)
	unsigned long jiffies;

	MT76_STATE_INITIALIZED,
	MT76_STATE_MCU_RUNNING,
	MT76_HW_SCHED_SCANNING,
	MT76_STATE_POWER_OFF,
#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)
#define MT_DRV_HW_MGMT_TXQ		BIT(4)
struct mt76_driver_ops {
	void (*update_survey)(struct mt76_phy *phy);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	bool (*rx_check)(struct mt76_dev *dev, void *data, int len);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};
struct mt76_channel_state {

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
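/* e.g. MT_VEND_ADDR(CFG, 0x14) expands to (MT_VEND_TYPE_CFG | 0x14),
 * tagging offset 0x14 as a config-type vendor request address.
 */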
	MT_VEND_DEV_MODE = 0x1,
	MT_VEND_POWER_ON = 0x4,
	MT_VEND_MULTI_WRITE = 0x6,
	MT_VEND_MULTI_READ = 0x7,
	MT_VEND_READ_EEPROM = 0x9,
	MT_VEND_WRITE_FCE = 0x42,
	MT_VEND_WRITE_CFG = 0x46,
	MT_VEND_READ_CFG = 0x47,
	MT_VEND_READ_EXT = 0x63,
	MT_VEND_WRITE_EXT = 0x66,
	MT_VEND_FEATURE_SET = 0x91,

	MT_EP_OUT_INBAND_CMD,
	struct sk_buff_head res_q;
	wait_queue_head_t wait;

#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
	struct mutex usb_ctrl_mtx;

	struct mt76_worker status_worker;
	struct mt76_worker rx_worker;

	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];

	struct mt76_reg_pair *rp;
#define MT76S_XMIT_BUF_SZ	0x3fe00
#define MT76S_NUM_TX_ENTRIES	256
#define MT76S_NUM_RX_ENTRIES	512

	struct mt76_worker txrx_worker;
	struct mt76_worker status_worker;
	struct mt76_worker net_worker;

	struct work_struct stat_work;

	struct sdio_func *func;

	wait_queue_head_t wait;

	int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
struct mt76_rx_status {
	struct mt76_wcid *wcid;

	u8 encoding:2, bw:3, he_ru:3;
	u8 he_gi:2, he_dcm:1;
	u8 amsdu:1, first_amsdu:1, last_amsdu:1;

	s8 chain_signal[IEEE80211_MAX_CHAINS];
struct mt76_freq_range_power {
	const struct cfg80211_sar_freq_ranges *range;

struct mt76_testmode_ops {
	int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
	int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
			  enum mt76_testmode_state new_state);
	int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
};
struct mt76_testmode_data {
	enum mt76_testmode_state state;

	u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
	struct sk_buff *tx_skb;
	u8 addr[3][ETH_ALEN];

	u64 packets[__MT_RXQ_MAX];
	u64 fcs_error[__MT_RXQ_MAX];
struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;

	struct mt76_queue *q_tx[__MT_TXQ_MAX];

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;
	enum mt76_dfs_state dfs_state;

	struct mt76_hw_cap cap;
	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct mt76_sband sband_6g;

	u8 macaddr[ETH_ALEN];

#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data test;
#endif

	struct delayed_work mac_work;

	struct {
		struct sk_buff *head;
		struct sk_buff **tail;
	} rx_amsdu[__MT_RXQ_MAX];

	struct mt76_freq_range_power *frp;
};
struct mt76_dev {
	struct mt76_phy phy; /* must be first */

	struct mt76_phy *phy2;

	struct ieee80211_hw *hw;

	struct mt76_rx_status rx_ampdu_status;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;

	struct net_device napi_dev;
	struct net_device tx_napi_dev;

	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	struct mt76_worker tx_worker;
	struct napi_struct tx_napi;

	spinlock_t token_lock;

	wait_queue_head_t tx_wait;
	/* spinlock used to protect wcid pktid linked list */
	spinlock_t status_lock;

	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
	u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
	struct list_head wcid_list;

	struct tasklet_struct pre_tbtt_tasklet;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;

	struct mt76_rate_power rate_power;

	enum nl80211_dfs_regions region;

	struct led_classdev led_cdev;

#ifdef CONFIG_NL80211_TESTMODE
	const struct mt76_testmode_ops *test_ops;
#endif

	struct workqueue_struct *wq;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
		struct mt76_sdio sdio;
	};
};
struct mt76_power_limits {

	MT_PHY_TYPE_HE_SU = 8,
	MT_PHY_TYPE_HE_EXT_SU,

	__MT_PHY_TYPE_HE_MAX,
struct mt76_sta_stats {
	u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
	u64 tx_bw[4];		/* 20, 40, 80, 160 */
	u64 tx_nss[4];		/* 1, 2, 3, 4 */
	u64 tx_mcs[16];		/* mcs idx */
};

struct mt76_ethtool_worker_info {
	int initial_stat_idx;
	int worker_stat_count;
#define CCK_RATE(_idx, _rate) {					\
	.bitrate = _rate,					\
	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),	\
}

#define OFDM_RATE(_idx, _rate) {				\
	.bitrate = _rate,					\
	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),	\
}

extern struct ieee80211_rate mt76_rates[12];
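/* Illustrative only: a driver rate table built with the helpers above,
 * with bitrate given in 100 kbit/s units (so 10 means 1 Mbit/s):
 *
 *	static struct ieee80211_rate example_rates[] = {
 *		CCK_RATE(0, 10),
 *		OFDM_RATE(11, 60),
 *	};
 */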
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)
#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))
#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)
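/* Illustrative only: mt76_set(dev, reg, BIT(0)) expands to
 * mt76_rmw(dev, reg, 0, BIT(0)), a read-modify-write that clears
 * nothing and ORs in BIT(0); mt76_clear(dev, reg, BIT(0)) clears the
 * bit instead.
 */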
#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
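/* Illustrative only: with a hypothetical register field
 * MY_FIELD = GENMASK(7, 4), mt76_rmw_field(dev, reg, MY_FIELD, 5)
 * rewrites just bits 7..4 of reg to the value 5, leaving the other
 * bits untouched.
 */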
#define mt76_hw(dev) (dev)->mphy.hw

static inline struct ieee80211_hw *
mt76_wcid_hw(struct mt76_dev *dev, u16 wcid)
{
	if (wcid < MT76_N_WCIDS &&
	    mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
		return dev->phy2->hw;

	return dev->phy.hw;
}
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
#define mt76_init_queues(dev, ...)	(dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
#define mt76_for_each_q_rx(dev, i)	\
	for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++)	\
		if ((dev)->q_rx[i].ndesc)
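/* Usage sketch (illustrative; mdev stands for a struct mt76_dev *):
 *
 *	int i;
 *
 *	mt76_for_each_q_rx(mdev, i)
 *		napi_disable(&mdev->napi[i]);
 *
 * Only rx queues with a non-zero descriptor count are visited.
 */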
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates);

struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
					  const struct file_operations *ops);
static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{
	return mt76_register_debugfs_fops(&dev->phy, NULL);
}

int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base);
u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
				     int n_desc, int ring_base)
{
	struct mt76_queue *q;

	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base);
static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
				      int n_desc, int ring_base)
{
	struct mt76_queue *q;

	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base);

	q->qid = __MT_TXQ_MAX + qid;
static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
{
	if (phy_ext && dev->phy2)
		return dev->phy2;
	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
{
	return mt76_dev_phy(dev, phy_ext)->hw;
}
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around; size must be a power of two */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around; size must be a power of two */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}
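/* Illustrative only: with a ring of size 8, mt76_incr(7, 8) wraps to 0
 * and mt76_decr(0, 8) wraps to 7; the bitmask trick is why the size
 * must be a power of two.
 */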
u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}
static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}
static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
	struct mt76_rx_status mstat;
	u8 *data = skb->data;

	/* alignment concerns: the radiotap headers skipped below must be
	 * multiples of 4 bytes
	 */
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);

	mstat = *((struct mt76_rx_status *)skb->cb);

	if (mstat.flag & RX_FLAG_RADIOTAP_HE)
		data += sizeof(struct ieee80211_radiotap_he);
	if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
		data += sizeof(struct ieee80211_radiotap_he_mu);

	return data;
}
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}
static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}
static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
	static const u8 nss_delta[4] = { 0, 6, 9, 12 };
	u8 idx = nss - 1;

	return idx < ARRAY_SIZE(nss_delta) ? nss_delta[idx] : 0;
}
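/* e.g. mt76_tx_power_nss_delta(4) == 12; out-of-range nss values fall
 * back to a delta of 0 instead of indexing past the array.
 */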
static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{
#ifdef CONFIG_NL80211_TESTMODE
	return phy->test.state != MT76_TM_STATE_OFF;
#else
	return false;
#endif
}
static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
					struct sk_buff *skb,
					struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
	if (skb == dev->phy.test.tx_skb)
		*hw = dev->phy.hw;
	else if (dev->phy2 && skb == dev->phy2->test.tx_skb)
		*hw = dev->phy2->hw;
	else
		return false;
	return true;
#else
	return false;
#endif
}
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);
void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __releases(&dev->status_lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
			    struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{
	__mt76_tx_complete_skb(dev, wcid, skb, NULL);
}
void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);
int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power);
void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
		       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
	enum mt76_testmode_state state = MT76_TM_STATE_IDLE;

	if (disable || phy->test.state == MT76_TM_STATE_OFF)
		state = MT76_TM_STATE_OFF;

	mt76_testmode_set_state(phy, state);
#endif
}
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = dev->phy.hw;

	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
		hw = dev->phy2->hw;

	info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;

	return hw;
}
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e);
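/* URB status values -ECONNRESET, -ESHUTDOWN and -ENOENT indicate an
 * intentional unlink or device teardown, so they are not treated as
 * transfer errors here.
 */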
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}
/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: route management packets to queue 5 */
	return qid + 1;
}
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val);
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
int mt76s_alloc_tx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
void mt76s_sdio_irq(struct sdio_func *func);
void mt76s_txrx_worker(struct mt76_sdio *sdio);
bool mt76s_txqs_empty(struct mt76_dev *dev);
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
		  int hw_ver);
u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
u32 mt76s_read_pcr(struct mt76_dev *dev);
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len);
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len);
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len);
struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		   int data_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
			      int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
				  int cmd, bool wait_resp, struct sk_buff **ret);
int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
			     int len, int max_len);
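/* Firmware payloads are pushed in chunks; the helper below caps the
 * chunk size at 4096 bytes minus the per-message MCU headroom.
 */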
static inline int
mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
		       int len)
{
	int max_len = 4096 - dev->mcu_ops->headroom;

	return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len);
}

static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
		  bool wait_resp)
{
	return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL);
}

static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
		      bool wait_resp)
{
	return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL);
}
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
			      struct ieee80211_channel *chan,
			      struct mt76_power_limits *dest,
			      s8 target_power);
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	spin_lock_bh(&dev->token_lock);
	__mt76_set_tx_blocked(dev, blocked);
	spin_unlock_bh(&dev->token_lock);
}
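/* Tokens pair an in-flight frame with its txwi descriptor: a token is
 * allocated when the frame is handed to the hardware and looked up
 * again from the tx-free event to complete the frame.
 */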
static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);
	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
			  GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);

	return token;
}
static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
static inline void mt76_packet_id_init(struct mt76_wcid *wcid)
{
	INIT_LIST_HEAD(&wcid->list);
	idr_init(&wcid->pktid);
}
static inline void
mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);
}