#include "mt76.h"
#include "mt76x02_regs.h"
#include "mt76x02_mac.h"
+#include "mt76x02_util.h"
enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
return 0;
}
+void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+ u8 nss;
+ s8 txpwr_adj, max_txpwr_adj;
+ u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf;
+
+ memset(txwi, 0, sizeof(*txwi));
+
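+ /* address the TXWI to the station's WCID slot, 0xff if there is no
+  * associated station entry
+  */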
+ if (wcid)
+ txwi->wcid = wcid->idx;
+ else
+ txwi->wcid = 0xff;
+
+ txwi->pktid = 1;
+
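+ /* software-generated IV: build the 8-byte CCMP header from the PN.
+  * Bytes 0-1 hold PN0/PN1, byte 3 carries the key index and the
+  * ExtIV flag (0x20), bytes 4-7 hold PN2..PN5 for the extended IV.
+  */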
+ if (wcid && wcid->sw_iv && key) {
+ u64 pn = atomic64_inc_return(&key->tx_pn);
+ ccmp_pn[0] = pn;
+ ccmp_pn[1] = pn >> 8;
+ ccmp_pn[2] = 0;
+ ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+ ccmp_pn[4] = pn >> 16;
+ ccmp_pn[5] = pn >> 24;
+ ccmp_pn[6] = pn >> 32;
+ ccmp_pn[7] = pn >> 40;
+ txwi->iv = *((__le32 *)&ccmp_pn[0]);
+ txwi->eiv = *((__le32 *)&ccmp_pn[4]);
+ }
+
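+ /* if mac80211 did not supply a usable rate, fall back to the rate
+  * and power limit tracked per WCID
+  */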
+ spin_lock_bh(&dev->lock);
+ if (wcid && (rate->idx < 0 || !rate->count)) {
+ txwi->rate = wcid->tx_rate;
+ max_txpwr_adj = wcid->max_txpwr_adj;
+ nss = wcid->tx_rate_nss;
+ } else {
+ txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
+ max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
+ }
+ spin_unlock_bh(&dev->lock);
+
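+ /* per-frame TX power adjustment is chip specific; apply it only when
+  * the driver provides a callback for it
+  */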
+ if (dev->drv->get_tx_txpwr_adj) {
+ txpwr_adj = dev->drv->get_tx_txpwr_adj(dev, dev->txpower_conf,
+ max_txpwr_adj);
+ txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+ }
+
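+ /* TX stream setting depends on the chip revision and only matters
+  * when more than one chain is available
+  */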
+ if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E4)
+ txwi->txstream = 0x13;
+ else if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E3 &&
+ !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+ txwi->txstream = 0x93;
+
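+ /* the remaining TXWI fields (ack policy, A-MPDU parameters, frame
+  * length) are filled by the common helper
+  */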
+ mt76x02_mac_fill_txwi(txwi, skb, sta, len, nss);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
+
static void
mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
struct ieee80211_tx_info *info,
int
mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate);
void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr);
+void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len);
#endif
}
EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x02_txwi *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta, int len)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rate = &info->control.rates[0];
- struct ieee80211_key_conf *key = info->control.hw_key;
- u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
- u8 nss;
- s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8];
-
- memset(txwi, 0, sizeof(*txwi));
-
- if (wcid)
- txwi->wcid = wcid->idx;
- else
- txwi->wcid = 0xff;
-
- txwi->pktid = 1;
-
- if (wcid && wcid->sw_iv && key) {
- u64 pn = atomic64_inc_return(&key->tx_pn);
- ccmp_pn[0] = pn;
- ccmp_pn[1] = pn >> 8;
- ccmp_pn[2] = 0;
- ccmp_pn[3] = 0x20 | (key->keyidx << 6);
- ccmp_pn[4] = pn >> 16;
- ccmp_pn[5] = pn >> 24;
- ccmp_pn[6] = pn >> 32;
- ccmp_pn[7] = pn >> 40;
- txwi->iv = *((__le32 *)&ccmp_pn[0]);
- txwi->eiv = *((__le32 *)&ccmp_pn[1]);
- }
-
- spin_lock_bh(&dev->mt76.lock);
- if (wcid && (rate->idx < 0 || !rate->count)) {
- txwi->rate = wcid->tx_rate;
- max_txpwr_adj = wcid->max_txpwr_adj;
- nss = wcid->tx_rate_nss;
- } else {
- txwi->rate = mt76x02_mac_tx_rate_val(&dev->mt76, rate, &nss);
- max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(&dev->mt76, rate);
- }
- spin_unlock_bh(&dev->mt76.lock);
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(&dev->mt76, dev->mt76.txpower_conf,
- max_txpwr_adj);
- txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
-
- if (mt76xx_rev(dev) >= MT76XX_REV_E4)
- txwi->txstream = 0x13;
- else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
- !(txwi->rate & cpu_to_le16(rate_ht_mask)))
- txwi->txstream = 0x93;
-
- mt76x02_mac_fill_txwi(txwi, skb, sta, len, nss);
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi);
-
int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
{
struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
void *rxi);
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x02_txwi *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta, int len);
int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
struct sk_buff *skb);
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
return -ENOSPC;
- mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
+ mt76x02_mac_write_txwi(&dev->mt76, &txwi, skb, NULL, NULL, skb->len);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);
if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
mt76x02_mac_wcid_set_drop(&dev->mt76, wcid->idx, false);
- mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
+ mt76x02_mac_write_txwi(mdev, txwi, skb, wcid, sta, skb->len);
ret = mt76x02_insert_hdr_pad(skb);
if (ret < 0)
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info)
{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
struct mt76x02_txwi *txwi;
int err, len = skb->len;
mt76x02_insert_hdr_pad(skb);
txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
- mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+ mt76x02_mac_write_txwi(mdev, txwi, skb, wcid, sta, len);
return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
}