1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
7 #include <linux/ieee80211.h>
8 #include <linux/etherdevice.h>
13 #include "iwl-trans.h"
14 #include "iwl-eeprom-parse.h"
17 #include "time-sync.h"
20 iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
23 struct iwl_fw_dbg_trigger_tlv *trig;
24 struct iwl_fw_dbg_trigger_ba *ba_trig;
26 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
30 ba_trig = (void *)trig->data;
32 if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
35 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
36 "BAR sent to %pM, tid %d, ssn %d",
40 #define OPT_HDR(type, skb, off) \
41 (type *)(skb_network_header(skb) + (off))
43 static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
44 struct ieee80211_tx_info *info,
47 struct ieee80211_hdr *hdr = (void *)skb->data;
48 u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
49 u16 offload_assist = 0;
50 #if IS_ENABLED(CONFIG_INET)
53 /* Do not compute checksum if already computed */
54 if (skb->ip_summed != CHECKSUM_PARTIAL)
57 /* We do not expect to be requested to csum stuff we do not support */
58 if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
59 (skb->protocol != htons(ETH_P_IP) &&
60 skb->protocol != htons(ETH_P_IPV6)),
61 "No support for requested checksum\n")) {
62 skb_checksum_help(skb);
66 if (skb->protocol == htons(ETH_P_IP)) {
67 protocol = ip_hdr(skb)->protocol;
69 #if IS_ENABLED(CONFIG_IPV6)
70 struct ipv6hdr *ipv6h =
71 (struct ipv6hdr *)skb_network_header(skb);
72 unsigned int off = sizeof(*ipv6h);
74 protocol = ipv6h->nexthdr;
75 while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
76 struct ipv6_opt_hdr *hp;
78 /* only supported extension headers */
79 if (protocol != NEXTHDR_ROUTING &&
80 protocol != NEXTHDR_HOP &&
81 protocol != NEXTHDR_DEST) {
82 skb_checksum_help(skb);
86 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
87 protocol = hp->nexthdr;
88 off += ipv6_optlen(hp);
90 /* if we get here - protocol now should be TCP/UDP */
94 if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
96 skb_checksum_help(skb);
101 offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
104 * Set offset to IP header (snap).
105 * We don't support tunneling so no need to take care of inner header.
108 offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
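/*
 * Note (assumption, not from the original comment): the value 4 here is
 * presumably the 8-byte LLC/SNAP header sitting between the 802.11 MAC
 * header and the IP header, expressed in 16-bit words, matching the
 * "offset to IP header (snap)" remark above.
 */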
110 /* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
111 if (skb->protocol == htons(ETH_P_IP) && amsdu) {
112 ip_hdr(skb)->check = 0;
113 offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
116 /* reset UDP/TCP header csum */
117 if (protocol == IPPROTO_TCP)
118 tcp_hdr(skb)->check = 0;
120 udp_hdr(skb)->check = 0;
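/*
 * At this point the stack-computed partial checksums have been cleared;
 * offload_assist asks the hardware/firmware to fill in the L4 checksum
 * (and, for IPv4 A-MSDUs, the L3 checksum) instead.
 */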
125 * MAC header len should include the IV unless the IV is added by the
126 * firmware, as in WEP; the size is given in words.
127 * In the new Tx API, the IV is always added by the firmware.
129 if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
130 info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
131 info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
132 mh_len += info->control.hw_key->iv_len;
134 offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
137 offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
138 else if (ieee80211_hdrlen(hdr->frame_control) % 4)
139 /* padding is inserted later in transport */
140 offload_assist |= BIT(TX_CMD_OFFLD_PAD);
142 return offload_assist;
146 * Sets most of the Tx cmd's fields
148 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
149 struct iwl_tx_cmd *tx_cmd,
150 struct ieee80211_tx_info *info, u8 sta_id)
152 struct ieee80211_hdr *hdr = (void *)skb->data;
153 __le16 fc = hdr->frame_control;
154 u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
155 u32 len = skb->len + FCS_LEN;
159 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
160 (ieee80211_is_probe_resp(fc) &&
161 !is_multicast_ether_addr(hdr->addr1)))
162 tx_flags |= TX_CMD_FLG_ACK;
164 tx_flags &= ~TX_CMD_FLG_ACK;
166 if (ieee80211_is_probe_resp(fc))
167 tx_flags |= TX_CMD_FLG_TSF;
169 if (ieee80211_has_morefrags(fc))
170 tx_flags |= TX_CMD_FLG_MORE_FRAG;
172 if (ieee80211_is_data_qos(fc)) {
173 u8 *qc = ieee80211_get_qos_ctl(hdr);
174 tx_cmd->tid_tspec = qc[0] & 0xf;
175 tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
176 amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
177 } else if (ieee80211_is_back_req(fc)) {
178 struct ieee80211_bar *bar = (void *)skb->data;
179 u16 control = le16_to_cpu(bar->control);
180 u16 ssn = le16_to_cpu(bar->start_seq_num);
182 tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
183 tx_cmd->tid_tspec = (control &
184 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
185 IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
186 WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
187 iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
190 if (ieee80211_is_data(fc))
191 tx_cmd->tid_tspec = IWL_TID_NON_QOS;
193 tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;
195 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
196 tx_flags |= TX_CMD_FLG_SEQ_CTL;
198 tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
201 /* Default to 0 (BE) when tid_tspec is set to IWL_MAX_TID_COUNT */
202 if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
203 ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
205 ac = tid_to_mac80211_ac[0];
207 tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
208 TX_CMD_FLG_BT_PRIO_POS;
210 if (ieee80211_is_mgmt(fc)) {
211 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
212 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
213 else if (ieee80211_is_action(fc))
214 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
216 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
218 /* The spec allows Action frames in A-MPDU, we don't support it */
221 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
222 } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
223 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
225 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
228 if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
229 !is_multicast_ether_addr(hdr->addr1))
230 tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
232 if (fw_has_capa(&mvm->fw->ucode_capa,
233 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
234 ieee80211_action_contains_tpc(skb))
235 tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
237 tx_cmd->tx_flags = cpu_to_le32(tx_flags);
238 /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
239 tx_cmd->len = cpu_to_le16((u16)skb->len);
240 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
241 tx_cmd->sta_id = sta_id;
243 tx_cmd->offload_assist =
244 cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, info, amsdu));
247 static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
248 struct ieee80211_tx_info *info,
249 struct ieee80211_sta *sta, __le16 fc)
251 if (info->band == NL80211_BAND_2GHZ &&
252 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
253 return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
255 if (sta && ieee80211_is_data(fc)) {
256 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
258 return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
261 return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
264 static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm,
265 struct ieee80211_tx_info *info)
267 struct ieee80211_tx_rate *rate = &info->control.rates[0];
271 * we only care about legacy/HT/VHT so far, so we can
272 * build in v1 and use iwl_new_rate_from_v1()
275 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
276 u8 mcs = ieee80211_rate_get_vht_mcs(rate);
277 u8 nss = ieee80211_rate_get_vht_nss(rate);
279 result = RATE_MCS_VHT_MSK_V1;
280 result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
281 result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
282 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
283 result |= RATE_MCS_SGI_MSK_V1;
284 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
285 result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
286 else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
287 result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
288 else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
289 result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
290 } else if (rate->flags & IEEE80211_TX_RC_MCS) {
291 result = RATE_MCS_HT_MSK_V1;
292 result |= u32_encode_bits(rate->idx,
293 RATE_HT_MCS_RATE_CODE_MSK_V1 |
294 RATE_HT_MCS_NSS_MSK_V1);
295 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
296 result |= RATE_MCS_SGI_MSK_V1;
297 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
298 result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
299 if (info->flags & IEEE80211_TX_CTL_LDPC)
300 result |= RATE_MCS_LDPC_MSK_V1;
301 if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
302 result |= RATE_MCS_STBC_MSK;
307 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
308 return iwl_new_rate_from_v1(result);
312 static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
313 struct ieee80211_tx_info *info,
314 struct ieee80211_sta *sta, __le16 fc)
321 if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
322 u32 result = iwl_mvm_get_inject_tx_rate(mvm, info);
326 rate_idx = info->control.rates[0].idx;
327 } else if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
328 /* info->control is only relevant for non-HW rate control */
330 /* HT rate doesn't make sense for a non-data frame */
331 WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
332 !ieee80211_is_data(fc),
333 "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
334 info->control.rates[0].flags,
335 info->control.rates[0].idx,
337 sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);
339 rate_idx = info->control.rates[0].idx;
341 /* For non-2 GHz bands, remap mac80211 rate indices into driver indices */
344 if (info->band != NL80211_BAND_2GHZ ||
345 (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
346 rate_idx += IWL_FIRST_OFDM_RATE;
348 /* For 2.4 GHz band, check that there is no need to remap */
349 BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
352 /* if the rate isn't a well known legacy rate, take the lowest one */
353 if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
354 rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm,
358 /* Get PLCP rate for tx_cmd->rate_n_flags */
359 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx);
360 is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE);
362 /* Set CCK or OFDM flag */
363 if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
365 rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
367 rate_flags |= RATE_MCS_CCK_MSK;
369 rate_flags |= RATE_MCS_CCK_MSK_V1;
372 return (u32)rate_plcp | rate_flags;
375 static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
376 struct ieee80211_tx_info *info,
377 struct ieee80211_sta *sta, __le16 fc)
379 return iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
380 iwl_mvm_get_tx_ant(mvm, info, sta, fc);
384 * Sets the fields in the Tx cmd that are rate related
386 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
387 struct ieee80211_tx_info *info,
388 struct ieee80211_sta *sta, __le16 fc)
390 /* Set retry limit on RTS packets */
391 tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
393 /* Set retry limit on DATA packets and Probe Responses */
394 if (ieee80211_is_probe_resp(fc)) {
395 tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
396 tx_cmd->rts_retry_limit =
397 min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
398 } else if (ieee80211_is_back_req(fc)) {
399 tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
401 tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
405 * for data packets, rate info comes from the table inside the fw. This
406 * table is controlled by LINK_QUALITY commands
409 if (likely(ieee80211_is_data(fc) && sta &&
410 !(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))) {
411 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
413 if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
414 tx_cmd->initial_rate_index = 0;
415 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
418 } else if (ieee80211_is_back_req(fc)) {
420 cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
423 /* Set the rate in the TX cmd */
424 tx_cmd->rate_n_flags =
425 cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
428 static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
431 struct ieee80211_key_conf *keyconf = info->control.hw_key;
434 pn = atomic64_inc_return(&keyconf->tx_pn);
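/*
 * Write the PN into the CCMP/GCMP header. Per the 802.11 CCMP header
 * format: byte 0 = PN0, byte 1 = PN1, byte 2 = reserved, byte 3 = key ID
 * (bits 6-7) | Ext IV (0x20), bytes 4-7 = PN2..PN5.
 */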
437 crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
438 crypto_hdr[1] = pn >> 8;
439 crypto_hdr[4] = pn >> 16;
440 crypto_hdr[5] = pn >> 24;
441 crypto_hdr[6] = pn >> 32;
442 crypto_hdr[7] = pn >> 40;
446 * Sets the fields in the Tx cmd that are crypto related
448 static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
449 struct ieee80211_tx_info *info,
450 struct iwl_tx_cmd *tx_cmd,
451 struct sk_buff *skb_frag,
454 struct ieee80211_key_conf *keyconf = info->control.hw_key;
455 u8 *crypto_hdr = skb_frag->data + hdrlen;
456 enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
459 switch (keyconf->cipher) {
460 case WLAN_CIPHER_SUITE_CCMP:
461 iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
462 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
465 case WLAN_CIPHER_SUITE_TKIP:
466 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
467 pn = atomic64_inc_return(&keyconf->tx_pn);
468 ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
469 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
472 case WLAN_CIPHER_SUITE_WEP104:
473 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
475 case WLAN_CIPHER_SUITE_WEP40:
476 tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
477 ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
478 TX_CMD_SEC_WEP_KEY_IDX_MSK);
480 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
482 case WLAN_CIPHER_SUITE_GCMP:
483 case WLAN_CIPHER_SUITE_GCMP_256:
484 type = TX_CMD_SEC_GCMP;
486 case WLAN_CIPHER_SUITE_CCMP_256:
487 /* TODO: Taking the key from the table might introduce a race
488 * when PTK rekeying is done, having old packets with a PN
489 * based on the old key but the message encrypted with a new one.
491 * Need to handle this.
493 tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
494 tx_cmd->key[0] = keyconf->hw_key_idx;
495 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
498 tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
503 * Allocates and fills the Tx cmd that the caller attaches to the skb's driver data
505 static struct iwl_device_tx_cmd *
506 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
507 struct ieee80211_tx_info *info, int hdrlen,
508 struct ieee80211_sta *sta, u8 sta_id)
510 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
511 struct iwl_device_tx_cmd *dev_cmd;
512 struct iwl_tx_cmd *tx_cmd;
514 dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
516 if (unlikely(!dev_cmd))
519 dev_cmd->hdr.cmd = TX_CMD;
521 if (iwl_mvm_has_new_tx_api(mvm)) {
522 u32 rate_n_flags = 0;
524 struct iwl_mvm_sta *mvmsta = sta ?
525 iwl_mvm_sta_from_mac80211(sta) : NULL;
528 if (ieee80211_is_data_qos(hdr->frame_control)) {
529 u8 *qc = ieee80211_get_qos_ctl(hdr);
531 amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
534 if (!info->control.hw_key)
535 flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
538 * For data packets rate info comes from the fw. Only
539 * set rate/antenna during connection establishment or in case
540 * no station is given.
542 if (!sta || !ieee80211_is_data(hdr->frame_control) ||
543 mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
544 flags |= IWL_TX_FLAGS_CMD_RATE;
546 iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
550 if (mvm->trans->trans_cfg->device_family >=
551 IWL_DEVICE_FAMILY_AX210) {
552 struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
553 u32 offload_assist = iwl_mvm_tx_csum(mvm, skb,
556 cmd->offload_assist = cpu_to_le32(offload_assist);
558 /* Total # bytes to be transmitted */
559 cmd->len = cpu_to_le16((u16)skb->len);
561 /* Copy MAC header from skb into command buffer */
562 memcpy(cmd->hdr, hdr, hdrlen);
564 cmd->flags = cpu_to_le16(flags);
565 cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
567 struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
568 u16 offload_assist = iwl_mvm_tx_csum(mvm, skb,
571 cmd->offload_assist = cpu_to_le16(offload_assist);
573 /* Total # bytes to be transmitted */
574 cmd->len = cpu_to_le16((u16)skb->len);
576 /* Copy MAC header from skb into command buffer */
577 memcpy(cmd->hdr, hdr, hdrlen);
579 cmd->flags = cpu_to_le32(flags);
580 cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
585 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
587 if (info->control.hw_key)
588 iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
590 iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
592 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
594 /* Copy MAC header from skb into command buffer */
595 memcpy(tx_cmd->hdr, hdr, hdrlen);
601 static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
602 struct iwl_device_tx_cmd *cmd)
604 struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
606 memset(&skb_info->status, 0, sizeof(skb_info->status));
607 memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
609 skb_info->driver_data[1] = cmd;
612 static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
613 struct iwl_mvm_vif_link_info *link,
614 struct ieee80211_tx_info *info,
617 struct ieee80211_hdr *hdr = (void *)skb->data;
618 __le16 fc = hdr->frame_control;
620 switch (info->control.vif->type) {
621 case NL80211_IFTYPE_AP:
622 case NL80211_IFTYPE_ADHOC:
624 * Non-bufferable frames use the broadcast station, thus they
625 * use the probe queue.
626 * Also take care of the case where we send a deauth to a
627 * station that we don't have, or similarly an association
628 * response (with non-success status) for a station we can't accept.
630 * Also, disassociate frames might happen, particularly with
631 * reason 7 ("Class 3 frame received from nonassociated STA").
633 if (ieee80211_is_mgmt(fc) &&
634 (!ieee80211_is_bufferable_mmpdu(skb) ||
635 ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
636 return link->mgmt_queue;
638 if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
639 is_multicast_ether_addr(hdr->addr1))
640 return link->cab_queue;
642 WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
643 "fc=0x%02x", le16_to_cpu(fc));
644 return link->mgmt_queue;
645 case NL80211_IFTYPE_P2P_DEVICE:
646 if (ieee80211_is_mgmt(fc))
647 return mvm->p2p_dev_queue;
650 return mvm->p2p_dev_queue;
652 WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
657 static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
660 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
661 struct iwl_mvm_vif *mvmvif =
662 iwl_mvm_vif_from_mac80211(info->control.vif);
663 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
664 int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
665 struct iwl_probe_resp_data *resp_data;
669 (WLAN_OUI_WFA >> 16) & 0xff,
670 (WLAN_OUI_WFA >> 8) & 0xff,
672 WLAN_OUI_TYPE_WFA_P2P,
677 resp_data = rcu_dereference(mvmvif->deflink.probe_resp_data);
681 if (!resp_data->notif.noa_active)
684 ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
685 mgmt->u.probe_resp.variable,
689 IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
693 if (skb_tailroom(skb) < resp_data->noa_len) {
694 if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
696 "Failed to reallocate probe resp\n");
701 pos = skb_put(skb, resp_data->noa_len);
703 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
704 /* Set length of IE body (not including ID and length itself) */
705 *pos++ = resp_data->noa_len - 2;
706 *pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
707 *pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
708 *pos++ = WLAN_OUI_WFA & 0xff;
709 *pos++ = WLAN_OUI_TYPE_WFA_P2P;
711 memcpy(pos, &resp_data->notif.noa_attr,
712 resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
718 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
720 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
721 struct ieee80211_tx_info info;
722 struct iwl_device_tx_cmd *dev_cmd;
724 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
725 __le16 fc = hdr->frame_control;
726 bool offchannel = IEEE80211_SKB_CB(skb)->flags &
727 IEEE80211_TX_CTL_TX_OFFCHAN;
730 if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
733 memcpy(&info, skb->cb, sizeof(info));
735 if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
738 if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
741 if (info.control.vif) {
742 struct iwl_mvm_vif *mvmvif =
743 iwl_mvm_vif_from_mac80211(info.control.vif);
745 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
746 info.control.vif->type == NL80211_IFTYPE_AP ||
747 info.control.vif->type == NL80211_IFTYPE_ADHOC) {
748 u32 link_id = u32_get_bits(info.control.flags,
749 IEEE80211_TX_CTRL_MLO_LINK);
750 struct iwl_mvm_vif_link_info *link;
752 if (link_id == IEEE80211_LINK_UNSPECIFIED) {
753 if (info.control.vif->active_links)
754 link_id = ffs(info.control.vif->active_links) - 1;
759 link = mvmvif->link[link_id];
763 if (!ieee80211_is_data(hdr->frame_control))
764 sta_id = link->bcast_sta.sta_id;
766 sta_id = link->mcast_sta.sta_id;
768 queue = iwl_mvm_get_ctrl_vif_queue(mvm, link, &info,
770 } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
771 queue = mvm->snif_queue;
772 sta_id = mvm->snif_sta.sta_id;
773 } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
776 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
777 * that can be used in 2 different types of vifs, P2P & STATION (HS2.0).
779 * P2P uses the offchannel queue.
780 * STATION (HS2.0) uses the auxiliary context of the FW,
781 * and hence needs to be sent on the aux queue.
783 sta_id = mvm->aux_sta.sta_id;
784 queue = mvm->aux_queue;
789 IWL_ERR(mvm, "No queue was found. Dropping TX\n");
793 if (unlikely(ieee80211_is_probe_resp(fc)))
794 iwl_mvm_probe_resp_set_noa(mvm, skb);
796 IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
798 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
802 /* From now on, we cannot access info->control */
803 iwl_mvm_skb_prepare_status(skb, dev_cmd);
805 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
806 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
813 unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
814 struct ieee80211_sta *sta, unsigned int tid)
816 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
817 u8 ac = tid_to_mac80211_ac[tid];
818 enum nl80211_band band;
823 /* For HE, redirect to trigger-based FIFOs */
824 if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
827 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
830 * Don't send an AMSDU that will be longer than the TXF.
831 * Add a security margin of 256 for the TX command + headers.
832 * We also want to have the start of the next packet inside the
833 * fifo to be able to send bursts.
835 val = mvmsta->max_amsdu_len;
837 if (hweight16(sta->valid_links) <= 1) {
838 if (sta->valid_links) {
839 struct ieee80211_bss_conf *link_conf;
840 unsigned int link = ffs(sta->valid_links) - 1;
843 link_conf = rcu_dereference(mvmsta->vif->link_conf[link]);
844 if (WARN_ON(!link_conf))
845 band = NL80211_BAND_2GHZ;
847 band = link_conf->chandef.chan->band;
850 band = mvmsta->vif->bss_conf.chandef.chan->band;
853 lmac = iwl_mvm_get_lmac_id(mvm, band);
854 } else if (fw_has_capa(&mvm->fw->ucode_capa,
855 IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) {
856 /* for real MLO restrict to both LMACs if they exist */
857 lmac = IWL_LMAC_5G_INDEX;
858 val = min_t(unsigned int, val,
859 mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
860 lmac = IWL_LMAC_24G_INDEX;
862 lmac = IWL_LMAC_24G_INDEX;
865 return min_t(unsigned int, val,
866 mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
872 iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
873 netdev_features_t netdev_flags,
874 struct sk_buff_head *mpdus_skb)
876 struct sk_buff *tmp, *next;
877 struct ieee80211_hdr *hdr = (void *)skb->data;
878 char cb[sizeof(skb->cb)];
880 unsigned int tcp_payload_len;
881 unsigned int mss = skb_shinfo(skb)->gso_size;
882 bool ipv4 = (skb->protocol == htons(ETH_P_IP));
883 bool qos = ieee80211_is_data_qos(hdr->frame_control);
884 u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
886 skb_shinfo(skb)->gso_size = num_subframes * mss;
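/*
 * Temporarily inflate gso_size so that skb_gso_segment() below emits
 * segments that each carry num_subframes * mss bytes of TCP payload;
 * each such segment is later sent as a single A-MSDU. The real gso_size
 * is restored right after segmentation.
 */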
887 memcpy(cb, skb->cb, sizeof(cb));
889 next = skb_gso_segment(skb, netdev_flags);
890 skb_shinfo(skb)->gso_size = mss;
891 skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
892 if (WARN_ON_ONCE(IS_ERR(next)))
897 skb_list_walk_safe(next, tmp, next) {
898 memcpy(tmp->cb, cb, sizeof(tmp->cb));
900 * Compute the length of all the data added for the A-MSDU.
901 * This will be used to compute the length to write in the TX
902 * command. We have: SNAP + IP + TCP for n - 1 subframes and
903 * ETH header for n subframes.
905 tcp_payload_len = skb_tail_pointer(tmp) -
906 skb_transport_header(tmp) -
907 tcp_hdrlen(tmp) + tmp->data_len;
910 ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
912 if (tcp_payload_len > mss) {
913 skb_shinfo(tmp)->gso_size = mss;
914 skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
921 ip_send_check(ip_hdr(tmp));
923 qc = ieee80211_get_qos_ctl((void *)tmp->data);
924 *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
926 skb_shinfo(tmp)->gso_size = 0;
929 skb_mark_not_on_list(tmp);
930 __skb_queue_tail(mpdus_skb, tmp);
937 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
938 struct ieee80211_tx_info *info,
939 struct ieee80211_sta *sta,
940 struct sk_buff_head *mpdus_skb)
942 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
943 struct ieee80211_hdr *hdr = (void *)skb->data;
944 unsigned int mss = skb_shinfo(skb)->gso_size;
945 unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
946 u16 snap_ip_tcp, pad;
947 netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
950 snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
953 if (!mvmsta->max_amsdu_len ||
954 !ieee80211_is_data_qos(hdr->frame_control) ||
955 !mvmsta->amsdu_enabled)
956 return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
959 * Do not build AMSDU for IPv6 with extension headers.
960 * Ask the stack to segment and checksum the generated MPDUs for us.
962 if (skb->protocol == htons(ETH_P_IPV6) &&
963 ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
965 netdev_flags &= ~NETIF_F_CSUM_MASK;
966 return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
969 tid = ieee80211_get_tid(hdr);
970 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
974 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
975 * during a BA session.
977 if ((info->flags & IEEE80211_TX_CTL_AMPDU &&
978 !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) ||
979 !(mvmsta->amsdu_enabled & BIT(tid)))
980 return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
983 * Take the min of ieee80211 station and mvm station
986 min_t(unsigned int, sta->cur->max_amsdu_len,
987 iwl_mvm_max_amsdu_size(mvm, sta, tid));
990 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
991 * supported. This is a spec requirement (IEEE 802.11-2015
992 * section 8.7.3 NOTE 3).
994 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
995 !sta->deflink.vht_cap.vht_supported)
996 max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
998 /* Sub frame header + SNAP + IP header + TCP header + MSS */
999 subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
1000 pad = (4 - subf_len) & 0x3;
1003 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
1004 * N * subf_len + (N - 1) * pad.
1006 num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
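/*
 * Worked example (illustrative numbers only): with mss = 1460 and 20-byte
 * IPv4 and TCP headers, snap_ip_tcp = 8 + 20 + 20 = 48,
 * subf_len = 14 + 48 + 1460 = 1522 and pad = 2, so an HT max_amsdu_len of
 * 7935 gives (7935 + 2) / (1522 + 2) = 5 subframes
 * (5 * 1522 + 4 * 2 = 7618 bytes <= 7935).
 */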
1008 if (sta->max_amsdu_subframes &&
1009 num_subframes > sta->max_amsdu_subframes)
1010 num_subframes = sta->max_amsdu_subframes;
1012 tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
1013 tcp_hdrlen(skb) + skb->data_len;
1016 * Make sure we have enough TBs for the A-MSDU:
1017 * 2 for each subframe
1018 * 1 more for each fragment
1019 * 1 more for the potential data in the header
1021 if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
1022 mvm->trans->max_skb_frags)
1025 if (num_subframes > 1)
1026 *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1028 /* This skb fits in one single A-MSDU */
1029 if (num_subframes * mss >= tcp_payload_len) {
1030 __skb_queue_tail(mpdus_skb, skb);
1035 * Trick the segmentation function to make it
1036 * create SKBs that can fit into one A-MSDU.
1038 return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
1041 #else /* CONFIG_INET */
1042 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
1043 struct ieee80211_tx_info *info,
1044 struct ieee80211_sta *sta,
1045 struct sk_buff_head *mpdus_skb)
1047 /* Impossible to get TSO without CONFIG_INET */
1054 /* Check if there are any timed-out TIDs on a given shared TXQ */
1055 static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
1057 unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
1058 unsigned long now = jiffies;
1061 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1064 for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1065 if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
1066 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1073 static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
1074 struct iwl_mvm_sta *mvmsta,
1077 int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
1078 struct iwl_mvm_tcm_mac *mdata;
1080 if (mac >= NUM_MAC_INDEX_DRIVER)
1083 mdata = &mvm->tcm.data[mac];
1085 if (mvm->tcm.paused)
1088 if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
1089 schedule_delayed_work(&mvm->tcm.work, 0);
1091 mdata->tx.airtime += airtime;
1094 static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
1095 struct iwl_mvm_sta *mvmsta, int tid)
1097 u32 ac = tid_to_mac80211_ac[tid];
1098 int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
1099 struct iwl_mvm_tcm_mac *mdata;
1101 if (mac >= NUM_MAC_INDEX_DRIVER)
1104 mdata = &mvm->tcm.data[mac];
1106 mdata->tx.pkts[ac]++;
1112 * Builds the Tx cmd for a single MPDU and queues it for transmission.
1114 * This function must be called with BHs disabled.
1116 static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
1117 struct ieee80211_tx_info *info,
1118 struct ieee80211_sta *sta)
1120 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1121 struct iwl_mvm_sta *mvmsta;
1122 struct iwl_device_tx_cmd *dev_cmd;
1125 u8 tid = IWL_MAX_TID_COUNT;
1127 bool is_ampdu = false;
1130 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1131 fc = hdr->frame_control;
1132 hdrlen = ieee80211_hdrlen(fc);
1134 if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
1137 if (WARN_ON_ONCE(!mvmsta))
1140 if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
1143 if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he)
1146 if (unlikely(ieee80211_is_probe_resp(fc)))
1147 iwl_mvm_probe_resp_set_noa(mvm, skb);
1149 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
1150 sta, mvmsta->deflink.sta_id);
1155 * we handle that entirely ourselves -- for uAPSD the firmware
1156 * will always send a notification, and for PS-Poll responses
1157 * we'll notify mac80211 when getting frame status
1159 info->flags &= ~IEEE80211_TX_STATUS_EOSP;
1161 spin_lock(&mvmsta->lock);
1163 /* nullfunc frames should go to the MGMT queue regardless of QOS,
1164 * the conditions of !ieee80211_is_qos_nullfunc(fc) and
1165 * !ieee80211_is_data_qos(fc) keep the default assignment of MGMT TID
1167 if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
1168 tid = ieee80211_get_tid(hdr);
1169 if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid))
1170 goto drop_unlock_sta;
1172 is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
1173 if (WARN_ONCE(is_ampdu &&
1174 mvmsta->tid_data[tid].state != IWL_AGG_ON,
1175 "Invalid internal agg state %d for TID %d",
1176 mvmsta->tid_data[tid].state, tid))
1177 goto drop_unlock_sta;
1179 seq_number = mvmsta->tid_data[tid].seq_number;
1180 seq_number &= IEEE80211_SCTL_SEQ;
1182 if (!iwl_mvm_has_new_tx_api(mvm)) {
1183 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1185 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1186 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1187 /* update the tx_cmd hdr as it was already copied */
1188 tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
1190 } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc) &&
1191 !ieee80211_is_nullfunc(fc)) {
1192 tid = IWL_TID_NON_QOS;
1195 txq_id = mvmsta->tid_data[tid].txq_id;
1197 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
1199 if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
1200 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1201 spin_unlock(&mvmsta->lock);
1205 if (!iwl_mvm_has_new_tx_api(mvm)) {
1206 /* Keep track of the time of the last frame for this RA/TID */
1207 mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
1210 * If we have timed-out TIDs - schedule the worker that will
1211 * reconfig the queues and update them
1213 * Note that no lock is taken here in order to not serialize
1214 * the TX flow. This isn't dangerous because scheduling
1215 * mvm->add_stream_wk can't ruin the state, and if we DON'T
1216 * schedule it due to some race condition then the next TX that gets here will.
1219 if (unlikely(mvm->queue_info[txq_id].status ==
1220 IWL_MVM_QUEUE_SHARED &&
1221 iwl_mvm_txq_should_update(mvm, txq_id)))
1222 schedule_work(&mvm->add_stream_wk);
1225 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n",
1226 mvmsta->deflink.sta_id, tid, txq_id,
1227 IEEE80211_SEQ_TO_SN(seq_number), skb->len);
1229 /* From now on, we cannot access info->control */
1230 iwl_mvm_skb_prepare_status(skb, dev_cmd);
1233 * The IV is introduced by the HW for new tx api, and it is not present
1234 * in the skb, hence, don't tell iwl_mvm_mei_tx_copy_to_csme about the
1235 * IV for those devices.
1237 if (ieee80211_is_data(fc))
1238 iwl_mvm_mei_tx_copy_to_csme(mvm, skb,
1239 info->control.hw_key &&
1240 !iwl_mvm_has_new_tx_api(mvm) ?
1241 info->control.hw_key->iv_len : 0);
1243 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
1244 goto drop_unlock_sta;
1246 if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
1247 mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
1249 spin_unlock(&mvmsta->lock);
1251 if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
1252 tid == IWL_MAX_TID_COUNT ? 0 : tid))
1258 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1259 spin_unlock(&mvmsta->lock);
1261 IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->deflink.sta_id,
1266 int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
1267 struct ieee80211_sta *sta)
1269 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1270 struct ieee80211_tx_info info;
1271 struct sk_buff_head mpdus_skbs;
1272 unsigned int payload_len;
1274 struct sk_buff *orig_skb = skb;
1276 if (WARN_ON_ONCE(!mvmsta))
1279 if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
1282 memcpy(&info, skb->cb, sizeof(info));
1284 if (!skb_is_gso(skb))
1285 return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
1287 payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
1288 tcp_hdrlen(skb) + skb->data_len;
1290 if (payload_len <= skb_shinfo(skb)->gso_size)
1291 return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
1293 __skb_queue_head_init(&mpdus_skbs);
1295 ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
1299 WARN_ON(skb_queue_empty(&mpdus_skbs));
1301 while (!skb_queue_empty(&mpdus_skbs)) {
1302 skb = __skb_dequeue(&mpdus_skbs);
1304 ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
1306 /* Free skbs created as part of TSO logic that have not yet been dequeued */
1307 __skb_queue_purge(&mpdus_skbs);
1308 /* skb here is not necessarily same as skb that entered this method,
1309 * so free it explicitly.
1311 if (skb == orig_skb)
1312 ieee80211_free_txskb(mvm->hw, skb);
1315 /* there was an error, but we consumed the skb one way or another, so return 0 */
1323 static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
1324 struct ieee80211_sta *sta, u8 tid)
1326 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1327 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1328 struct ieee80211_vif *vif = mvmsta->vif;
1331 lockdep_assert_held(&mvmsta->lock);
1333 if ((tid_data->state == IWL_AGG_ON ||
1334 tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
1335 iwl_mvm_tid_queued(mvm, tid_data) == 0) {
1337 * Now that this aggregation or DQA queue is empty tell
1338 * mac80211 so it knows we no longer have frames buffered for
1339 * the station on this TID (for the TIM bitmap calculation.)
1341 ieee80211_sta_set_buffered(sta, tid, false);
1345 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
1346 * to align the wrap around of ssn so we compare relevant values.
1348 normalized_ssn = tid_data->ssn;
1349 if (mvm->trans->trans_cfg->gen2)
1350 normalized_ssn &= 0xff;
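/*
 * e.g. on gen2 an ssn of 0x1ab is reduced to 0xab so it can be compared
 * against the 8-bit next_reclaimed index.
 */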
1352 if (normalized_ssn != tid_data->next_reclaimed)
1355 switch (tid_data->state) {
1356 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1357 IWL_DEBUG_TX_QUEUES(mvm,
1358 "Can continue addBA flow ssn = next_recl = %d\n",
1359 tid_data->next_reclaimed);
1360 tid_data->state = IWL_AGG_STARTING;
1361 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1364 case IWL_EMPTYING_HW_QUEUE_DELBA:
1365 IWL_DEBUG_TX_QUEUES(mvm,
1366 "Can continue DELBA flow ssn = next_recl = %d\n",
1367 tid_data->next_reclaimed);
1368 tid_data->state = IWL_AGG_OFF;
1369 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1377 #ifdef CONFIG_IWLWIFI_DEBUG
1378 const char *iwl_mvm_get_tx_fail_reason(u32 status)
1380 #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
1381 #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1383 switch (status & TX_STATUS_MSK) {
1384 case TX_STATUS_SUCCESS:
1386 TX_STATUS_POSTPONE(DELAY);
1387 TX_STATUS_POSTPONE(FEW_BYTES);
1388 TX_STATUS_POSTPONE(BT_PRIO);
1389 TX_STATUS_POSTPONE(QUIET_PERIOD);
1390 TX_STATUS_POSTPONE(CALC_TTAK);
1391 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1392 TX_STATUS_FAIL(SHORT_LIMIT);
1393 TX_STATUS_FAIL(LONG_LIMIT);
1394 TX_STATUS_FAIL(UNDERRUN);
1395 TX_STATUS_FAIL(DRAIN_FLOW);
1396 TX_STATUS_FAIL(RFKILL_FLUSH);
1397 TX_STATUS_FAIL(LIFE_EXPIRE);
1398 TX_STATUS_FAIL(DEST_PS);
1399 TX_STATUS_FAIL(HOST_ABORTED);
1400 TX_STATUS_FAIL(BT_RETRY);
1401 TX_STATUS_FAIL(STA_INVALID);
1402 TX_STATUS_FAIL(FRAG_DROPPED);
1403 TX_STATUS_FAIL(TID_DISABLE);
1404 TX_STATUS_FAIL(FIFO_FLUSHED);
1405 TX_STATUS_FAIL(SMALL_CF_POLL);
1406 TX_STATUS_FAIL(FW_DROP);
1407 TX_STATUS_FAIL(STA_COLOR_MISMATCH);
1412 #undef TX_STATUS_FAIL
1413 #undef TX_STATUS_POSTPONE
1415 #endif /* CONFIG_IWLWIFI_DEBUG */
1417 static int iwl_mvm_get_hwrate_chan_width(u32 chan_width)
1419 switch (chan_width) {
1420 case RATE_MCS_CHAN_WIDTH_20:
1422 case RATE_MCS_CHAN_WIDTH_40:
1423 return IEEE80211_TX_RC_40_MHZ_WIDTH;
1424 case RATE_MCS_CHAN_WIDTH_80:
1425 return IEEE80211_TX_RC_80_MHZ_WIDTH;
1426 case RATE_MCS_CHAN_WIDTH_160:
1427 return IEEE80211_TX_RC_160_MHZ_WIDTH;
1433 void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
1434 enum nl80211_band band,
1435 struct ieee80211_tx_rate *r)
1437 u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
1438 u32 rate = format == RATE_MCS_HT_MSK ?
1439 RATE_HT_MCS_INDEX(rate_n_flags) :
1440 rate_n_flags & RATE_MCS_CODE_MSK;
1443 iwl_mvm_get_hwrate_chan_width(rate_n_flags &
1444 RATE_MCS_CHAN_WIDTH_MSK);
1446 if (rate_n_flags & RATE_MCS_SGI_MSK)
1447 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1448 if (format == RATE_MCS_HT_MSK) {
1449 r->flags |= IEEE80211_TX_RC_MCS;
1451 } else if (format == RATE_MCS_VHT_MSK) {
1452 ieee80211_rate_set_vht(r, rate,
1453 FIELD_GET(RATE_MCS_NSS_MSK,
1455 r->flags |= IEEE80211_TX_RC_VHT_MCS;
1456 } else if (format == RATE_MCS_HE_MSK) {
1457 /* mac80211 cannot do this without ieee80211_tx_status_ext()
1458 * but it only matters for radiotap */
1461 r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
1466 void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags,
1467 enum nl80211_band band,
1468 struct ieee80211_tx_rate *r)
1470 if (rate_n_flags & RATE_HT_MCS_GF_MSK)
1471 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1474 iwl_mvm_get_hwrate_chan_width(rate_n_flags &
1475 RATE_MCS_CHAN_WIDTH_MSK_V1);
1477 if (rate_n_flags & RATE_MCS_SGI_MSK_V1)
1478 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1479 if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
1480 r->flags |= IEEE80211_TX_RC_MCS;
1481 r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1;
1482 } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) {
1483 ieee80211_rate_set_vht(
1484 r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
1485 FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1);
1486 r->flags |= IEEE80211_TX_RC_VHT_MCS;
1488 r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
1494 * translate ucode response to mac80211 tx status control values
1496 static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
1498 struct ieee80211_tx_info *info)
1500 struct ieee80211_tx_rate *r = &info->status.rates[0];
1502 if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
1504 rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
1506 info->status.antenna =
1507 ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
1508 iwl_mvm_hwrate_to_tx_rate(rate_n_flags,
1512 static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
1513 u32 status, __le16 frame_control)
1515 struct iwl_fw_dbg_trigger_tlv *trig;
1516 struct iwl_fw_dbg_trigger_tx_status *status_trig;
1519 if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS) {
1520 enum iwl_fw_ini_time_point tp =
1521 IWL_FW_INI_TIME_POINT_TX_FAILED;
1523 if (ieee80211_is_action(frame_control))
1524 tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
1526 iwl_dbg_tlv_time_point(&mvm->fwrt,
1531 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
1532 FW_DBG_TRIGGER_TX_STATUS);
1536 status_trig = (void *)trig->data;
1538 for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
1539 /* don't collect on status 0 */
1540 if (!status_trig->statuses[i].status)
1543 if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
1546 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1547 "Tx status %d was received",
1548 status & TX_STATUS_MSK);
1554 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
1555 * @tx_resp: the Tx response from the fw (agg or non-agg)
1557 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
1558 * it can't know that everything will go well until the end of the AMPDU, it
1559 * can't know in advance the number of MPDUs that will be sent in the current
1560 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
1561 * Hence, it can't know in advance what the SSN of the SCD will be at the end
1562 * of the batch. This is why the SSN of the SCD is written at the end of the
1563 * whole struct at a variable offset. This function knows how to cope with the
1564 * variable offset and returns the SSN of the SCD.
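 * (Each agg_tx_status entry is one 32-bit word, so advancing the pointer by
 * frame_count words skips the per-frame status array and lands on the word
 * holding the SCD SSN.)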
1566 static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
1567 struct iwl_mvm_tx_resp *tx_resp)
1569 return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
1570 tx_resp->frame_count) & 0xfff;
1573 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1574 struct iwl_rx_packet *pkt)
1576 struct ieee80211_sta *sta;
1577 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1578 int txq_id = SEQ_TO_QUEUE(sequence);
1579 /* struct iwl_mvm_tx_resp_v3 is almost the same */
1580 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1581 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1582 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1583 struct agg_tx_status *agg_status =
1584 iwl_mvm_get_agg_status(mvm, tx_resp);
1585 u32 status = le16_to_cpu(agg_status->status);
1586 u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
1587 struct sk_buff_head skbs;
1590 u16 next_reclaimed, seq_ctl;
1591 bool is_ndp = false;
1593 __skb_queue_head_init(&skbs);
1595 if (iwl_mvm_has_new_tx_api(mvm))
1596 txq_id = le16_to_cpu(tx_resp->tx_queue);
1598 seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
1600 /* we can free until ssn % q.n_bd not inclusive */
1601 iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
1603 while (!skb_queue_empty(&skbs)) {
1604 struct sk_buff *skb = __skb_dequeue(&skbs);
1605 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1606 struct ieee80211_hdr *hdr = (void *)skb->data;
1607 bool flushed = false;
1611 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1613 memset(&info->status, 0, sizeof(info->status));
1615 /* inform mac80211 about what happened with the frame */
1616 switch (status & TX_STATUS_MSK) {
1617 case TX_STATUS_SUCCESS:
1618 case TX_STATUS_DIRECT_DONE:
1619 info->flags |= IEEE80211_TX_STAT_ACK;
1621 case TX_STATUS_FAIL_FIFO_FLUSHED:
1622 case TX_STATUS_FAIL_DRAIN_FLOW:
1625 case TX_STATUS_FAIL_DEST_PS:
1626 /* the FW should have stopped the queue and not
1627 * return this status
1630 "FW reported TX filtered, status=0x%x, FC=0x%x\n",
1631 status, le16_to_cpu(hdr->frame_control));
1632 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1638 if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
1639 ieee80211_is_mgmt(hdr->frame_control))
1640 iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
1643 * If we are freeing multiple frames, mark all the frames
1644 * but the first one as acked, since they were acknowledged before.
1648 info->flags |= IEEE80211_TX_STAT_ACK;
1650 iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control);
1652 info->status.rates[0].count = tx_resp->failure_frame + 1;
1654 iwl_mvm_hwrate_to_tx_status(mvm->fw,
1655 le32_to_cpu(tx_resp->initial_rate),
1658 /* Don't assign the converted initial_rate, because driver
1659 * TLC uses this and doesn't support the new FW rate
1661 info->status.status_driver_data[1] =
1662 (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
1664 /* Single frame failure in an AMPDU queue => send BAR */
1665 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1666 !(info->flags & IEEE80211_TX_STAT_ACK) &&
1667 !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
1668 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1669 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1671 /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
1672 if (ieee80211_is_back_req(hdr->frame_control))
1674 else if (status != TX_STATUS_SUCCESS)
1675 seq_ctl = le16_to_cpu(hdr->seq_ctrl);
1677 if (unlikely(!seq_ctl)) {
1679 * If it is an NDP, we can't update next_reclaimed since
1680 * its sequence control is 0. Note that for that same
1681 * reason, NDPs are never sent to A-MPDU'able queues
1682 * so that we can never have more than one freed frame
1683 * for a single Tx response (see WARN_ON below).
1685 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1690 * TODO: this is not accurate if we are freeing more than one frame.
1693 info->status.tx_time =
1694 le16_to_cpu(tx_resp->wireless_media_time);
1695 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
1696 lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1697 info->status.status_driver_data[0] =
1698 RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
1700 if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1)))
1701 ieee80211_tx_status(mvm->hw, skb);
1704 /* This is an aggregation queue or might become one, so we use
1705 * the ssn since: ssn = wifi seq_num % 256.
1706 * The seq_ctl is the sequence control of the packet to which
1707 * this Tx response relates. But if there is a hole in the
1708 * bitmap of the BA we received, this Tx response may allow to
1709 * reclaim the hole and all the subsequent packets that were
1710 * already acked. In that case, seq_ctl != ssn, and the next
1711 * packet to be reclaimed will be ssn and not seq_ctl. In that
1712 * case, several packets will be reclaimed even if frame_count = 1.
1715 * The ssn is the index (% 256) of the latest packet that has
1716 * been treated (acked / dropped) + 1.
1718 next_reclaimed = ssn;
1720 IWL_DEBUG_TX_REPLY(mvm,
1721 "TXQ %d status %s (0x%08x)\n",
1722 txq_id, iwl_mvm_get_tx_fail_reason(status), status);
1724 IWL_DEBUG_TX_REPLY(mvm,
1725 "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
1726 le32_to_cpu(tx_resp->initial_rate),
1727 tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
1728 ssn, next_reclaimed, seq_ctl);
1732 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1734 * sta can't be NULL otherwise it'd mean that the sta has been freed in
1735 * the firmware while we still have packets for it in the Tx queues.
1737 if (WARN_ON_ONCE(!sta))
1741 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1743 iwl_mvm_tx_airtime(mvm, mvmsta,
1744 le16_to_cpu(tx_resp->wireless_media_time));
1746 if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
1747 mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
1748 iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);
1750 if (sta->wme && tid != IWL_MGMT_TID) {
1751 struct iwl_mvm_tid_data *tid_data =
1752 &mvmsta->tid_data[tid];
1753 bool send_eosp_ndp = false;
1755 spin_lock_bh(&mvmsta->lock);
1758 tid_data->next_reclaimed = next_reclaimed;
1759 IWL_DEBUG_TX_REPLY(mvm,
1760 "Next reclaimed packet:%d\n",
1763 IWL_DEBUG_TX_REPLY(mvm,
1764 "NDP - don't update next_reclaimed\n");
1767 iwl_mvm_check_ratid_empty(mvm, sta, tid);
1769 if (mvmsta->sleep_tx_count) {
1770 mvmsta->sleep_tx_count--;
1771 if (mvmsta->sleep_tx_count &&
1772 !iwl_mvm_tid_queued(mvm, tid_data)) {
1774 * The number of frames in the queue
1775 * dropped to 0 even if we sent less
1776 * frames than we thought we had on the
1778 * This means we had holes in the BA
1779 * window that we just filled, ask
1780 * mac80211 to send EOSP since the
1781 * firmware won't know how to do that.
1782 * Send NDP and the firmware will send
1783 * EOSP notification that will trigger
1784 * a call to ieee80211_sta_eosp().
1786 send_eosp_ndp = true;
1790 spin_unlock_bh(&mvmsta->lock);
1791 if (send_eosp_ndp) {
1792 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
1793 IEEE80211_FRAME_RELEASE_UAPSD,
1794 1, tid, false, false);
1795 mvmsta->sleep_tx_count = 0;
1796 ieee80211_send_eosp_nullfunc(sta, tid);
1800 if (mvmsta->next_status_eosp) {
1801 mvmsta->next_status_eosp = false;
1802 ieee80211_sta_eosp(sta);
1809 #ifdef CONFIG_IWLWIFI_DEBUG
1810 #define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
1811 static const char *iwl_get_agg_tx_status(u16 status)
1813 switch (status & AGG_TX_STATE_STATUS_MSK) {
1814 AGG_TX_STATE_(TRANSMITTED);
1815 AGG_TX_STATE_(UNDERRUN);
1816 AGG_TX_STATE_(BT_PRIO);
1817 AGG_TX_STATE_(FEW_BYTES);
1818 AGG_TX_STATE_(ABORT);
1819 AGG_TX_STATE_(TX_ON_AIR_DROP);
1820 AGG_TX_STATE_(LAST_SENT_TRY_CNT);
1821 AGG_TX_STATE_(LAST_SENT_BT_KILL);
1822 AGG_TX_STATE_(SCD_QUERY);
1823 AGG_TX_STATE_(TEST_BAD_CRC32);
1824 AGG_TX_STATE_(RESPONSE);
1825 AGG_TX_STATE_(DUMP_TX);
1826 AGG_TX_STATE_(DELAY_TX);
1832 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1833 struct iwl_rx_packet *pkt)
1835 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1836 struct agg_tx_status *frame_status =
1837 iwl_mvm_get_agg_status(mvm, tx_resp);
1839 bool trigger_timepoint = false;
1841 for (i = 0; i < tx_resp->frame_count; i++) {
1842 u16 fstatus = le16_to_cpu(frame_status[i].status);
1843 /* In case one frame wasn't transmitted trigger time point */
1844 trigger_timepoint |= ((fstatus & AGG_TX_STATE_STATUS_MSK) !=
1845 AGG_TX_STATE_TRANSMITTED);
1846 IWL_DEBUG_TX_REPLY(mvm,
1847 "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
1848 iwl_get_agg_tx_status(fstatus),
1849 fstatus & AGG_TX_STATE_STATUS_MSK,
1850 (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
1851 AGG_TX_STATE_TRY_CNT_POS,
1852 le16_to_cpu(frame_status[i].sequence));
1855 if (trigger_timepoint)
1856 iwl_dbg_tlv_time_point(&mvm->fwrt,
1857 IWL_FW_INI_TIME_POINT_TX_FAILED, NULL);
1861 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1862 struct iwl_rx_packet *pkt)
1864 #endif /* CONFIG_IWLWIFI_DEBUG */
1866 static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
1867 struct iwl_rx_packet *pkt)
1869 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1870 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1871 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1872 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1873 struct iwl_mvm_sta *mvmsta;
1874 int queue = SEQ_TO_QUEUE(sequence);
1875 struct ieee80211_sta *sta;
1877 if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
1878 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
1881 iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
1885 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
1887 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1888 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) {
1893 if (!WARN_ON_ONCE(!mvmsta)) {
1894 mvmsta->tid_data[tid].rate_n_flags =
1895 le32_to_cpu(tx_resp->initial_rate);
1896 mvmsta->tid_data[tid].tx_time =
1897 le16_to_cpu(tx_resp->wireless_media_time);
1898 mvmsta->tid_data[tid].lq_color =
1899 TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1900 iwl_mvm_tx_airtime(mvm, mvmsta,
1901 le16_to_cpu(tx_resp->wireless_media_time));
1907 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1909 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1910 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1912 if (tx_resp->frame_count == 1)
1913 iwl_mvm_rx_tx_cmd_single(mvm, pkt);
1915 iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
1918 static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
1920 struct ieee80211_tx_info *tx_info, u32 rate,
1923 struct sk_buff_head reclaimed_skbs;
1924 struct iwl_mvm_tid_data *tid_data = NULL;
1925 struct ieee80211_sta *sta;
1926 struct iwl_mvm_sta *mvmsta = NULL;
1927 struct sk_buff *skb;
1930 if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations ||
1931 tid > IWL_MAX_TID_COUNT,
1932 "sta_id %d tid %d", sta_id, tid))
1937 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1939 /* Reclaiming frames for a station that has been deleted ? */
1940 if (WARN_ON_ONCE(!sta)) {
1945 __skb_queue_head_init(&reclaimed_skbs);
1948 * Release all TFDs before the SSN, i.e. all TFDs in front of
1949 * block-ack window (we assume that they've been successfully
1950 * transmitted ... if not, it's too late anyway).
1952 iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
1954 skb_queue_walk(&reclaimed_skbs, skb) {
1955 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1957 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1959 memset(&info->status, 0, sizeof(info->status));
1960 /* Packet was transmitted successfully, failures come as single
1961 * frames because before failing a frame the firmware transmits
1962 * it without aggregation at least once.
1965 info->flags |= IEEE80211_TX_STAT_ACK;
1969 * It's possible to get a BA response after invalidating the rcu (rcu is
1970 * invalidated in order to prevent new Tx from being sent, but there may
1971 * be some frames already in-flight).
1972 * In this case we just want to reclaim, and could skip all the
1973 * sta-dependent stuff since it's in the middle of being removed
1979 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1980 tid_data = &mvmsta->tid_data[tid];
1982 if (tid_data->txq_id != txq) {
1984 "invalid reclaim request: Q %d, tid %d\n",
1985 tid_data->txq_id, tid);
1990 spin_lock_bh(&mvmsta->lock);
1992 tid_data->next_reclaimed = index;
1994 iwl_mvm_check_ratid_empty(mvm, sta, tid);
1998 /* pack lq color from tid_data along the reduced txp */
1999 tx_info->status.status_driver_data[0] =
2000 RS_DRV_DATA_PACK(tid_data->lq_color,
2001 tx_info->status.status_driver_data[0]);
2002 tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
2004 skb_queue_walk(&reclaimed_skbs, skb) {
2005 struct ieee80211_hdr *hdr = (void *)skb->data;
2006 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2009 if (ieee80211_is_data_qos(hdr->frame_control))
2012 WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
2015 /* this is the first skb we deliver in this batch */
2016 /* put the rate scaling data there */
2018 info->flags |= IEEE80211_TX_STAT_AMPDU;
2019 memcpy(&info->status, &tx_info->status,
2020 sizeof(tx_info->status));
2021 iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info);
2025 spin_unlock_bh(&mvmsta->lock);
2027 /* We got a BA notif with 0 acked or scd_ssn didn't progress which is
2028 * possible (i.e. first MPDU in the aggregation wasn't acked)
2029 * Still it's important to update RS about sent vs. acked.
2031 if (!is_flush && skb_queue_empty(&reclaimed_skbs) &&
2032 !iwl_mvm_has_tlc_offload(mvm)) {
2033 struct ieee80211_chanctx_conf *chanctx_conf = NULL;
2035 /* no TLC offload, so non-MLD mode */
2038 rcu_dereference(mvmsta->vif->bss_conf.chanctx_conf);
2040 if (WARN_ON_ONCE(!chanctx_conf))
2043 tx_info->band = chanctx_conf->def.chan->band;
2044 iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info);
2046 IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
2047 iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false);
2053 while (!skb_queue_empty(&reclaimed_skbs)) {
2054 skb = __skb_dequeue(&reclaimed_skbs);
2055 ieee80211_tx_status(mvm->hw, skb);
2059 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2061 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2062 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
2063 int sta_id, tid, txq, index;
2064 struct ieee80211_tx_info ba_info = {};
2065 struct iwl_mvm_ba_notif *ba_notif;
2066 struct iwl_mvm_tid_data *tid_data;
2067 struct iwl_mvm_sta *mvmsta;
2069 ba_info.flags = IEEE80211_TX_STAT_AMPDU;
2071 if (iwl_mvm_has_new_tx_api(mvm)) {
2072 struct iwl_mvm_compressed_ba_notif *ba_res =
2074 u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
2078 if (unlikely(sizeof(*ba_res) > pkt_len))
2081 sta_id = ba_res->sta_id;
2082 ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
2083 ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
2084 ba_info.status.tx_time =
2085 (u16)le32_to_cpu(ba_res->wireless_time);
2086 ba_info.status.status_driver_data[0] =
2087 (void *)(uintptr_t)ba_res->reduced_txp;
2089 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
2090 if (!tfd_cnt || struct_size(ba_res, tfd, tfd_cnt) > pkt_len)
2095 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
2097 * It's possible to get a BA response after invalidating the rcu
2098 * (rcu is invalidated in order to prevent new Tx from being
2099 * sent, but there may be some frames already in-flight).
2100 * In this case we just want to reclaim, and could skip all the
2101 * sta-dependent stuff since it's in the middle of being removed
2106 for (i = 0; i < tfd_cnt; i++) {
2107 struct iwl_mvm_compressed_ba_tfd *ba_tfd =
2111 if (tid == IWL_MGMT_TID)
2112 tid = IWL_MAX_TID_COUNT;
2115 mvmsta->tid_data[i].lq_color = lq_color;
2117 iwl_mvm_tx_reclaim(mvm, sta_id, tid,
2118 (int)(le16_to_cpu(ba_tfd->q_num)),
2119 le16_to_cpu(ba_tfd->tfd_index),
2121 le32_to_cpu(ba_res->tx_rate), false);
2125 iwl_mvm_tx_airtime(mvm, mvmsta,
2126 le32_to_cpu(ba_res->wireless_time));
2129 IWL_DEBUG_TX_REPLY(mvm,
2130 "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
2131 sta_id, le32_to_cpu(ba_res->flags),
2132 le16_to_cpu(ba_res->txed),
2133 le16_to_cpu(ba_res->done));
2137 ba_notif = (void *)pkt->data;
2138 sta_id = ba_notif->sta_id;
2139 tid = ba_notif->tid;
2140 /* "flow" corresponds to Tx queue */
2141 txq = le16_to_cpu(ba_notif->scd_flow);
2142 /* "ssn" is start of block-ack Tx window, corresponds to index
2143 * (in Tx queue's circular buffer) of first TFD/frame in window */
2144 index = le16_to_cpu(ba_notif->scd_ssn);
2147 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
2148 if (WARN_ON_ONCE(!mvmsta)) {
2153 tid_data = &mvmsta->tid_data[tid];
2155 ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
2156 ba_info.status.ampdu_len = ba_notif->txed;
2157 ba_info.status.tx_time = tid_data->tx_time;
2158 ba_info.status.status_driver_data[0] =
2159 (void *)(uintptr_t)ba_notif->reduced_txp;
2163 iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
2164 tid_data->rate_n_flags, false);
2166 IWL_DEBUG_TX_REPLY(mvm,
2167 "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
2168 ba_notif->sta_addr, ba_notif->sta_id);
2170 IWL_DEBUG_TX_REPLY(mvm,
2171 "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
2172 ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
2173 le64_to_cpu(ba_notif->bitmap), txq, index,
2174 ba_notif->txed, ba_notif->txed_2_done);
2176 IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
2177 ba_notif->reduced_txp);
2181 * Note that there are transports that buffer frames before they reach
2182 * the firmware. This means that after flush_tx_path is called, the
2183 * queue might not be empty. The race-free way to handle this is to:
2184 * 1) set the station as draining
2185 * 2) flush the Tx path
2186 * 3) wait for the transport queues to be empty
2188 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk)
2191 struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
2192 .queues_ctl = cpu_to_le32(tfd_msk),
2193 .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
2196 WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2197 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0,
2198 sizeof(flush_cmd), &flush_cmd);
2200 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
2204 int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
2207 struct iwl_tx_path_flush_cmd_rsp *rsp;
2208 struct iwl_tx_path_flush_cmd flush_cmd = {
2209 .sta_id = cpu_to_le32(sta_id),
2210 .tid_mask = cpu_to_le16(tids),
2213 struct iwl_host_cmd cmd = {
2215 .len = { sizeof(flush_cmd), },
2216 .data = { &flush_cmd, },
2219 WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2221 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
2222 cmd.flags |= CMD_WANT_SKB;
2224 IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
2227 ret = iwl_mvm_send_cmd(mvm, &cmd);
2230 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
2234 if (cmd.flags & CMD_WANT_SKB) {
2236 int num_flushed_queues;
2238 if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != sizeof(*rsp))) {
2243 rsp = (void *)cmd.resp_pkt->data;
2245 if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id,
2246 "sta_id %d != rsp_sta_id %d",
2247 sta_id, le16_to_cpu(rsp->sta_id))) {
2252 num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
2253 if (WARN_ONCE(num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
2254 "num_flushed_queues %d", num_flushed_queues)) {
2259 for (i = 0; i < num_flushed_queues; i++) {
2260 struct ieee80211_tx_info tx_info = {};
2261 struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
2262 int tid = le16_to_cpu(queue_info->tid);
2263 int read_before = le16_to_cpu(queue_info->read_before_flush);
2264 int read_after = le16_to_cpu(queue_info->read_after_flush);
2265 int queue_num = le16_to_cpu(queue_info->queue_num);
2267 if (tid == IWL_MGMT_TID)
2268 tid = IWL_MAX_TID_COUNT;
2270 IWL_DEBUG_TX_QUEUES(mvm,
2271 "tid %d queue_id %d read-before %d read-after %d\n",
2272 tid, queue_num, read_before, read_after);
2274 iwl_mvm_tx_reclaim(mvm, sta_id, tid, queue_num, read_after,
2278 iwl_free_resp(&cmd);
2283 int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
2285 u32 sta_id, tfd_queue_msk;
2288 struct iwl_mvm_int_sta *int_sta = sta;
2290 sta_id = int_sta->sta_id;
2291 tfd_queue_msk = int_sta->tfd_queue_msk;
2293 struct iwl_mvm_sta *mvm_sta = sta;
2295 sta_id = mvm_sta->deflink.sta_id;
2296 tfd_queue_msk = mvm_sta->tfd_queue_msk;
2299 if (iwl_mvm_has_new_tx_api(mvm))
2300 return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff);
2302 return iwl_mvm_flush_tx_path(mvm, tfd_queue_msk);