// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2015, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>
/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);

	return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
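/*
 * This size-based versioning pattern recurs throughout the driver: the
 * firmware only parses as many bytes as the host sends, so sending the
 * shorter v7 layout keeps older firmware happy while newer firmware
 * receives the full structure.
 */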
int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}
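/*
 * Example: on any interface type other than a station (e.g. AP or P2P GO),
 * sta_id 0 stays in reserved_ids and is never handed out, because the
 * d0i3/d3 code assumes index 0 belongs to the AP station of a station vif.
 */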
/* Calculate the ampdu density and max size */
u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
			       struct ieee80211_bss_conf *link_conf,
			       u32 *_agg_size)
{
	u32 agg_size = 0, mpdu_dens = 0;

	if (WARN_ON(!link_sta))
		return 0;

	/* Note that we always use only legacy & highest supported PPDUs, so
	 * of Draft P802.11be D3.0 Table 10-12a--Fields used for calculating
	 * the maximum A-MPDU size of various PPDU types in different bands,
	 * we only need to worry about the highest supported PPDU type here.
	 */

	if (link_sta->ht_cap.ht_supported) {
		agg_size = link_sta->ht_cap.ampdu_factor;
		mpdu_dens = link_sta->ht_cap.ampdu_density;
	}

	if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
		/* overwrite HT values on 6 GHz */
		mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
					  IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
		agg_size = le16_get_bits(link_sta->he_6ghz_capa.capa,
					 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
	} else if (link_sta->vht_cap.vht_supported) {
		/* if VHT supported overwrite HT value */
		agg_size = u32_get_bits(link_sta->vht_cap.cap,
					IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
	}

	/* D6.0 10.12.2 A-MPDU length limit rules
	 * A STA indicates the maximum length of the A-MPDU pre-EOF padding
	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
	 * Exponent field in its HT Capabilities, VHT Capabilities,
	 * and HE 6 GHz Band Capabilities elements (if present) and the
	 * Maximum AMPDU Length Exponent Extension field in its HE
	 * Capabilities element
	 */
	if (link_sta->he_cap.has_he)
		agg_size +=
			u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
				    IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

	if (link_sta->eht_cap.has_eht)
		agg_size += u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
					IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);

	/* Limit to max A-MPDU supported by FW */
	agg_size = min_t(u32, agg_size,
			 STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);

	*_agg_size = agg_size;
	return mpdu_dens;
}
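/*
 * Note that the value returned via _agg_size is an A-MPDU length exponent,
 * not a byte count: the byte limit is roughly 2^(13 + agg_size) - 1, so
 * the STA_FLG_MAX_AGG_SIZE_4M firmware cap corresponds to an exponent of
 * 9 (2^(13 + 9) = 4M).
 */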
u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta)
{
	u8 uapsd_acs = 0;

	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
		uapsd_acs |= BIT(AC_BK);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
		uapsd_acs |= BIT(AC_BE);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
		uapsd_acs |= BIT(AC_VI);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
		uapsd_acs |= BIT(AC_VO);

	return uapsd_acs | uapsd_acs << 4;
}
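/*
 * The returned byte duplicates the AC bitmap into both nibbles: per the
 * ADD_STA API one nibble describes the trigger-enabled ACs and the other
 * the delivery-enabled ACs. mac80211's uapsd_queues makes no such
 * distinction, so the same four bits are used for both halves.
 */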
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->deflink.sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_20:
		if (sta->deflink.ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->deflink.rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->deflink.smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->deflink.ht_cap.ht_supported ||
	    mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

	mpdu_dens = iwl_mvm_get_sta_ampdu_dens(&sta->deflink,
					       &mvm_sta->vif->bss_conf,
					       &agg_size);
	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
		add_sta_cmd.uapsd_acs = iwl_mvm_get_sta_uapsd_acs(sta);
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
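/*
 * ADD_STA doubles as both "add" and "update" (see add_modify above), and
 * the response status is masked with IWL_ADD_STA_STATUS_MASK since the
 * firmware can pack auxiliary data (e.g. a block-ack BAID) into the upper
 * bits; the exact layout is FW-API specific.
 */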
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta_id = ffs(ba_data->sta_mask) - 1; /* don't care which one */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(sta))
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int sta_id, u16 *queueptr, u8 tid)
{
	int queue = *queueptr;
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		if (mvm->sta_remove_requires_queue_remove) {
			u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
					     SCD_QUEUE_CONFIG_CMD);
			struct iwl_scd_queue_cfg_cmd remove_cmd = {
				.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
				.u.remove.sta_mask = cpu_to_le32(BIT(sta_id)),
			};

			if (tid == IWL_MAX_TID_COUNT)
				tid = IWL_MGMT_TID;

			remove_cmd.u.remove.tid = cpu_to_le32(tid);

			ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
						   sizeof(remove_cmd),
						   &remove_cmd);
		} else {
			ret = 0;
		}

		iwl_trans_txq_free(mvm->trans, queue);
		*queueptr = IWL_MVM_INVALID_QUEUE;

		return ret;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	/* Regardless of whether this is a reserved TXQ for a STA - mark the
	 * reserved flag as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	u16 queue_tmp = queue;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}
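/*
 * Worked example for iwl_mvm_get_shared_queue(): if the STA only has a VO
 * data queue and we need a queue for BK traffic, priorities 1-3 all miss
 * (no BE, no BK, and the VI fallback only applies when the new AC is VO),
 * so priority 4b picks the existing VO queue.
 */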
/* Re-configure the SCD for a queue that has already been configured */
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
				int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = frame_limit,
		.sta_id = sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = fifo,
		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
		.tid = tid,
	};
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
		 "Trying to reconfig unallocated queue %d\n", queue))
		return -ENXIO;

	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
		  queue, fifo, ret);

	return ret;
}
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case; otherwise, if no redirection is required, it does
 * nothing, unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than the current one - the FIFO needs to be
	 * redirected to the lowest one of the streams in the queue. Check if
	 * this is needed here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK has
	 * value 3 and VO value 0; to check whether AC X is lower than AC Y,
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	return ret;
}
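/*
 * Example of the "flipped" AC numbering handled above: mac80211 uses VO=0,
 * VI=1, BE=2, BK=3, so moving a TID from a queue marked VI (1) to BE (2)
 * is a move to a *lower* AC even though the number grows; that is exactly
 * the (ac > mac80211_ac) case that triggers the redirect.
 */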
static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->trans_cfg->base_params->num_of_queues))
		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}
static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta)
{
	int max_size = IWL_DEFAULT_QUEUE_SIZE;
	unsigned int link_id;

	/* this queue isn't used for traffic (cab_queue) */
	if (!sta)
		return IWL_MGMT_QUEUE_SIZE;

	rcu_read_lock();

	for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
		struct ieee80211_link_sta *link =
			rcu_dereference(sta->link[link_id]);

		if (!link)
			continue;

		/* support for 1k ba size */
		if (link->eht_cap.has_eht &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_EHT)
			max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;

		/* support for 256 ba size */
		if (link->he_cap.has_he &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_HE)
			max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
	}

	rcu_read_unlock();
	return max_size;
}
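/*
 * The queue must be able to keep a full block-ack window of frames in
 * flight, which is why HE (256-frame BA) and EHT (1k-frame BA) peers bump
 * the default size above; any link supporting the larger window raises
 * the size for the whole station.
 */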
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
			    struct ieee80211_sta *sta,
			    u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size;
	u32 sta_mask = 0;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	} else {
		size = iwl_mvm_get_queue_size(sta);
	}

	/* take the min with bc tbl entries allowed */
	size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));

	/* size needs to be power of 2 values for calculating read/write pointers */
	size = rounddown_pow_of_two(size);

	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		unsigned int link_id;

		for (link_id = 0;
		     link_id < ARRAY_SIZE(mvmsta->link);
		     link_id++) {
			struct iwl_mvm_link_sta *link =
				rcu_dereference_protected(mvmsta->link[link_id],
							  lockdep_is_held(&mvm->mutex));

			if (!link)
				continue;

			sta_mask |= BIT(link->sta_id);
		}
	} else {
		sta_mask |= BIT(sta_id);
	}

	if (!sta_mask)
		return -EINVAL;

	do {
		queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
					    tid, size, timeout);

		if (queue < 0)
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %d\n",
					    size, sta_mask, tid, queue);
		size /= 2;
	} while (queue < 0 && size >= 16);

	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
			    queue, sta_mask, tid);

	return queue;
}
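/*
 * Allocation sketch for iwl_mvm_tvqm_enable_txq(): rounddown_pow_of_two()
 * keeps the ring size a power of two (e.g. a 600-entry request becomes
 * 512), and on allocation failure the loop above retries with half the
 * size until it either succeeds or drops below 16 entries.
 */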
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->deflink.sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, sta, mvmsta->deflink.sta_id,
					tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->deflink.sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}
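/*
 * iwl_mvm_update_txq_mapping() returns true only for the first TID mapped
 * to a queue; the caller uses that to decide whether the SCD enable
 * command must actually be sent, since additional TIDs just piggyback on
 * an already-enabled (shared) queue.
 */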
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	bool ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->deflink.sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}
/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	unsigned int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 q_tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}
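/*
 * Ownership example for the changetid logic above: suppose a TXQ was
 * allocated for TID 1 (so txq_tid == 1) and later shared with TIDs 4 and
 * 6. If TID 1 goes inactive and is unmapped, txq_tid still names a TID
 * that no longer lives on the queue, so the queue is marked in
 * *changetid_queues and ownership is handed to a remaining TID by
 * iwl_mvm_change_queue_tid().
 */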
/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	rcu_read_unlock();

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->deflink.sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	u16 queue_tmp;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->deflink.sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	queue_tmp = queue;
	iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid);

	return ret;
}
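/*
 * To summarize the fallback order in iwl_mvm_sta_alloc_queue(): a mgmt TID
 * first tries the dedicated MGMT queue range, then any request tries the
 * STA's reserved queue, then a free DATA queue, then reclaiming an
 * inactive queue, and only as a last resort shares an existing queue of a
 * suitable AC.
 */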
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * We can't really do much here, but if this fails we can't
		 * transmit anyway - so just don't transmit the frame etc.
		 * and let them back up ... we've tried our best to allocate
		 * a queue in the function itself.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			spin_lock_bh(&mvm->add_stream_lock);
			list_del_init(&mvmtxq->list);
			spin_unlock_bh(&mvm->add_stream_lock);
			continue;
		}

		/* now we're ready, any remaining races/concurrency will be
		 * handled in iwl_mvm_mac_itxq_xmit()
		 */
		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);

		local_bh_disable();
		spin_lock(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		spin_unlock(&mvm->add_stream_lock);

		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->deflink.sta_id);

	return 0;
}
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
					  struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->deflink.sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->deflink.sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, sta,
							 mvm_sta->deflink.sta_id,
							 i, wdg);
			/*
			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * to try again later, we have no other good way of
			 * failing here
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
						IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->deflink.sta_id, i,
					    txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;

	if (iwl_mvm_has_new_station_api(mvm->fw) &&
	    sta->type == IWL_STA_AUX_ACTIVITY)
		cmd.mac_id_n_color = cpu_to_le32(mac_id);
	else
		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
								     color));

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
/* Initialize driver data of a new sta */
int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		     struct ieee80211_sta *sta, int sta_id, u8 sta_type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret = 0;

	lockdep_assert_held(&mvm->mutex);

	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;

	/* for MLD sta_id(s) should be allocated for each link before calling
	 * this function
	 */
	if (!mvm->mld_api_is_used) {
		if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))
			return -EINVAL;

		mvm_sta->deflink.sta_id = sta_id;
		rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink);

		if (!mvm->trans->trans_cfg->gen2)
			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
				LINK_QUAL_AGG_FRAME_LIMIT_DEF;
		else
			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
				LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	}

	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta_type;

	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	if (iwl_mvm_has_new_rx_api(mvm)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			return ret;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);
	else
		spin_lock_init(&mvm_sta->deflink.lq_sta.rs_drv.pers.lock);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

	return 0;
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->deflink.sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/* First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	ret = iwl_mvm_sta_init(mvm, vif, sta, sta_id,
			       sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK);
	if (ret)
		goto err;

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->deflink.ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->deflink.sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->deflink.sta_id);
		break;
	}

	return ret;
}
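/*
 * Typical drain sequence (see iwl_mvm_rm_sta() below): drain(true) asks
 * the firmware to drain the frames pending for the station, the caller
 * then waits for the TX queues to empty, and drain(false) clears the flag
 * again before the station is removed.
 */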
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, mvm_sta->deflink.sta_id,
				    &mvm_sta->tid_data[i].txq_id, i);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		spin_lock_bh(&mvm->add_stream_lock);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		spin_unlock_bh(&mvm->add_stream_lock);
	}
}
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}
/* Execute the common part for both MLD and non-MLD modes.
 * Returns true if we're done with removing the station, either
 * with error or success.
 */
bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		     struct ieee80211_sta *sta,
		     struct ieee80211_link_sta *link_sta, int *ret)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_vif_link_info *mvm_link =
		mvmvif->link[link_sta->link_id];
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_link_sta *mvm_link_sta;
	u8 sta_id;

	lockdep_assert_held(&mvm->mutex);

	mvm_link_sta =
		rcu_dereference_protected(mvm_sta->link[link_sta->link_id],
					  lockdep_is_held(&mvm->mutex));
	sta_id = mvm_link_sta->sta_id;

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			*ret = -EINVAL;
			return true;
		}

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION) {
		/* if associated - we can't remove the AP STA now */
		if (vif->cfg.assoc)
			return true;

		/* first remove remaining keys */
		iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0);

		/* unassoc - go ahead - remove the AP STA now */
		mvm_link->ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	return false;
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	if (iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink, &ret))
		return ret;

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL);

	return ret;
}
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     u8 type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
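/*
 * Internal stations (aux, sniffer, broadcast) never have a mac80211
 * ieee80211_sta, so fw_id_to_mac_id holds ERR_PTR(-EINVAL) for them; code
 * iterating the table (and iwl_mvm_rm_sta_common() above, which only
 * rejects NULL) treats such error values as "present but internal".
 */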
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}
static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	return iwl_mvm_tvqm_enable_txq(mvm, NULL, sta_id, IWL_MAX_TID_COUNT,
				       wdg_timeout);
}
2195 static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
2196 int maccolor, u8 *addr,
2197 struct iwl_mvm_int_sta *sta,
2198 u16 *queue, int fifo)
2202 /* Map queue to fifo - needs to happen before adding station */
2203 if (!iwl_mvm_has_new_tx_api(mvm))
2204 iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2206 ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
2208 if (!iwl_mvm_has_new_tx_api(mvm))
2209 iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue,
2215 * For 22000 firmware and onwards we cannot add a queue for a station
2216 * unknown to the firmware, so enable the queue here - after the station was added
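 * (i.e. the ADD_STA command is sent first; the TVQM queue allocation
 * below then refers to the freshly added sta_id)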
2218 if (iwl_mvm_has_new_tx_api(mvm)) {
2221 txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2223 iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2233 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
2236 u32 qmask = mvm->aux_queue == IWL_MVM_INVALID_QUEUE ? 0 :
2237 BIT(mvm->aux_queue);
2239 lockdep_assert_held(&mvm->mutex);
2241 /* Allocate aux station and assign to it the aux queue */
2242 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, qmask,
2243 NL80211_IFTYPE_UNSPECIFIED,
2244 IWL_STA_AUX_ACTIVITY);
2249 * In CDB NICs we need to specify which lmac to use for aux activity;
2250 * the mac_id argument's slot is used to pass lmac_id to the function
2252 ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
2253 &mvm->aux_sta, &mvm->aux_queue,
2254 IWL_MVM_TX_FIFO_MCAST);
2256 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2263 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2265 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2267 lockdep_assert_held(&mvm->mutex);
2269 return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2270 NULL, &mvm->snif_sta,
2272 IWL_MVM_TX_FIFO_BE);
2275 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2279 lockdep_assert_held(&mvm->mutex);
2281 if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
2284 iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id,
2285 &mvm->snif_queue, IWL_MAX_TID_COUNT);
2286 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2288 IWL_WARN(mvm, "Failed sending remove station\n");
2293 int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
2297 lockdep_assert_held(&mvm->mutex);
2299 if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
2302 iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id,
2303 &mvm->aux_queue, IWL_MAX_TID_COUNT);
2304 ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
2306 IWL_WARN(mvm, "Failed sending remove station\n");
2307 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2312 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2314 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2318 * Send the add station command for the vif's broadcast station.
2319 * Assumes that the station was already allocated.
2321 * @mvm: the mvm component
2322 * @vif: the interface to which the broadcast station is added
2325 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2327 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2328 struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
2329 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2330 const u8 *baddr = _baddr;
2333 unsigned int wdg_timeout =
2334 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2335 struct iwl_trans_txq_scd_cfg cfg = {
2336 .fifo = IWL_MVM_TX_FIFO_VO,
2337 .sta_id = mvmvif->deflink.bcast_sta.sta_id,
2338 .tid = IWL_MAX_TID_COUNT,
2340 .frame_limit = IWL_FRAME_LIMIT,
2343 lockdep_assert_held(&mvm->mutex);
2345 if (!iwl_mvm_has_new_tx_api(mvm)) {
2346 if (vif->type == NL80211_IFTYPE_AP ||
2347 vif->type == NL80211_IFTYPE_ADHOC) {
2348 queue = mvm->probe_queue;
2349 } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2350 queue = mvm->p2p_dev_queue;
2352 WARN(1, "Missing required TXQ for adding bcast STA\n");
2356 bsta->tfd_queue_msk |= BIT(queue);
2358 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2361 if (vif->type == NL80211_IFTYPE_ADHOC)
2362 baddr = vif->bss_conf.bssid;
2364 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2367 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2368 mvmvif->id, mvmvif->color);
2373 * For 22000 firmware and onwards we cannot add a queue for a station
2374 * unknown to the firmware, so enable the queue here - after the station was added
2376 if (iwl_mvm_has_new_tx_api(mvm)) {
2377 queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, bsta->sta_id,
2381 iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2385 if (vif->type == NL80211_IFTYPE_AP ||
2386 vif->type == NL80211_IFTYPE_ADHOC) {
2387 /* for queue management */
2388 mvm->probe_queue = queue;
2390 mvmvif->deflink.mgmt_queue = queue;
2391 } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2392 mvm->p2p_dev_queue = queue;
2394 } else if (vif->type == NL80211_IFTYPE_AP ||
2395 vif->type == NL80211_IFTYPE_ADHOC) {
2396 /* set it for use in TX */
2397 mvmvif->deflink.mgmt_queue = mvm->probe_queue;
2403 void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2404 struct ieee80211_vif *vif)
2406 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2407 u16 *queueptr, queue;
2409 lockdep_assert_held(&mvm->mutex);
2411 iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, true);
2413 switch (vif->type) {
2414 case NL80211_IFTYPE_AP:
2415 case NL80211_IFTYPE_ADHOC:
2416 queueptr = &mvm->probe_queue;
2418 case NL80211_IFTYPE_P2P_DEVICE:
2419 queueptr = &mvm->p2p_dev_queue;
2422 WARN(1, "Can't free bcast queue on vif type %d\n",
2428 iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.bcast_sta.sta_id,
2429 queueptr, IWL_MAX_TID_COUNT);
2431 if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)
2432 mvmvif->deflink.mgmt_queue = mvm->probe_queue;
2434 if (iwl_mvm_has_new_tx_api(mvm))
2437 WARN_ON(!(mvmvif->deflink.bcast_sta.tfd_queue_msk & BIT(queue)));
2438 mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue);
2441 /* Send the FW a request to remove the station from its internal data
2442 * structures, but DO NOT remove the entry from the local data structures. */
2443 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2445 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2448 lockdep_assert_held(&mvm->mutex);
2450 iwl_mvm_free_bcast_sta_queues(mvm, vif);
2452 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.bcast_sta.sta_id);
2454 IWL_WARN(mvm, "Failed sending remove station\n");
2458 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2460 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2462 lockdep_assert_held(&mvm->mutex);
2464 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.bcast_sta, 0,
2465 ieee80211_vif_type_p2p(vif),
2466 IWL_STA_GENERAL_PURPOSE);
2469 /* Allocate a new station entry for the broadcast station to the given vif,
2470 * and send it to the FW.
2471 * Note that each P2P mac should have its own broadcast station.
2473 * @mvm: the mvm component
2474 * @vif: the interface to which the broadcast station is added
2475 */
2476 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2478 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2479 struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
2482 lockdep_assert_held(&mvm->mutex);
2484 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2488 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2491 iwl_mvm_dealloc_int_sta(mvm, bsta);
2496 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2498 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2500 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.bcast_sta);
2504 * Send the FW a request to remove the station from its internal data
2505 * structures, and in addition remove it from the local data structure.
2507 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2511 lockdep_assert_held(&mvm->mutex);
2513 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2515 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2521 * Allocate a new station entry for the multicast station to the given vif,
2522 * and send it to the FW.
2523 * Note that each AP/GO mac should have its own multicast station.
2525 * @mvm: the mvm component
2526 * @vif: the interface to which the multicast station is added
2528 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2530 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2531 struct iwl_mvm_int_sta *msta = &mvmvif->deflink.mcast_sta;
2532 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2533 const u8 *maddr = _maddr;
2534 struct iwl_trans_txq_scd_cfg cfg = {
2535 .fifo = vif->type == NL80211_IFTYPE_AP ?
2536 IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2537 .sta_id = msta->sta_id,
2540 .frame_limit = IWL_FRAME_LIMIT,
2542 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2545 lockdep_assert_held(&mvm->mutex);
2547 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2548 vif->type != NL80211_IFTYPE_ADHOC))
2552 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2553 * invalid, so make sure we use the queue we want.
2554 * Note that this is done here as we want to avoid making DQA
2555 * changes in mac80211 layer.
2557 if (vif->type == NL80211_IFTYPE_ADHOC)
2558 mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2561 * While in previous FWs we had to exclude the cab queue from the TFD
2562 * queue mask, now it is needed like any other queue.
2564 if (!iwl_mvm_has_new_tx_api(mvm) &&
2565 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2566 iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
2569 msta->tfd_queue_msk |= BIT(mvmvif->deflink.cab_queue);
2571 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2572 mvmvif->id, mvmvif->color);
2577 * Enable cab queue after the ADD_STA command is sent.
2578 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2579 * command with unknown station id, and for FW that doesn't support
2580 * station API since the cab queue is not included in the tfd_queue_mask.
2583 if (iwl_mvm_has_new_tx_api(mvm)) {
2584 int queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, msta->sta_id,
2590 mvmvif->deflink.cab_queue = queue;
2591 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2592 IWL_UCODE_TLV_API_STA_TYPE))
2593 iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
2599 iwl_mvm_dealloc_int_sta(mvm, msta);
2603 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2604 struct ieee80211_key_conf *keyconf,
2608 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2609 struct iwl_mvm_add_sta_key_cmd cmd;
2611 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2612 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2617 /* This is a valid situation for GTK removal */
2618 if (sta_id == IWL_MVM_INVALID_STA)
2621 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2622 STA_KEY_FLG_KEYID_MSK);
2623 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2624 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2627 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2630 * The fields assigned here are in the same location at the start
2631 * of the command, so we can do this union trick.
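 * (both the v1 and the current command layouts begin with the same
 * 'common' header, which is what makes this safe)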
2633 u.cmd.common.key_flags = key_flags;
2634 u.cmd.common.key_offset = keyconf->hw_key_idx;
2635 u.cmd.common.sta_id = sta_id;
2637 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2639 status = ADD_STA_SUCCESS;
2640 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2644 case ADD_STA_SUCCESS:
2645 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2649 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2657 * Send the FW a request to remove the station from its internal data
2658 * structures, and in addition remove it from the local data structure.
2660 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2662 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2665 lockdep_assert_held(&mvm->mutex);
2667 iwl_mvm_flush_sta(mvm, &mvmvif->deflink.mcast_sta, true);
2669 iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
2670 &mvmvif->deflink.cab_queue, 0);
2672 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.mcast_sta.sta_id);
2674 IWL_WARN(mvm, "Failed sending remove station\n");
2679 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2681 struct iwl_mvm_delba_data notif = {
2685 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
2686 &notif, sizeof(notif));
2689 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2690 struct iwl_mvm_baid_data *data)
2694 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2696 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2698 struct iwl_mvm_reorder_buffer *reorder_buf =
2699 &data->reorder_buf[i];
2700 struct iwl_mvm_reorder_buf_entry *entries =
2701 &data->entries[i * data->entries_per_queue];
2703 spin_lock_bh(&reorder_buf->lock);
2704 if (likely(!reorder_buf->num_stored)) {
2705 spin_unlock_bh(&reorder_buf->lock);
2710 * This shouldn't happen in regular DELBA since the internal
2711 * delBA notification should trigger a release of all frames in
2712 * the reorder buffer.
2716 for (j = 0; j < reorder_buf->buf_size; j++)
2717 __skb_queue_purge(&entries[j].e.frames);
2719 * Prevent timer re-arm. This prevents a very far-fetched case
2720 * where we timed out on the notification. There may be prior
2721 * RX frames pending in the RX queue before the notification
2722 * that might get processed between now and the actual deletion
2723 * and we would re-arm the timer although we are deleting the reorder buffer.
2726 reorder_buf->removed = true;
2727 spin_unlock_bh(&reorder_buf->lock);
2728 del_timer_sync(&reorder_buf->reorder_timer);
2732 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2733 struct iwl_mvm_baid_data *data,
2734 u16 ssn, u16 buf_size)
2738 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2739 struct iwl_mvm_reorder_buffer *reorder_buf =
2740 &data->reorder_buf[i];
2741 struct iwl_mvm_reorder_buf_entry *entries =
2742 &data->entries[i * data->entries_per_queue];
2745 reorder_buf->num_stored = 0;
2746 reorder_buf->head_sn = ssn;
2747 reorder_buf->buf_size = buf_size;
2748 /* rx reorder timer */
2749 timer_setup(&reorder_buf->reorder_timer,
2750 iwl_mvm_reorder_timer_expired, 0);
2751 spin_lock_init(&reorder_buf->lock);
2752 reorder_buf->mvm = mvm;
2753 reorder_buf->queue = i;
2754 reorder_buf->valid = false;
2755 for (j = 0; j < reorder_buf->buf_size; j++)
2756 __skb_queue_head_init(&entries[j].e.frames);
2760 static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
2761 struct ieee80211_sta *sta,
2762 bool start, int tid, u16 ssn,
2765 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2766 struct iwl_mvm_add_sta_cmd cmd = {
2767 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
2768 .sta_id = mvm_sta->deflink.sta_id,
2769 .add_modify = STA_MODE_MODIFY,
2775 cmd.add_immediate_ba_tid = tid;
2776 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2777 cmd.rx_ba_window = cpu_to_le16(buf_size);
2778 cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
2780 cmd.remove_immediate_ba_tid = tid;
2781 cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
2784 status = ADD_STA_SUCCESS;
2785 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2786 iwl_mvm_add_sta_cmd_size(mvm),
2791 switch (status & IWL_ADD_STA_STATUS_MASK) {
2792 case ADD_STA_SUCCESS:
2793 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2794 start ? "start" : "stopp");
2795 if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
2796 !(status & IWL_ADD_STA_BAID_VALID_MASK)))
2798 return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
2799 case ADD_STA_IMMEDIATE_BA_FAILURE:
2800 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2803 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2804 start ? "start" : "stopp", status);
2809 static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
2810 struct ieee80211_sta *sta,
2811 bool start, int tid, u16 ssn,
2812 u16 buf_size, int baid)
2814 struct iwl_rx_baid_cfg_cmd cmd = {
2815 .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
2816 cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
2818 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
2821 BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
2824 cmd.alloc.sta_id_mask =
2825 cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
2826 cmd.alloc.tid = tid;
2827 cmd.alloc.ssn = cpu_to_le16(ssn);
2828 cmd.alloc.win_size = cpu_to_le16(buf_size);
2830 } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
2831 cmd.remove_v1.baid = cpu_to_le32(baid);
2832 BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
2834 cmd.remove.sta_id_mask =
2835 cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
2836 cmd.remove.tid = cpu_to_le32(tid);
2839 ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
2845 /* ignore firmware baid on remove */
2849 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2850 start ? "start" : "stopp");
2852 if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
2858 static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2859 bool start, int tid, u16 ssn, u16 buf_size,
2862 if (fw_has_capa(&mvm->fw->ucode_capa,
2863 IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
2864 return iwl_mvm_fw_baid_op_cmd(mvm, sta, start,
2865 tid, ssn, buf_size, baid);
2867 return iwl_mvm_fw_baid_op_sta(mvm, sta, start,
2868 tid, ssn, buf_size);
2871 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2872 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2874 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2875 struct iwl_mvm_baid_data *baid_data = NULL;
2877 u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
2880 lockdep_assert_held(&mvm->mutex);
2882 if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
2883 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2887 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2888 u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2890 /* sparse doesn't like the __align() so don't check */
2893 * The division below will be OK if either the cache line size
2894 * can be divided by the entry size (ALIGN will round up) or if
2895 * the entry size can be divided by the cache line size, in
2896 * which case the ALIGN() will do nothing.
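 * E.g. with 64-byte cache lines and hypothetical 32-byte entries,
 * ALIGN(n * 32, 64) is a multiple of 64 and hence of 32, so the
 * division below yields a whole number of entries per queue.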
2898 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2899 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2903 * Upward align the reorder buffer size to fill an entire cache
2904 * line for each queue, to avoid sharing cache lines between
2907 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2910 * Allocate here so if allocation fails we can bail out early
2911 * before starting the BA session in the firmware
2913 baid_data = kzalloc(sizeof(*baid_data) +
2914 mvm->trans->num_rx_queues *
2921 * This division is why we need the above BUILD_BUG_ON(),
2922 * if that doesn't hold then this will not be right.
2924 baid_data->entries_per_queue =
2925 reorder_buf_size / sizeof(baid_data->entries[0]);
2928 if (iwl_mvm_has_new_rx_api(mvm) && !start) {
2929 baid = mvm_sta->tid_to_baid[tid];
2931 /* we don't really need it in this case */
2935 /* Don't send command to remove (start=0) BAID during restart */
2936 if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2937 baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size,
2946 mvm->rx_ba_sessions++;
2948 if (!iwl_mvm_has_new_rx_api(mvm))
2951 baid_data->baid = baid;
2952 baid_data->timeout = timeout;
2953 baid_data->last_rx = jiffies;
2954 baid_data->rcu_ptr = &mvm->baid_map[baid];
2955 timer_setup(&baid_data->session_timer,
2956 iwl_mvm_rx_agg_session_expired, 0);
2957 baid_data->mvm = mvm;
2958 baid_data->tid = tid;
2959 baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
2961 mvm_sta->tid_to_baid[tid] = baid;
2963 mod_timer(&baid_data->session_timer,
2964 TU_TO_EXP_TIME(timeout * 2));
2966 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2968 * protect the BA data with RCU to cover a case where our
2969 * internal RX sync mechanism will timeout (not that it's
2970 * supposed to happen) and we will free the session data while
2971 * RX is being processed in parallel
2973 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2974 mvm_sta->deflink.sta_id, tid, baid);
2975 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2976 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2978 baid = mvm_sta->tid_to_baid[tid];
2980 if (mvm->rx_ba_sessions > 0)
2981 /* check that restart flow didn't zero the counter */
2982 mvm->rx_ba_sessions--;
2983 if (!iwl_mvm_has_new_rx_api(mvm))
2986 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2989 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2990 if (WARN_ON(!baid_data))
2993 /* synchronize all rx queues so we can safely delete */
2994 iwl_mvm_free_reorder(mvm, baid_data);
2995 timer_shutdown_sync(&baid_data->session_timer);
2996 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2997 kfree_rcu(baid_data, rcu_head);
2998 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
3001 * After we've deleted it, do another queue sync
3002 * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
3003 * running it won't find a new session in the old
3004 * BAID. It can find the NULL pointer for the BAID,
3005 * but we must not have it find a different session.
3007 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
3017 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3018 int tid, u8 queue, bool start)
3020 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3021 struct iwl_mvm_add_sta_cmd cmd = {};
3025 lockdep_assert_held(&mvm->mutex);
3028 mvm_sta->tfd_queue_msk |= BIT(queue);
3029 mvm_sta->tid_disable_agg &= ~BIT(tid);
3031 /* In DQA-mode the queue isn't removed on agg termination */
3032 mvm_sta->tid_disable_agg |= BIT(tid);
3035 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
3036 cmd.sta_id = mvm_sta->deflink.sta_id;
3037 cmd.add_modify = STA_MODE_MODIFY;
3038 if (!iwl_mvm_has_new_tx_api(mvm))
3039 cmd.modify_mask = STA_MODIFY_QUEUES;
3040 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
3041 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
3042 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
3044 status = ADD_STA_SUCCESS;
3045 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
3046 iwl_mvm_add_sta_cmd_size(mvm),
3051 switch (status & IWL_ADD_STA_STATUS_MASK) {
3052 case ADD_STA_SUCCESS:
3056 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
3057 start ? "start" : "stopp", status);
3064 const u8 tid_to_mac80211_ac[] = {
3073 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
3076 static const u8 tid_to_ucode_ac[] = {
3087 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3088 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3090 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3091 struct iwl_mvm_tid_data *tid_data;
3096 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
3099 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
3100 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
3102 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
3103 mvmsta->tid_data[tid].state);
3107 lockdep_assert_held(&mvm->mutex);
3109 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
3110 iwl_mvm_has_new_tx_api(mvm)) {
3111 u8 ac = tid_to_mac80211_ac[tid];
3113 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
3118 spin_lock_bh(&mvmsta->lock);
3121 * Note the possible cases:
3122 * 1. An enabled TXQ - TXQ needs to become agg'ed
3123 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
3126 txq_id = mvmsta->tid_data[tid].txq_id;
3127 if (txq_id == IWL_MVM_INVALID_QUEUE) {
3128 ret = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
3129 IWL_MVM_DQA_MIN_DATA_QUEUE,
3130 IWL_MVM_DQA_MAX_DATA_QUEUE);
3132 IWL_ERR(mvm, "Failed to allocate agg queue\n");
3138 /* TXQ hasn't yet been enabled, so mark it only as reserved */
3139 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
3140 } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
3142 IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
3143 tid, IWL_MAX_HW_QUEUES - 1);
3146 } else if (unlikely(mvm->queue_info[txq_id].status ==
3147 IWL_MVM_QUEUE_SHARED)) {
3149 IWL_DEBUG_TX_QUEUES(mvm,
3150 "Can't start tid %d agg on shared queue!\n",
3155 IWL_DEBUG_TX_QUEUES(mvm,
3156 "AGG for tid %d will be on queue #%d\n",
3159 tid_data = &mvmsta->tid_data[tid];
3160 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3161 tid_data->txq_id = txq_id;
3162 *ssn = tid_data->ssn;
3164 IWL_DEBUG_TX_QUEUES(mvm,
3165 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
3166 mvmsta->deflink.sta_id, tid, txq_id,
3168 tid_data->next_reclaimed);
3171 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
3172 * to align the wrap around of ssn so we compare relevant values.
3174 normalized_ssn = tid_data->ssn;
3175 if (mvm->trans->trans_cfg->gen2)
3176 normalized_ssn &= 0xff;
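/* e.g. on gen2, an ssn of 0x112 becomes 0x12 and correctly matches an
 * 8-bit next_reclaimed of 0x12
 */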
3178 if (normalized_ssn == tid_data->next_reclaimed) {
3179 tid_data->state = IWL_AGG_STARTING;
3180 ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
3182 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
3183 ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
3187 spin_unlock_bh(&mvmsta->lock);
3192 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3193 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
3196 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3197 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3198 unsigned int wdg_timeout =
3199 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
3201 bool alloc_queue = true;
3202 enum iwl_mvm_queue_status queue_status;
3205 struct iwl_trans_txq_scd_cfg cfg = {
3206 .sta_id = mvmsta->deflink.sta_id,
3208 .frame_limit = buf_size,
3213 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
3214 * manager, so this function should never be called in this case.
3216 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
3219 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
3220 != IWL_MAX_TID_COUNT);
3222 spin_lock_bh(&mvmsta->lock);
3223 ssn = tid_data->ssn;
3224 queue = tid_data->txq_id;
3225 tid_data->state = IWL_AGG_ON;
3226 mvmsta->agg_tids |= BIT(tid);
3227 tid_data->ssn = 0xffff;
3228 tid_data->amsdu_in_ampdu_allowed = amsdu;
3229 spin_unlock_bh(&mvmsta->lock);
3231 if (iwl_mvm_has_new_tx_api(mvm)) {
3233 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3234 * would have failed, so if we are here there is no need to
3236 * However, if aggregation size is different than the default
3237 * size, the scheduler should be reconfigured.
3238 * We cannot do this with the new TX API, so return unsupported
3239 * for now, until it is offloaded to firmware.
3240 * Note that if SCD default value changes - this condition
3241 * should be updated as well.
3243 if (buf_size < IWL_FRAME_LIMIT)
3246 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3252 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
3254 queue_status = mvm->queue_info[queue].status;
3256 /* Maybe there is no need to even alloc a queue... */
3257 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
3258 alloc_queue = false;
3261 * Only reconfig the SCD for the queue if the window size has
3262 * changed from current (become smaller)
3264 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
3266 * If reconfiguring an existing queue, it first must be
3269 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3273 "Error draining queue before reconfig\n");
3277 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3278 mvmsta->deflink.sta_id, tid,
3282 "Error reconfiguring TXQ #%d\n", queue);
3288 iwl_mvm_enable_txq(mvm, sta, queue, ssn,
3291 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
3292 if (queue_status != IWL_MVM_QUEUE_SHARED) {
3293 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3298 /* No need to mark as reserved */
3299 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
3303 * Even though in theory the peer could have different
3304 * aggregation reorder buffer sizes for different sessions,
3305 * our ucode doesn't allow for that and has a global limit
3306 * for each station. Therefore, use the minimum of all the
3307 * aggregation sessions and our default value.
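 * (e.g. if an earlier session negotiated 32 frames and this one allows
 * 64, the limit stays at 32)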
3309 mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
3310 min(mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize,
3312 mvmsta->deflink.lq_sta.rs_drv.lq.agg_frame_cnt_limit =
3313 mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize;
3315 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3318 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->deflink.lq_sta.rs_drv.lq);
3321 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3322 struct iwl_mvm_sta *mvmsta,
3323 struct iwl_mvm_tid_data *tid_data)
3325 u16 txq_id = tid_data->txq_id;
3327 lockdep_assert_held(&mvm->mutex);
3329 if (iwl_mvm_has_new_tx_api(mvm))
3333 * The TXQ is marked as reserved only if no traffic came through yet.
3334 * This means no traffic has been sent on this TID (agg'd or not), so
3335 * we no longer have use for the queue. It hasn't even been
3336 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
3339 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3340 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3341 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3345 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3346 struct ieee80211_sta *sta, u16 tid)
3348 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3349 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3354 * If mac80211 is cleaning its state, then say that we finished since
3355 * our state has been cleared anyway.
3357 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3358 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3362 spin_lock_bh(&mvmsta->lock);
3364 txq_id = tid_data->txq_id;
3366 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3367 mvmsta->deflink.sta_id, tid, txq_id,
3370 mvmsta->agg_tids &= ~BIT(tid);
3372 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3374 switch (tid_data->state) {
3376 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3378 IWL_DEBUG_TX_QUEUES(mvm,
3379 "ssn = %d, next_recl = %d\n",
3380 tid_data->ssn, tid_data->next_reclaimed);
3382 tid_data->ssn = 0xffff;
3383 tid_data->state = IWL_AGG_OFF;
3384 spin_unlock_bh(&mvmsta->lock);
3386 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3388 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3390 case IWL_AGG_STARTING:
3391 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3393 * The agg session has been stopped before it was set up. This
3394 * can happen when the AddBA timer times out for example.
3397 /* No barriers since we are under mutex */
3398 lockdep_assert_held(&mvm->mutex);
3400 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3401 tid_data->state = IWL_AGG_OFF;
3406 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3407 mvmsta->deflink.sta_id, tid, tid_data->state);
3409 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3413 spin_unlock_bh(&mvmsta->lock);
3418 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3419 struct ieee80211_sta *sta, u16 tid)
3421 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3422 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3424 enum iwl_mvm_agg_state old_state;
3427 * First set the agg state to OFF to avoid calling
3428 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3430 spin_lock_bh(&mvmsta->lock);
3431 txq_id = tid_data->txq_id;
3432 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3433 mvmsta->deflink.sta_id, tid, txq_id,
3435 old_state = tid_data->state;
3436 tid_data->state = IWL_AGG_OFF;
3437 mvmsta->agg_tids &= ~BIT(tid);
3438 spin_unlock_bh(&mvmsta->lock);
3440 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3442 if (old_state >= IWL_AGG_ON) {
3443 iwl_mvm_drain_sta(mvm, mvmsta, true);
3445 if (iwl_mvm_has_new_tx_api(mvm)) {
3446 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->deflink.sta_id,
3448 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3449 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3451 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
3452 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3453 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3456 iwl_mvm_drain_sta(mvm, mvmsta, false);
3458 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3464 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3466 int i, max = -1, max_offs = -1;
3468 lockdep_assert_held(&mvm->mutex);
3470 /* Pick the unused key offset with the highest 'deleted'
3471 * counter. Every time a key is deleted, all the counters
3472 * are incremented and the one that was just deleted is
3473 * reset to zero. Thus, the highest counter is the one
3474 * that was deleted longest ago. Pick that one.
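 * E.g. with three offsets, counters {2, 5, 0} and none in use, offset 1
 * (counter 5, deleted longest ago) is picked.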
3476 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3477 if (test_bit(i, mvm->fw_key_table))
3479 if (mvm->fw_key_deleted[i] > max) {
3480 max = mvm->fw_key_deleted[i];
3486 return STA_KEY_IDX_INVALID;
3491 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3492 struct ieee80211_vif *vif,
3493 struct ieee80211_sta *sta)
3495 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3498 return iwl_mvm_sta_from_mac80211(sta);
3501 * The device expects GTKs for station interfaces to be
3502 * installed as GTKs for the AP station. If we have no
3503 * station ID, then use AP's station ID.
3505 if (vif->type == NL80211_IFTYPE_STATION &&
3506 mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) {
3507 u8 sta_id = mvmvif->deflink.ap_sta_id;
3509 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3510 lockdep_is_held(&mvm->mutex));
3513 * It is possible that the 'sta' parameter is NULL,
3514 * for example when a GTK is removed - the sta_id will then
3515 * be the AP ID, and no station was passed by mac80211.
3517 if (IS_ERR_OR_NULL(sta))
3520 return iwl_mvm_sta_from_mac80211(sta);
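/* Byte-wise PN comparison, walking from the highest (most significant)
 * byte down.
 */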
3526 static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
3530 for (i = len - 1; i >= 0; i--) {
3531 if (pn1[i] > pn2[i])
3533 if (pn1[i] < pn2[i])
3540 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3542 struct ieee80211_key_conf *key, bool mcast,
3543 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3544 u8 key_offset, bool mfp)
3547 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3548 struct iwl_mvm_add_sta_key_cmd cmd;
3556 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3557 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3558 int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
3561 if (sta_id == IWL_MVM_INVALID_STA)
3564 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3565 STA_KEY_FLG_KEYID_MSK;
3566 key_flags = cpu_to_le16(keyidx);
3567 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3569 switch (key->cipher) {
3570 case WLAN_CIPHER_SUITE_TKIP:
3571 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3573 memcpy((void *)&u.cmd.tx_mic_key,
3574 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3577 memcpy((void *)&u.cmd.rx_mic_key,
3578 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3580 pn = atomic64_read(&key->tx_pn);
3583 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3584 for (i = 0; i < 5; i++)
3585 u.cmd_v1.tkip_rx_ttak[i] =
3586 cpu_to_le16(tkip_p1k[i]);
3588 memcpy(u.cmd.common.key, key->key, key->keylen);
3590 case WLAN_CIPHER_SUITE_CCMP:
3591 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3592 memcpy(u.cmd.common.key, key->key, key->keylen);
3594 pn = atomic64_read(&key->tx_pn);
3596 case WLAN_CIPHER_SUITE_WEP104:
3597 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3599 case WLAN_CIPHER_SUITE_WEP40:
3600 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3601 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3603 case WLAN_CIPHER_SUITE_GCMP_256:
3604 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3606 case WLAN_CIPHER_SUITE_GCMP:
3607 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3608 memcpy(u.cmd.common.key, key->key, key->keylen);
3610 pn = atomic64_read(&key->tx_pn);
3613 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3614 memcpy(u.cmd.common.key, key->key, key->keylen);
3618 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3620 key_flags |= cpu_to_le16(STA_KEY_MFP);
3622 u.cmd.common.key_offset = key_offset;
3623 u.cmd.common.key_flags = key_flags;
3624 u.cmd.common.sta_id = sta_id;
3626 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3631 for (; i < IEEE80211_NUM_TIDS; i++) {
3632 struct ieee80211_key_seq seq = {};
3633 u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
3635 /* there's a hole at 2/3 in FW format depending on version */
3636 int hole = api_ver >= 3 ? 0 : 2;
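/* i.e. with api_ver < 3 the PN bytes land at offsets 0, 1 and 4..7 of
 * the FW field, leaving bytes 2/3 unused
 */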
3638 ieee80211_get_key_rx_seq(key, i, &seq);
3640 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
3641 rx_pn[0] = seq.tkip.iv16;
3642 rx_pn[1] = seq.tkip.iv16 >> 8;
3643 rx_pn[2 + hole] = seq.tkip.iv32;
3644 rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
3645 rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
3646 rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
3647 } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
3649 rx_pn_len = seq.hw.seq_len;
3651 rx_pn[0] = seq.ccmp.pn[0];
3652 rx_pn[1] = seq.ccmp.pn[1];
3653 rx_pn[2 + hole] = seq.ccmp.pn[2];
3654 rx_pn[3 + hole] = seq.ccmp.pn[3];
3655 rx_pn[4 + hole] = seq.ccmp.pn[4];
3656 rx_pn[5 + hole] = seq.ccmp.pn[5];
3659 if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
3661 memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
3666 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3667 size = sizeof(u.cmd);
3669 size = sizeof(u.cmd_v1);
3672 status = ADD_STA_SUCCESS;
3673 if (cmd_flags & CMD_ASYNC)
3674 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3677 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3681 case ADD_STA_SUCCESS:
3682 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3686 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3693 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3694 struct ieee80211_key_conf *keyconf,
3695 u8 sta_id, bool remove_key)
3697 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3699 /* verify the key details match the required command's expectations */
3700 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3701 (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
3702 keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
3703 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3704 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3705 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3708 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3709 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3712 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3713 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3716 /* This is a valid situation for IGTK */
3717 if (sta_id == IWL_MVM_INVALID_STA)
3720 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3722 struct ieee80211_key_seq seq;
3725 switch (keyconf->cipher) {
3726 case WLAN_CIPHER_SUITE_AES_CMAC:
3727 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3729 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3730 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3731 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3737 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3738 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3739 igtk_cmd.ctrl_flags |=
3740 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3741 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3742 pn = seq.aes_cmac.pn;
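/* pn[] here is most-significant-byte first, so assemble the 48-bit
 * counter value numerically before the little-endian conversion
 */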
3743 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3744 ((u64) pn[4] << 8) |
3745 ((u64) pn[3] << 16) |
3746 ((u64) pn[2] << 24) |
3747 ((u64) pn[1] << 32) |
3748 ((u64) pn[0] << 40));
3751 IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
3752 remove_key ? "removing" : "installing",
3753 keyconf->keyidx >= 6 ? "B" : "",
3754 keyconf->keyidx, igtk_cmd.sta_id);
3756 if (!iwl_mvm_has_new_rx_api(mvm)) {
3757 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3758 .ctrl_flags = igtk_cmd.ctrl_flags,
3759 .key_id = igtk_cmd.key_id,
3760 .sta_id = igtk_cmd.sta_id,
3761 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3764 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3765 ARRAY_SIZE(igtk_cmd_v1.igtk));
3766 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3767 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3769 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3770 sizeof(igtk_cmd), &igtk_cmd);
3774 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3775 struct ieee80211_vif *vif,
3776 struct ieee80211_sta *sta)
3778 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3783 if (vif->type == NL80211_IFTYPE_STATION &&
3784 mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) {
3785 u8 sta_id = mvmvif->deflink.ap_sta_id;
3786 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3787 lockdep_is_held(&mvm->mutex));
3788 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
3798 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3799 struct ieee80211_vif *vif,
3800 struct ieee80211_sta *sta,
3801 struct ieee80211_key_conf *keyconf,
3806 struct ieee80211_key_seq seq;
3812 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3814 sta_id = mvm_sta->deflink.sta_id;
3816 } else if (vif->type == NL80211_IFTYPE_AP &&
3817 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3818 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3820 sta_id = mvmvif->deflink.mcast_sta.sta_id;
3822 IWL_ERR(mvm, "Failed to find station id\n");
3826 if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
3827 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3829 IWL_ERR(mvm, "Failed to find mac address\n");
3833 /* get phase 1 key from mac80211 */
3834 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3835 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3837 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3838 seq.tkip.iv32, p1k, 0, key_offset,
3842 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3843 0, NULL, 0, key_offset, mfp);
3846 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3847 struct ieee80211_vif *vif,
3848 struct ieee80211_sta *sta,
3849 struct ieee80211_key_conf *keyconf,
3852 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3853 struct iwl_mvm_sta *mvm_sta;
3854 u8 sta_id = IWL_MVM_INVALID_STA;
3856 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3858 lockdep_assert_held(&mvm->mutex);
3860 if (vif->type != NL80211_IFTYPE_AP ||
3861 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3862 /* Get the station id from the mvm local station table */
3863 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3865 IWL_ERR(mvm, "Failed to find station\n");
3868 sta_id = mvm_sta->deflink.sta_id;
3871 * It is possible that the 'sta' parameter is NULL, and thus
3872 * there is a need to retrieve the sta from the local station
3876 sta = rcu_dereference_protected(
3877 mvm->fw_id_to_mac_id[sta_id],
3878 lockdep_is_held(&mvm->mutex));
3879 if (IS_ERR_OR_NULL(sta)) {
3880 IWL_ERR(mvm, "Invalid station id\n");
3885 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3888 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3890 sta_id = mvmvif->deflink.mcast_sta.sta_id;
3893 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3894 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3895 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3896 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3900 /* If the key_offset is not pre-assigned, we need to find a
3901 * new offset to use. In normal cases, the offset is not
3902 * pre-assigned, but during HW_RESTART we want to reuse the
3903 * same indices, so we pass them when this function is called.
3905 * In D3 entry, we need to hardcode the indices (because the
3906 * firmware hardcodes the PTK offset to 0). In this case, we
3907 * need to make sure we don't overwrite the hw_key_idx in the
3908 * keyconf structure, because otherwise we cannot configure
3909 * the original ones back when resuming.
3911 if (key_offset == STA_KEY_IDX_INVALID) {
3912 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3913 if (key_offset == STA_KEY_IDX_INVALID)
3915 keyconf->hw_key_idx = key_offset;
3918 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3923 * For WEP, the same key is used for multicast and unicast. Upload it
3924 * again, using the same key offset, and now pointing the other one
3925 * to the same key slot (offset).
3926 * If this fails, remove the original as well.
3928 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3929 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3931 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3932 key_offset, !mcast);
3934 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3939 __set_bit(key_offset, mvm->fw_key_table);
3942 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3943 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3944 sta ? sta->addr : zero_addr, ret);
3948 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3949 struct ieee80211_vif *vif,
3950 struct ieee80211_sta *sta,
3951 struct ieee80211_key_conf *keyconf)
3953 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3954 struct iwl_mvm_sta *mvm_sta;
3955 u8 sta_id = IWL_MVM_INVALID_STA;
3958 lockdep_assert_held(&mvm->mutex);
3960 /* Get the station from the mvm local station table */
3961 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3963 sta_id = mvm_sta->deflink.sta_id;
3964 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3965 sta_id = iwl_mvm_vif_from_mac80211(vif)->deflink.mcast_sta.sta_id;
3968 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3969 keyconf->keyidx, sta_id);
3971 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3972 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3973 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3974 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3976 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3977 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3978 keyconf->hw_key_idx);
3982 /* track which key was deleted last */
3983 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3984 if (mvm->fw_key_deleted[i] < U8_MAX)
3985 mvm->fw_key_deleted[i]++;
3987 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
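/* the freed offset now has the lowest counter, so
 * iwl_mvm_set_fw_key_idx() will hand it out last
 */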
3989 if (sta && !mvm_sta) {
3990 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3994 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3998 /* delete WEP key twice to get rid of (now useless) offset */
3999 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
4000 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
4001 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
4006 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
4007 struct ieee80211_vif *vif,
4008 struct ieee80211_key_conf *keyconf,
4009 struct ieee80211_sta *sta, u32 iv32,
4012 struct iwl_mvm_sta *mvm_sta;
4013 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
4014 bool mfp = sta ? sta->mfp : false;
4018 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
4019 if (WARN_ON_ONCE(!mvm_sta))
4021 iwl_mvm_send_sta_key(mvm, mvm_sta->deflink.sta_id, keyconf, mcast,
4022 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
4029 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
4030 struct ieee80211_sta *sta)
4032 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4033 struct iwl_mvm_add_sta_cmd cmd = {
4034 .add_modify = STA_MODE_MODIFY,
4035 .sta_id = mvmsta->deflink.sta_id,
4036 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
4037 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
4041 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4042 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4044 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4047 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
4048 struct ieee80211_sta *sta,
4049 enum ieee80211_frame_release_type reason,
4050 u16 cnt, u16 tids, bool more_data,
4051 bool single_sta_queue)
4053 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4054 struct iwl_mvm_add_sta_cmd cmd = {
4055 .add_modify = STA_MODE_MODIFY,
4056 .sta_id = mvmsta->deflink.sta_id,
4057 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
4058 .sleep_tx_count = cpu_to_le16(cnt),
4059 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
4062 unsigned long _tids = tids;
4064 /* convert TIDs to ACs - we don't support TSPEC so that's OK
4065 * Note that this field is reserved and unused by firmware not
4066 * supporting GO uAPSD, so it's safe to always do this.
4068 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
4069 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
4071 /* If we're releasing frames from aggregation or dqa queues then check
4072 * if all the queues that we're releasing frames from, combined, have:
4073 * - more frames than the service period, in which case more_data
4075 * - fewer than 'cnt' frames, in which case we need to adjust the
4076 * firmware command (but do that unconditionally)
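 * E.g. with cnt == 4 and two TIDs holding 2 + 1 queued frames,
 * remaining ends up as 1 and sleep_tx_count becomes 3.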
4078 if (single_sta_queue) {
4079 int remaining = cnt;
4082 spin_lock_bh(&mvmsta->lock);
4083 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
4084 struct iwl_mvm_tid_data *tid_data;
4087 tid_data = &mvmsta->tid_data[tid];
4089 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
4090 if (n_queued > remaining) {
4095 remaining -= n_queued;
4097 sleep_tx_count = cnt - remaining;
4098 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
4099 mvmsta->sleep_tx_count = sleep_tx_count;
4100 spin_unlock_bh(&mvmsta->lock);
4102 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
4103 if (WARN_ON(cnt - remaining == 0)) {
4104 ieee80211_sta_eosp(sta);
4109 /* Note: this is ignored by firmware not supporting GO uAPSD */
4111 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
4113 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
4114 mvmsta->next_status_eosp = true;
4115 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
4117 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
4120 /* block the Tx queues until the FW updated the sleep Tx count */
4121 iwl_trans_block_txq_ptrs(mvm->trans, true);
4123 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
4124 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
4125 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4127 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4130 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
4131 struct iwl_rx_cmd_buffer *rxb)
4133 struct iwl_rx_packet *pkt = rxb_addr(rxb);
4134 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
4135 struct ieee80211_sta *sta;
4136 u32 sta_id = le32_to_cpu(notif->sta_id);
4138 if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
4142 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
4143 if (!IS_ERR_OR_NULL(sta))
4144 ieee80211_sta_eosp(sta);
4148 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
4149 struct iwl_mvm_sta *mvmsta,
4152 struct iwl_mvm_add_sta_cmd cmd = {
4153 .add_modify = STA_MODE_MODIFY,
4154 .sta_id = mvmsta->deflink.sta_id,
4155 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
4156 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
4157 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
4161 if (mvm->mld_api_is_used) {
4162 iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
4166 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4167 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4169 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4172 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
4173 struct ieee80211_sta *sta,
4176 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4178 if (mvm->mld_api_is_used) {
4179 iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
4183 spin_lock_bh(&mvm_sta->lock);
4185 if (mvm_sta->disable_tx == disable) {
4186 spin_unlock_bh(&mvm_sta->lock);
4190 mvm_sta->disable_tx = disable;
4193 * If sta PS state is handled by mac80211, tell it to start/stop
4194 * queuing tx for this station.
4196 if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
4197 ieee80211_sta_block_awake(mvm->hw, sta, disable);
4199 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
4201 spin_unlock_bh(&mvm_sta->lock);
4204 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
4205 struct iwl_mvm_vif *mvmvif,
4206 struct iwl_mvm_int_sta *sta,
4209 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
4210 struct iwl_mvm_add_sta_cmd cmd = {
4211 .add_modify = STA_MODE_MODIFY,
4212 .sta_id = sta->sta_id,
4213 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
4214 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
4215 .mac_id_n_color = cpu_to_le32(id),
4219 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4220 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4222 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4225 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
4226 struct iwl_mvm_vif *mvmvif,
4229 struct ieee80211_sta *sta;
4230 struct iwl_mvm_sta *mvm_sta;
4233 if (mvm->mld_api_is_used) {
4234 iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif, disable);
4240 /* Block/unblock all the stations of the given mvmvif */
4241 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
4242 sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
4243 if (IS_ERR_OR_NULL(sta))
4246 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4247 if (mvm_sta->mac_id_n_color !=
4248 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
4251 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
4256 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
4259 /* Need to block/unblock also multicast station */
4260 if (mvmvif->deflink.mcast_sta.sta_id != IWL_MVM_INVALID_STA)
4261 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4262 &mvmvif->deflink.mcast_sta,
4266 * Only unblock the broadcast station (FW blocks it for immediate
4267 * quiet, not the driver)
4269 if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_MVM_INVALID_STA)
4270 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4271 &mvmvif->deflink.bcast_sta,
4275 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
4277 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4278 struct iwl_mvm_sta *mvmsta;
4282 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->deflink.ap_sta_id);
4285 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
4290 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
4292 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
4295 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
4296 * to align the wrap around of ssn so we compare relevant values.
4298 if (mvm->trans->trans_cfg->gen2)
4301 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
4304 int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4305 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
4306 u8 *key, u32 key_len)
4310 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4311 struct ieee80211_key_conf *keyconf;
4312 unsigned int wdg_timeout =
4313 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
4314 bool mld = iwl_mvm_has_mld_api(mvm->fw);
4315 u32 type = mld ? STATION_TYPE_PEER : IWL_STA_LINK;
4317 ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
4318 NL80211_IFTYPE_UNSPECIFIED, type);
4323 ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, sta, addr,
4324 mvmvif->deflink.fw_link_id,
4329 ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id,
4330 mvmvif->color, addr, sta,
4332 IWL_MVM_TX_FIFO_BE);
4336 keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
4342 keyconf->cipher = cipher;
4343 memcpy(keyconf->key, key, key_len);
4344 keyconf->keylen = key_len;
4345 keyconf->flags = IEEE80211_KEY_FLAG_PAIRWISE;
4348 /* The MFP flag is set according to the station mfp field. Since
4349 * we don't have a station, set it manually.
4352 iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
4353 IWL_SEC_KEY_FLAG_MFP;
4354 u32 sta_mask = BIT(sta->sta_id);
4356 ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf);
4358 ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
4359 0, NULL, 0, 0, true);
4365 iwl_mvm_dealloc_int_sta(mvm, sta);
4369 void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
4370 struct ieee80211_vif *vif,
4373 struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
4374 .id = cpu_to_le32(id),
4378 ret = iwl_mvm_send_cmd_pdu(mvm,
4379 WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
4381 sizeof(cancel_channel_switch_cmd),
4382 &cancel_channel_switch_cmd);
4384 IWL_ERR(mvm, "Failed to cancel the channel switch\n");