/*
* DQA queue numbers
*
+ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
* @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
* @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
* that we are never left without the possibility to connect to an AP.
* @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT frames
* @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
*/
enum iwl_mvm_dqa_txq {
+ IWL_MVM_DQA_CMD_QUEUE = 0,
IWL_MVM_DQA_GCAST_QUEUE = 3,
IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
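/*
 * Illustration only: with the fixed layout above, classifying a queue
 * becomes a simple range check.  This sketch assumes the enum also
 * defines IWL_MVM_DQA_MAX_DATA_QUEUE (documented above but not shown
 * in this excerpt); iwl_mvm_is_dqa_managed_queue() is a hypothetical
 * helper, not part of the driver.
 */
static inline bool iwl_mvm_is_dqa_managed_queue(int queue)
{
	return queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
	       queue <= IWL_MVM_DQA_MAX_DATA_QUEUE;
}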
memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
- mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
+ if (iwl_mvm_is_dqa_supported(mvm))
+ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+ else
+ mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
.exclude_vif = exclude_vif,
.used_hw_queues =
BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
- BIT(mvm->aux_queue) |
- BIT(IWL_MVM_CMD_QUEUE),
+ BIT(mvm->aux_queue),
};
+ if (iwl_mvm_is_dqa_supported(mvm))
+ data.used_hw_queues |= BIT(IWL_MVM_DQA_CMD_QUEUE);
+ else
+ data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE);
+
lockdep_assert_held(&mvm->mutex);
/* mark all VIF used hw queues */
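/*
 * Illustration only: once the command queue is ORed into
 * used_hw_queues alongside the offchannel and aux queues, an unused TX
 * queue can be found by scanning for the first clear bit.
 * iwl_mvm_find_free_queue() is a hypothetical sketch, not the driver's
 * actual queue allocator.
 */
static int iwl_mvm_find_free_queue(u32 used_hw_queues, int max_queues)
{
	int q;

	for (q = 0; q < max_queues; q++)
		if (!(used_hw_queues & BIT(q)))
			return q;

	return -EBUSY;
}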
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
- trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+ if (iwl_mvm_is_dqa_supported(mvm))
+ trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+ else
+ trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
trans_cfg.scd_set_active = true;
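/*
 * Illustration only: the DQA-vs-legacy command queue choice above
 * repeats at three call sites (queue_info setup, used_hw_queues and
 * trans_cfg.cmd_queue).  A helper along these lines could centralize
 * it; iwl_mvm_get_cmd_queue() is a hypothetical sketch, not an actual
 * refactor from the driver.
 */
static inline u8 iwl_mvm_get_cmd_queue(struct iwl_mvm *mvm)
{
	return iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
					       IWL_MVM_CMD_QUEUE;
}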