ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
ar->max_num_peers = TARGET_10_4_NUM_PEERS;
const struct wmi_ops *ops;
const struct wmi_peer_flags_map *peer_flags;
+ u32 mgmt_max_num_pending_tx;
+
+ /* Protected by data_lock */
+ struct idr mgmt_pending_tx;
+
u32 num_mem_chunks;
u32 rx_decap_mode;
struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
+#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)
/* Target specific defines for WMI-HL-1.0 firmware */
#define TARGET_HL_10_TLV_NUM_PEERS 14
struct wmi_scan_ev_arg *arg);
int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg);
+ int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_ch_info_ev_arg *arg);
int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
}
static inline int
+ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_tx_compl)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
+}
+
+static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
{
case WMI_TLV_TDLS_PEER_EVENTID:
ath10k_wmi_event_tdls_peer(ar, skb);
break;
+ case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
+ ath10k_wmi_event_mgmt_tx_compl(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
return 0;
}
+static int
+ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_mgmt_tx_compl_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->desc_id = ev->desc_id;
+ arg->status = ev->status;
+ arg->pdev_id = ev->pdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
return skb;
}
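+/* Allocate an idr entry (desc_id) for a management frame that is about to
+ * be sent by reference and remember its skb and DMA address so the tx
+ * completion handler can find them again.
+ */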
+static int
+ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
+ dma_addr_t paddr)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ int ret;
+
+ pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
+ if (!pkt_addr)
+ return -ENOMEM;
+
+ pkt_addr->vaddr = skb;
+ pkt_addr->paddr = paddr;
+
+ spin_lock_bh(&ar->data_lock);
+ ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
+ wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
+ return ret;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
dma_addr_t paddr)
u32 buf_len = msdu->len;
struct wmi_tlv *tlv;
struct sk_buff *skb;
+ int len, desc_id;
u32 vdev_id;
void *ptr;
- int len;
if (!cb->vif)
return ERR_PTR(-EINVAL);
if (!skb)
return ERR_PTR(-ENOMEM);
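+ /* Reserve a desc_id for this frame before building the command; the
+ * firmware echoes it back in the tx completion event.
+ */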
+ desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
+ if (desc_id < 0)
+ goto err_free_skb;
+
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
- cmd->desc_id = 0;
+ cmd->desc_id = __cpu_to_le32(desc_id);
cmd->chanfreq = 0;
cmd->buf_len = __cpu_to_le32(buf_len);
cmd->frame_len = __cpu_to_le32(msdu->len);
memcpy(ptr, msdu->data, buf_len);
return skb;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+ return ERR_PTR(desc_id);
}
static struct sk_buff *
.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
+ .pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_TLV_MGMT_TX_COMPLETION_EVENTID,
WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
WMI_TLV_BA_RSP_SSN_EVENTID,
u8 value[0];
} __packed;
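+/* Bookkeeping for a management frame sent by reference: the skb and the
+ * DMA address it was mapped to, looked up by desc_id in mgmt_pending_tx.
+ */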
+struct ath10k_mgmt_tx_pkt_addr {
+ void *vaddr;
+ dma_addr_t paddr;
+};
+
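+/* Fixed part of WMI_TLV_MGMT_TX_COMPLETION_EVENTID */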
+struct wmi_tlv_mgmt_tx_compl_ev {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+};
+
#define WMI_TLV_MGMT_RX_NUM_RSSI 4
struct wmi_tlv_mgmt_rx_ev {
return true;
}
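+/* Look up the skb for a completed management frame by desc_id, unmap it
+ * and report the tx status to mac80211.
+ */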
+static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
+ u32 status)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *msdu;
+ int ret;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id);
+ if (!pkt_addr) {
+ ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
+ desc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_TO_DEVICE);
+ info = IEEE80211_SKB_CB(msdu);
+ kfree(pkt_addr);
+
+ /* A status of zero from firmware means the frame was acked */
+ if (!status)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ ret = 0;
+
+out:
+ idr_remove(&wmi->mgmt_pending_tx, desc_id);
+ spin_unlock_bh(&ar->data_lock);
+ return ret;
+}
+
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
+ int ret;
+
+ ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
+ return ret;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_id),
+ __le32_to_cpu(arg.status));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event mgmt tx completion\n");
+
+ return 0;
+}
+
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
INIT_WORK(&ar->radar_confirmation_work,
ath10k_radar_confirmation_work);
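+ /* Pending management tx frames are only tracked when the firmware
+ * supports sending them by reference.
+ */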
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ idr_init(&ar->wmi.mgmt_pending_tx);
+ }
+
return 0;
}
ar->wmi.num_mem_chunks = 0;
}
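+/* idr_for_each() callback: release a management frame that never
+ * received a tx completion from firmware.
+ */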
+static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
+ void *ctx)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
+ struct ath10k *ar = ctx;
+ struct sk_buff *msdu;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "force cleanup mgmt msdu_id %hu\n", msdu_id);
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_TO_DEVICE);
+ ieee80211_free_txskb(ar->hw, msdu);
+ kfree(pkt_addr);
+
+ return 0;
+}
+
void ath10k_wmi_detach(struct ath10k *ar)
{
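+ /* Free any management frames still awaiting a tx completion before
+ * the idr is destroyed.
+ */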
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ spin_lock_bh(&ar->data_lock);
+ idr_for_each(&ar->wmi.mgmt_pending_tx,
+ ath10k_wmi_mgmt_tx_clean_up_pending, ar);
+ idr_destroy(&ar->wmi.mgmt_pending_tx);
+ spin_unlock_bh(&ar->data_lock);
+ }
+
cancel_work_sync(&ar->svc_rdy_work);
if (ar->svc_rdy_skb)
__le32 vdev_id;
};
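+/* Parsed form of the management tx completion event */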
+struct wmi_tlv_mgmt_tx_compl_ev_arg {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+};
+
struct wmi_mgmt_rx_ev_arg {
__le32 channel;
__le32 snr;
int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);