/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
26 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
27 void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
28 void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);
30 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
31 struct wmi_scan_ev_arg *arg);
32 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
33 struct wmi_mgmt_rx_ev_arg *arg);
34 int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
35 struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
36 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
37 struct wmi_ch_info_ev_arg *arg);
38 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
39 struct wmi_vdev_start_ev_arg *arg);
40 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
41 struct wmi_peer_kick_ev_arg *arg);
42 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
43 struct wmi_swba_ev_arg *arg);
44 int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
45 struct wmi_phyerr_hdr_arg *arg);
46 int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
47 int left_len, struct wmi_phyerr_ev_arg *arg);
48 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
49 struct wmi_svc_rdy_ev_arg *arg);
50 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
51 struct wmi_rdy_ev_arg *arg);
52 int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
53 struct ath10k_fw_stats *stats);
54 int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
55 struct wmi_roam_ev_arg *arg);
56 int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
57 struct wmi_wow_ev_arg *arg);
58 int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
59 struct wmi_echo_ev_arg *arg);
60 int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
61 struct wmi_dfs_status_ev_arg *arg);
62 int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
63 struct wmi_svc_avail_ev_arg *arg);
65 enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
67 struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
68 struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
69 struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
70 u16 rd5g, u16 ctl2g, u16 ctl5g,
71 enum wmi_dfs_region dfs_reg);
72 struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
74 struct sk_buff *(*gen_init)(struct ath10k *ar);
75 struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
76 const struct wmi_start_scan_arg *arg);
77 struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
78 const struct wmi_stop_scan_arg *arg);
79 struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
80 enum wmi_vdev_type type,
81 enum wmi_vdev_subtype subtype,
82 const u8 macaddr[ETH_ALEN]);
83 struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
84 struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
85 const struct wmi_vdev_start_request_arg *arg,
87 struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
88 struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
90 struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
91 struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
92 u32 param_id, u32 param_value);
93 struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
94 const struct wmi_vdev_install_key_arg *arg);
95 struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
96 const struct wmi_vdev_spectral_conf_arg *arg);
97 struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
98 u32 trigger, u32 enable);
99 struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
100 const struct wmi_wmm_params_all_arg *arg);
101 struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
102 const u8 peer_addr[ETH_ALEN],
103 enum wmi_peer_type peer_type);
104 struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
105 const u8 peer_addr[ETH_ALEN]);
106 struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
107 const u8 peer_addr[ETH_ALEN],
109 struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
111 enum wmi_peer_param param_id,
113 struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
114 const struct wmi_peer_assoc_complete_arg *arg);
115 struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
116 enum wmi_sta_ps_mode psmode);
117 struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
118 enum wmi_sta_powersave_param param_id,
120 struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
122 enum wmi_ap_ps_peer_param param_id,
124 struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
125 const struct wmi_scan_chan_list_arg *arg);
126 struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
128 struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
129 const void *bcn, size_t bcn_len,
130 u32 bcn_paddr, bool dtim_zero,
132 struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
133 const struct wmi_wmm_params_all_arg *arg);
134 struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
135 struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
136 enum wmi_force_fw_hang_type type,
138 struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
139 struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
142 struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
144 struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
145 struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
146 struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
147 u32 period, u32 duration,
150 struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
151 struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
153 struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
154 const u8 *mac, u32 tid, u32 buf_size);
155 struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
156 const u8 *mac, u32 tid,
158 struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
159 const u8 *mac, u32 tid, u32 initiator,
161 struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
162 u32 tim_ie_offset, struct sk_buff *bcn,
163 u32 prb_caps, u32 prb_erp,
164 void *prb_ies, size_t prb_ies_len);
165 struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
166 struct sk_buff *bcn);
167 struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
169 struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
170 const u8 peer_addr[ETH_ALEN],
171 const struct wmi_sta_uapsd_auto_trig_arg *args,
173 struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
174 const struct wmi_sta_keepalive_arg *arg);
175 struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
176 struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
177 enum wmi_wow_wakeup_event event,
179 struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
180 struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
186 struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
188 struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
190 enum wmi_tdls_state state);
191 struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
192 const struct wmi_tdls_peer_update_cmd_arg *arg,
193 const struct wmi_tdls_peer_capab_arg *cap,
194 const struct wmi_channel_arg *chan);
195 struct sk_buff *(*gen_radar_found)
197 const struct ath10k_radar_found_info *arg);
198 struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
199 struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
201 void (*fw_stats_fill)(struct ath10k *ar,
202 struct ath10k_fw_stats *fw_stats,
204 struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
208 struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
209 enum wmi_host_platform_type type,
210 u32 fw_feature_bitmap);
211 int (*get_vdev_subtype)(struct ath10k *ar,
212 enum wmi_vdev_subtype subtype);
213 struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
215 struct wmi_pno_scan_req *pno_scan);
216 struct sk_buff *(*gen_pdev_bss_chan_info_req)
218 enum wmi_bss_survey_req_type type);
219 struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
220 struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
222 struct sk_buff *(*gen_bb_timing)
224 const struct wmi_bb_timing_cfg_arg *arg);
228 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
231 ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
233 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
236 ar->wmi.ops->rx(ar, skb);
241 ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
244 if (!ar->wmi.ops->map_svc)
247 ar->wmi.ops->map_svc(in, out, len);
252 ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
255 if (!ar->wmi.ops->map_svc_ext)
258 ar->wmi.ops->map_svc_ext(in, out, len);
263 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
264 struct wmi_scan_ev_arg *arg)
266 if (!ar->wmi.ops->pull_scan)
269 return ar->wmi.ops->pull_scan(ar, skb, arg);
273 ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
274 struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
276 if (!ar->wmi.ops->pull_mgmt_tx_compl)
279 return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
283 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
284 struct wmi_mgmt_rx_ev_arg *arg)
286 if (!ar->wmi.ops->pull_mgmt_rx)
289 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
293 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
294 struct wmi_ch_info_ev_arg *arg)
296 if (!ar->wmi.ops->pull_ch_info)
299 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
303 ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
304 struct wmi_vdev_start_ev_arg *arg)
306 if (!ar->wmi.ops->pull_vdev_start)
309 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
313 ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
314 struct wmi_peer_kick_ev_arg *arg)
316 if (!ar->wmi.ops->pull_peer_kick)
319 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
323 ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
324 struct wmi_swba_ev_arg *arg)
326 if (!ar->wmi.ops->pull_swba)
329 return ar->wmi.ops->pull_swba(ar, skb, arg);
333 ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
334 struct wmi_phyerr_hdr_arg *arg)
336 if (!ar->wmi.ops->pull_phyerr_hdr)
339 return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
343 ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
344 int left_len, struct wmi_phyerr_ev_arg *arg)
346 if (!ar->wmi.ops->pull_phyerr)
349 return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
353 ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
354 struct wmi_svc_rdy_ev_arg *arg)
356 if (!ar->wmi.ops->pull_svc_rdy)
359 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
363 ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
364 struct wmi_rdy_ev_arg *arg)
366 if (!ar->wmi.ops->pull_rdy)
369 return ar->wmi.ops->pull_rdy(ar, skb, arg);
373 ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
374 struct wmi_svc_avail_ev_arg *arg)
376 if (!ar->wmi.ops->pull_svc_avail)
378 return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
382 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
383 struct ath10k_fw_stats *stats)
385 if (!ar->wmi.ops->pull_fw_stats)
388 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
392 ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
393 struct wmi_roam_ev_arg *arg)
395 if (!ar->wmi.ops->pull_roam_ev)
398 return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
402 ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
403 struct wmi_wow_ev_arg *arg)
405 if (!ar->wmi.ops->pull_wow_event)
408 return ar->wmi.ops->pull_wow_event(ar, skb, arg);
412 ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
413 struct wmi_echo_ev_arg *arg)
415 if (!ar->wmi.ops->pull_echo_ev)
418 return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
422 ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
423 struct wmi_dfs_status_ev_arg *arg)
425 if (!ar->wmi.ops->pull_dfs_status_ev)
428 return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
431 static inline enum wmi_txbf_conf
432 ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
434 if (!ar->wmi.ops->get_txbf_conf_scheme)
435 return WMI_TXBF_CONF_UNSUPPORTED;
437 return ar->wmi.ops->get_txbf_conf_scheme(ar);
441 ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
447 if (!ar->wmi.ops->gen_mgmt_tx_send)
450 skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
454 ret = ath10k_wmi_cmd_send(ar, skb,
455 ar->wmi.cmd->mgmt_tx_send_cmdid);
463 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
465 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
469 if (!ar->wmi.ops->gen_mgmt_tx)
472 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
476 ret = ath10k_wmi_cmd_send(ar, skb,
477 ar->wmi.cmd->mgmt_tx_cmdid);
481 /* FIXME There's no ACK event for Management Tx. This probably
482 * shouldn't be called here either.
484 info->flags |= IEEE80211_TX_STAT_ACK;
485 ieee80211_tx_status_irqsafe(ar->hw, msdu);
491 ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
492 u16 ctl2g, u16 ctl5g,
493 enum wmi_dfs_region dfs_reg)
497 if (!ar->wmi.ops->gen_pdev_set_rd)
500 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
505 return ath10k_wmi_cmd_send(ar, skb,
506 ar->wmi.cmd->pdev_set_regdomain_cmdid);
510 ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
514 if (!ar->wmi.ops->gen_pdev_suspend)
517 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
521 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
525 ath10k_wmi_pdev_resume_target(struct ath10k *ar)
529 if (!ar->wmi.ops->gen_pdev_resume)
532 skb = ar->wmi.ops->gen_pdev_resume(ar);
536 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
540 ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
544 if (!ar->wmi.ops->gen_pdev_set_param)
547 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
551 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
555 ath10k_wmi_cmd_init(struct ath10k *ar)
559 if (!ar->wmi.ops->gen_init)
562 skb = ar->wmi.ops->gen_init(ar);
566 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
570 ath10k_wmi_start_scan(struct ath10k *ar,
571 const struct wmi_start_scan_arg *arg)
575 if (!ar->wmi.ops->gen_start_scan)
578 skb = ar->wmi.ops->gen_start_scan(ar, arg);
582 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
586 ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
590 if (!ar->wmi.ops->gen_stop_scan)
593 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
597 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
601 ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
602 enum wmi_vdev_type type,
603 enum wmi_vdev_subtype subtype,
604 const u8 macaddr[ETH_ALEN])
608 if (!ar->wmi.ops->gen_vdev_create)
611 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
615 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
619 ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
623 if (!ar->wmi.ops->gen_vdev_delete)
626 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
630 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
634 ath10k_wmi_vdev_start(struct ath10k *ar,
635 const struct wmi_vdev_start_request_arg *arg)
639 if (!ar->wmi.ops->gen_vdev_start)
642 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
646 return ath10k_wmi_cmd_send(ar, skb,
647 ar->wmi.cmd->vdev_start_request_cmdid);
651 ath10k_wmi_vdev_restart(struct ath10k *ar,
652 const struct wmi_vdev_start_request_arg *arg)
656 if (!ar->wmi.ops->gen_vdev_start)
659 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
663 return ath10k_wmi_cmd_send(ar, skb,
664 ar->wmi.cmd->vdev_restart_request_cmdid);
668 ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
672 if (!ar->wmi.ops->gen_vdev_stop)
675 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
679 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
683 ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
687 if (!ar->wmi.ops->gen_vdev_up)
690 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
694 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
698 ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
702 if (!ar->wmi.ops->gen_vdev_down)
705 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
709 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
713 ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
718 if (!ar->wmi.ops->gen_vdev_set_param)
721 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
726 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
730 ath10k_wmi_vdev_install_key(struct ath10k *ar,
731 const struct wmi_vdev_install_key_arg *arg)
735 if (!ar->wmi.ops->gen_vdev_install_key)
738 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
742 return ath10k_wmi_cmd_send(ar, skb,
743 ar->wmi.cmd->vdev_install_key_cmdid);
747 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
748 const struct wmi_vdev_spectral_conf_arg *arg)
753 if (!ar->wmi.ops->gen_vdev_spectral_conf)
756 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
760 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
761 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
765 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
771 if (!ar->wmi.ops->gen_vdev_spectral_enable)
774 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
779 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
780 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
784 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
785 const u8 peer_addr[ETH_ALEN],
786 const struct wmi_sta_uapsd_auto_trig_arg *args,
792 if (!ar->wmi.ops->gen_vdev_sta_uapsd)
795 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
800 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
801 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
805 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
806 const struct wmi_wmm_params_all_arg *arg)
811 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
815 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
816 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
820 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
821 const u8 peer_addr[ETH_ALEN],
822 enum wmi_peer_type peer_type)
826 if (!ar->wmi.ops->gen_peer_create)
829 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
833 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
837 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
838 const u8 peer_addr[ETH_ALEN])
842 if (!ar->wmi.ops->gen_peer_delete)
845 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
849 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
853 ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
854 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
858 if (!ar->wmi.ops->gen_peer_flush)
861 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
865 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
869 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
870 enum wmi_peer_param param_id, u32 param_value)
874 if (!ar->wmi.ops->gen_peer_set_param)
877 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
882 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
886 ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
887 enum wmi_sta_ps_mode psmode)
891 if (!ar->wmi.ops->gen_set_psmode)
894 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
898 return ath10k_wmi_cmd_send(ar, skb,
899 ar->wmi.cmd->sta_powersave_mode_cmdid);
903 ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
904 enum wmi_sta_powersave_param param_id, u32 value)
908 if (!ar->wmi.ops->gen_set_sta_ps)
911 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
915 return ath10k_wmi_cmd_send(ar, skb,
916 ar->wmi.cmd->sta_powersave_param_cmdid);
920 ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
921 enum wmi_ap_ps_peer_param param_id, u32 value)
925 if (!ar->wmi.ops->gen_set_ap_ps)
928 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
932 return ath10k_wmi_cmd_send(ar, skb,
933 ar->wmi.cmd->ap_ps_peer_param_cmdid);
937 ath10k_wmi_scan_chan_list(struct ath10k *ar,
938 const struct wmi_scan_chan_list_arg *arg)
942 if (!ar->wmi.ops->gen_scan_chan_list)
945 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
949 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
953 ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
958 prob_req_oui = (((u32)mac_addr[0]) << 16) |
959 (((u32)mac_addr[1]) << 8) | mac_addr[2];
961 if (!ar->wmi.ops->gen_scan_prob_req_oui)
964 skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
968 return ath10k_wmi_cmd_send(ar, skb,
969 ar->wmi.cmd->scan_prob_req_oui_cmdid);
973 ath10k_wmi_peer_assoc(struct ath10k *ar,
974 const struct wmi_peer_assoc_complete_arg *arg)
978 if (!ar->wmi.ops->gen_peer_assoc)
981 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
985 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
989 ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
990 const void *bcn, size_t bcn_len,
991 u32 bcn_paddr, bool dtim_zero,
997 if (!ar->wmi.ops->gen_beacon_dma)
1000 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
1001 dtim_zero, deliver_cab);
1003 return PTR_ERR(skb);
1005 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
1006 ar->wmi.cmd->pdev_send_bcn_cmdid);
1016 ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
1017 const struct wmi_wmm_params_all_arg *arg)
1019 struct sk_buff *skb;
1021 if (!ar->wmi.ops->gen_pdev_set_wmm)
1024 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
1026 return PTR_ERR(skb);
1028 return ath10k_wmi_cmd_send(ar, skb,
1029 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
1033 ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
1035 struct sk_buff *skb;
1037 if (!ar->wmi.ops->gen_request_stats)
1040 skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
1042 return PTR_ERR(skb);
1044 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
1048 ath10k_wmi_force_fw_hang(struct ath10k *ar,
1049 enum wmi_force_fw_hang_type type, u32 delay_ms)
1051 struct sk_buff *skb;
1053 if (!ar->wmi.ops->gen_force_fw_hang)
1056 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
1058 return PTR_ERR(skb);
1060 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
1064 ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
1066 struct sk_buff *skb;
1068 if (!ar->wmi.ops->gen_dbglog_cfg)
1071 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
1073 return PTR_ERR(skb);
1075 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
1079 ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
1081 struct sk_buff *skb;
1083 if (!ar->wmi.ops->gen_pktlog_enable)
1086 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
1088 return PTR_ERR(skb);
1090 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
1094 ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
1096 struct sk_buff *skb;
1098 if (!ar->wmi.ops->gen_pktlog_disable)
1101 skb = ar->wmi.ops->gen_pktlog_disable(ar);
1103 return PTR_ERR(skb);
1105 return ath10k_wmi_cmd_send(ar, skb,
1106 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
1110 ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
1111 u32 next_offset, u32 enabled)
1113 struct sk_buff *skb;
1115 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
1118 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
1119 next_offset, enabled);
1121 return PTR_ERR(skb);
1123 return ath10k_wmi_cmd_send(ar, skb,
1124 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
1128 ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
1130 struct sk_buff *skb;
1132 if (!ar->wmi.ops->gen_pdev_get_temperature)
1135 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
1137 return PTR_ERR(skb);
1139 return ath10k_wmi_cmd_send(ar, skb,
1140 ar->wmi.cmd->pdev_get_temperature_cmdid);
1144 ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
1146 struct sk_buff *skb;
1148 if (!ar->wmi.ops->gen_addba_clear_resp)
1151 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
1153 return PTR_ERR(skb);
1155 return ath10k_wmi_cmd_send(ar, skb,
1156 ar->wmi.cmd->addba_clear_resp_cmdid);
1160 ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1161 u32 tid, u32 buf_size)
1163 struct sk_buff *skb;
1165 if (!ar->wmi.ops->gen_addba_send)
1168 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1170 return PTR_ERR(skb);
1172 return ath10k_wmi_cmd_send(ar, skb,
1173 ar->wmi.cmd->addba_send_cmdid);
1177 ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1178 u32 tid, u32 status)
1180 struct sk_buff *skb;
1182 if (!ar->wmi.ops->gen_addba_set_resp)
1185 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1187 return PTR_ERR(skb);
1189 return ath10k_wmi_cmd_send(ar, skb,
1190 ar->wmi.cmd->addba_set_resp_cmdid);
1194 ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1195 u32 tid, u32 initiator, u32 reason)
1197 struct sk_buff *skb;
1199 if (!ar->wmi.ops->gen_delba_send)
1202 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1205 return PTR_ERR(skb);
1207 return ath10k_wmi_cmd_send(ar, skb,
1208 ar->wmi.cmd->delba_send_cmdid);
1212 ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1213 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1214 void *prb_ies, size_t prb_ies_len)
1216 struct sk_buff *skb;
1218 if (!ar->wmi.ops->gen_bcn_tmpl)
1221 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1222 prb_caps, prb_erp, prb_ies,
1225 return PTR_ERR(skb);
1227 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1231 ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1233 struct sk_buff *skb;
1235 if (!ar->wmi.ops->gen_prb_tmpl)
1238 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1240 return PTR_ERR(skb);
1242 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1246 ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1248 struct sk_buff *skb;
1250 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1253 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1255 return PTR_ERR(skb);
1257 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1261 ath10k_wmi_sta_keepalive(struct ath10k *ar,
1262 const struct wmi_sta_keepalive_arg *arg)
1264 struct sk_buff *skb;
1267 if (!ar->wmi.ops->gen_sta_keepalive)
1270 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1272 return PTR_ERR(skb);
1274 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1275 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1279 ath10k_wmi_wow_enable(struct ath10k *ar)
1281 struct sk_buff *skb;
1284 if (!ar->wmi.ops->gen_wow_enable)
1287 skb = ar->wmi.ops->gen_wow_enable(ar);
1289 return PTR_ERR(skb);
1291 cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1292 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1296 ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1297 enum wmi_wow_wakeup_event event,
1300 struct sk_buff *skb;
1303 if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1306 skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1308 return PTR_ERR(skb);
1310 cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1311 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1315 ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1317 struct sk_buff *skb;
1320 if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1323 skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1325 return PTR_ERR(skb);
1327 cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1328 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1332 ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1333 const u8 *pattern, const u8 *mask,
1334 int pattern_len, int pattern_offset)
1336 struct sk_buff *skb;
1339 if (!ar->wmi.ops->gen_wow_add_pattern)
1342 skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1343 pattern, mask, pattern_len,
1346 return PTR_ERR(skb);
1348 cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1349 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1353 ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1355 struct sk_buff *skb;
1358 if (!ar->wmi.ops->gen_wow_del_pattern)
1361 skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1363 return PTR_ERR(skb);
1365 cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1366 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1370 ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
1371 struct wmi_pno_scan_req *pno_scan)
1373 struct sk_buff *skb;
1376 if (!ar->wmi.ops->gen_wow_config_pno)
1379 skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
1381 return PTR_ERR(skb);
1383 cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
1384 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1388 ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1389 enum wmi_tdls_state state)
1391 struct sk_buff *skb;
1393 if (!ar->wmi.ops->gen_update_fw_tdls_state)
1396 skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1398 return PTR_ERR(skb);
1400 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1404 ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1405 const struct wmi_tdls_peer_update_cmd_arg *arg,
1406 const struct wmi_tdls_peer_capab_arg *cap,
1407 const struct wmi_channel_arg *chan)
1409 struct sk_buff *skb;
1411 if (!ar->wmi.ops->gen_tdls_peer_update)
1414 skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1416 return PTR_ERR(skb);
1418 return ath10k_wmi_cmd_send(ar, skb,
1419 ar->wmi.cmd->tdls_peer_update_cmdid);
1423 ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1425 struct sk_buff *skb;
1427 if (!ar->wmi.ops->gen_adaptive_qcs)
1430 skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1432 return PTR_ERR(skb);
1434 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
1438 ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
1440 struct sk_buff *skb;
1442 if (!ar->wmi.ops->gen_pdev_get_tpc_config)
1445 skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
1448 return PTR_ERR(skb);
1450 return ath10k_wmi_cmd_send(ar, skb,
1451 ar->wmi.cmd->pdev_get_tpc_config_cmdid);
1455 ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
1458 if (!ar->wmi.ops->fw_stats_fill)
1461 ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
1466 ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
1467 u32 detect_level, u32 detect_margin)
1469 struct sk_buff *skb;
1471 if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
1474 skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
1479 return PTR_ERR(skb);
1481 return ath10k_wmi_cmd_send(ar, skb,
1482 ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
1486 ath10k_wmi_ext_resource_config(struct ath10k *ar,
1487 enum wmi_host_platform_type type,
1488 u32 fw_feature_bitmap)
1490 struct sk_buff *skb;
1492 if (!ar->wmi.ops->ext_resource_config)
1495 skb = ar->wmi.ops->ext_resource_config(ar, type,
1499 return PTR_ERR(skb);
1501 return ath10k_wmi_cmd_send(ar, skb,
1502 ar->wmi.cmd->ext_resource_cfg_cmdid);
1506 ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
1508 if (!ar->wmi.ops->get_vdev_subtype)
1511 return ar->wmi.ops->get_vdev_subtype(ar, subtype);
1515 ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
1516 enum wmi_bss_survey_req_type type)
1518 struct ath10k_wmi *wmi = &ar->wmi;
1519 struct sk_buff *skb;
1521 if (!wmi->ops->gen_pdev_bss_chan_info_req)
1524 skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
1526 return PTR_ERR(skb);
1528 return ath10k_wmi_cmd_send(ar, skb,
1529 wmi->cmd->pdev_bss_chan_info_request_cmdid);
1533 ath10k_wmi_echo(struct ath10k *ar, u32 value)
1535 struct ath10k_wmi *wmi = &ar->wmi;
1536 struct sk_buff *skb;
1538 if (!wmi->ops->gen_echo)
1541 skb = wmi->ops->gen_echo(ar, value);
1543 return PTR_ERR(skb);
1545 return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
1549 ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
1551 struct sk_buff *skb;
1553 if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
1556 skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
1559 return PTR_ERR(skb);
1561 return ath10k_wmi_cmd_send(ar, skb,
1562 ar->wmi.cmd->pdev_get_tpc_table_cmdid);
1566 ath10k_wmi_report_radar_found(struct ath10k *ar,
1567 const struct ath10k_radar_found_info *arg)
1569 struct sk_buff *skb;
1571 if (!ar->wmi.ops->gen_radar_found)
1574 skb = ar->wmi.ops->gen_radar_found(ar, arg);
1576 return PTR_ERR(skb);
1578 return ath10k_wmi_cmd_send(ar, skb,
1579 ar->wmi.cmd->radar_found_cmdid);
1583 ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
1584 const struct wmi_bb_timing_cfg_arg *arg)
1586 struct sk_buff *skb;
1588 if (!ar->wmi.ops->gen_bb_timing)
1591 skb = ar->wmi.ops->gen_bb_timing(ar, arg);
1594 return PTR_ERR(skb);
1596 return ath10k_wmi_cmd_send(ar, skb,
1597 ar->wmi.cmd->set_bb_timing_cmdid);