/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 - 2021, 2023 Intel Corporation
 */
#include <net/cfg80211.h>
#include "core.h"
#include "nl80211.h"
#include "rdev-ops.h"
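
/*
 * Parse and validate one FTM measurement request for a single peer,
 * checking the requested parameters against the capabilities the driver
 * advertised in wiphy->pmsr_capa and filling in out->ftm.
 */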
static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
			  struct nlattr *ftmreq,
			  struct cfg80211_pmsr_request_peer *out,
			  struct genl_info *info)
{
	const struct cfg80211_pmsr_capabilities *capa = rdev->wiphy.pmsr_capa;
	struct nlattr *tb[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1];
	u32 preamble = NL80211_PREAMBLE_DMG; /* only optional in DMG */

	/* validate existing data */
	if (!(rdev->wiphy.pmsr_capa->ftm.bandwidths & BIT(out->chandef.width))) {
		NL_SET_ERR_MSG(info->extack, "FTM: unsupported bandwidth");
		return -EINVAL;
	}

	/* no validation needed - was already done via nested policy */
	nla_parse_nested_deprecated(tb, NL80211_PMSR_FTM_REQ_ATTR_MAX, ftmreq,
				    NULL, NULL);

	if (tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE])
		preamble = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]);

	/* set up values - struct is 0-initialized */
	out->ftm.requested = true;

	switch (out->chandef.chan->band) {
	case NL80211_BAND_60GHZ:
		/* optional */
		break;
	default:
		if (!tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) {
			NL_SET_ERR_MSG(info->extack,
				       "FTM: must specify preamble");
			return -EINVAL;
		}
	}

	if (!(capa->ftm.preambles & BIT(preamble))) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
				    "FTM: invalid preamble");
		return -EINVAL;
	}

	out->ftm.preamble = preamble;
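
	/*
	 * The burst/timing attributes below are all optional in the request;
	 * absent attributes keep the 0-initialized defaults, except
	 * burst_duration (15) and ftmr_retries (3) which get explicit
	 * defaults.
	 */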
	out->ftm.burst_period = 0;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
		out->ftm.burst_period =
			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);

	out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
	if (out->ftm.asap && !capa->ftm.asap) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP],
				    "FTM: ASAP mode not supported");
		return -EINVAL;
	}

	if (!out->ftm.asap && !capa->ftm.non_asap) {
		NL_SET_ERR_MSG(info->extack,
			       "FTM: non-ASAP mode not supported");
		return -EINVAL;
	}

	out->ftm.num_bursts_exp = 0;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
		out->ftm.num_bursts_exp =
			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);

	if (capa->ftm.max_bursts_exponent >= 0 &&
	    out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP],
				    "FTM: max NUM_BURSTS_EXP must be set lower than the device limit");
		return -EINVAL;
	}

	out->ftm.burst_duration = 15;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
		out->ftm.burst_duration =
			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);

	out->ftm.ftms_per_burst = 0;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
		out->ftm.ftms_per_burst =
			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]);

	if (capa->ftm.max_ftms_per_burst &&
	    (out->ftm.ftms_per_burst > capa->ftm.max_ftms_per_burst ||
	     out->ftm.ftms_per_burst == 0)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST],
				    "FTM: FTMs per burst must be set lower than the device limit but non-zero");
		return -EINVAL;
	}

	out->ftm.ftmr_retries = 3;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
		out->ftm.ftmr_retries =
			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);

	out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
	if (out->ftm.request_lci && !capa->ftm.request_lci) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI],
				    "FTM: LCI request not supported");
	}

	out->ftm.request_civicloc =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC];
	if (out->ftm.request_civicloc && !capa->ftm.request_civicloc) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC],
				    "FTM: civic location request not supported");
	}
	out->ftm.trigger_based =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED];
	if (out->ftm.trigger_based && !capa->ftm.trigger_based) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED],
				    "FTM: trigger based ranging is not supported");
		return -EINVAL;
	}

	out->ftm.non_trigger_based =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED];
	if (out->ftm.non_trigger_based && !capa->ftm.non_trigger_based) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED],
				    "FTM: non-trigger-based ranging is not supported");
		return -EINVAL;
	}

	if (out->ftm.trigger_based && out->ftm.non_trigger_based) {
		NL_SET_ERR_MSG(info->extack,
			       "FTM: can't set both trigger based and non trigger based");
		return -EINVAL;
	}

	if ((out->ftm.trigger_based || out->ftm.non_trigger_based) &&
	    out->ftm.preamble != NL80211_PREAMBLE_HE) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
				    "FTM: non EDCA based ranging must use HE preamble");
		return -EINVAL;
	}

	out->ftm.lmr_feedback =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK];
	if (!out->ftm.trigger_based && !out->ftm.non_trigger_based &&
	    out->ftm.lmr_feedback) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK],
				    "FTM: LMR feedback set for EDCA based ranging");
		return -EINVAL;
	}

	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR]) {
		if (!out->ftm.non_trigger_based && !out->ftm.trigger_based) {
			NL_SET_ERR_MSG_ATTR(info->extack,
					    tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR],
					    "FTM: BSS color set for EDCA based ranging");
			return -EINVAL;
		}

		out->ftm.bss_color =
			nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR]);
	}

	return 0;
}
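
/*
 * Parse one NL80211_PMSR_ATTR_PEERS entry: peer MAC address, channel
 * definition and the nested measurement request(s) for that peer.
 */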
static int pmsr_parse_peer(struct cfg80211_registered_device *rdev,
			   struct nlattr *peer,
			   struct cfg80211_pmsr_request_peer *out,
			   struct genl_info *info)
{
	struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1];
	struct nlattr *req[NL80211_PMSR_REQ_ATTR_MAX + 1];
	struct nlattr *treq;
	int err, rem;

	/* no validation needed - was already done via nested policy */
	nla_parse_nested_deprecated(tb, NL80211_PMSR_PEER_ATTR_MAX, peer,
				    NULL, NULL);

	if (!tb[NL80211_PMSR_PEER_ATTR_ADDR] ||
	    !tb[NL80211_PMSR_PEER_ATTR_CHAN] ||
	    !tb[NL80211_PMSR_PEER_ATTR_REQ]) {
		NL_SET_ERR_MSG_ATTR(info->extack, peer,
				    "insufficient peer data");
		return -EINVAL;
	}

	memcpy(out->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN);

	/* reuse info->attrs */
	memset(info->attrs, 0, sizeof(*info->attrs) * (NL80211_ATTR_MAX + 1));
	err = nla_parse_nested_deprecated(info->attrs, NL80211_ATTR_MAX,
					  tb[NL80211_PMSR_PEER_ATTR_CHAN],
					  NULL, info->extack);
	if (err)
		return err;

	err = nl80211_parse_chandef(rdev, info, &out->chandef);
	if (err)
		return err;

	/* no validation needed - was already done via nested policy */
	nla_parse_nested_deprecated(req, NL80211_PMSR_REQ_ATTR_MAX,
				    tb[NL80211_PMSR_PEER_ATTR_REQ], NULL,
				    NULL);

	if (!req[NL80211_PMSR_REQ_ATTR_DATA]) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_PEER_ATTR_REQ],
				    "missing request type/data");
		return -EINVAL;
	}

	if (req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF])
		out->report_ap_tsf = true;

	if (out->report_ap_tsf && !rdev->wiphy.pmsr_capa->report_ap_tsf) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF],
				    "reporting AP TSF is not supported");
		return -EINVAL;
	}
	nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) {
		switch (nla_type(treq)) {
		case NL80211_PMSR_TYPE_FTM:
			err = pmsr_parse_ftm(rdev, treq, out, info);
			break;
		default:
			NL_SET_ERR_MSG_ATTR(info->extack, treq,
					    "unsupported measurement type");
			err = -EINVAL;
		}

		if (err)
			return err;
	}

	return 0;
}
int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *reqattr = info->attrs[NL80211_ATTR_PEER_MEASUREMENTS];
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct wireless_dev *wdev = info->user_ptr[1];
	struct cfg80211_pmsr_request *req;
	struct nlattr *peers, *peer;
	int count, rem, err, idx;

	if (!rdev->wiphy.pmsr_capa)
		return -EOPNOTSUPP;

	if (!reqattr)
		return -EINVAL;

	peers = nla_find(nla_data(reqattr), nla_len(reqattr),
			 NL80211_PMSR_ATTR_PEERS);
	if (!peers)
		return -EINVAL;

	count = 0;
	nla_for_each_nested(peer, peers, rem) {
		count++;

		if (count > rdev->wiphy.pmsr_capa->max_peers) {
			NL_SET_ERR_MSG_ATTR(info->extack, peer,
					    "Too many peers used");
			return -EINVAL;
		}
	}

	req = kzalloc(struct_size(req, peers, count), GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	req->n_peers = count;

	if (info->attrs[NL80211_ATTR_TIMEOUT])
		req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]);

	if (info->attrs[NL80211_ATTR_MAC]) {
		if (!rdev->wiphy.pmsr_capa->randomize_mac_addr) {
			NL_SET_ERR_MSG_ATTR(info->extack,
					    info->attrs[NL80211_ATTR_MAC],
					    "device cannot randomize MAC address");
			err = -EINVAL;
			goto out_err;
		}

		err = nl80211_parse_random_mac(info->attrs, req->mac_addr,
					       req->mac_addr_mask);
		if (err)
			goto out_err;
	} else {
		memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN);
		eth_broadcast_addr(req->mac_addr_mask);
	}
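
	/* second pass over the peers: parse each entry into req->peers[] */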
	idx = 0;
	nla_for_each_nested(peer, peers, rem) {
		/* NB: this reuses info->attrs, but we no longer need it */
		err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info);
		if (err)
			goto out_err;
		idx++;
	}

	req->cookie = cfg80211_assign_cookie(rdev);
	req->nl_portid = info->snd_portid;

	err = rdev_start_pmsr(rdev, wdev, req);
	if (err)
		goto out_err;

	list_add_tail(&req->list, &wdev->pmsr_list);

	nl_set_extack_cookie_u64(info->extack, req->cookie);
	return 0;
out_err:
	kfree(req);
	return err;
}
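
/*
 * Driver API: called when the measurement identified by @req is done;
 * sends NL80211_CMD_PEER_MEASUREMENT_COMPLETE to the requesting socket
 * and frees the request unless an abort already claimed it.
 */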
void cfg80211_pmsr_complete(struct wireless_dev *wdev,
			    struct cfg80211_pmsr_request *req,
			    gfp_t gfp)
{
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
	struct sk_buff *msg;
	void *hdr;

	trace_cfg80211_pmsr_complete(wdev->wiphy, wdev, req->cookie);

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!msg)
		goto free_request;

	hdr = nl80211hdr_put(msg, 0, 0, 0,
			     NL80211_CMD_PEER_MEASUREMENT_COMPLETE);
	if (!hdr)
		goto free_msg;

	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
			      NL80211_ATTR_PAD))
		goto free_msg;

	if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
			      NL80211_ATTR_PAD))
		goto free_msg;

	genlmsg_end(msg, hdr);
	genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
	goto free_request;

free_msg:
	nlmsg_free(msg);
free_request:
	spin_lock_bh(&wdev->pmsr_lock);
	/*
	 * cfg80211_pmsr_process_abort() may have already moved this request
	 * to the free list, and will free it later. In this case, don't free
	 * it here.
	 */
	list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
		if (tmp == req) {
			list_del(&req->list);
			to_free = req;
			break;
		}
	}
	spin_unlock_bh(&wdev->pmsr_lock);

	kfree(to_free);
}
EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
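
/*
 * Emit the FTM-specific response attributes for one result; for a failed
 * measurement only the failure reason (and, for "peer busy", the retry
 * time) is reported.
 */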
static int nl80211_pmsr_send_ftm_res(struct sk_buff *msg,
				     struct cfg80211_pmsr_result *res)
{
	if (res->status == NL80211_PMSR_STATUS_FAILURE) {
		if (nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON,
				res->ftm.failure_reason))
			goto error;

		if (res->ftm.failure_reason ==
			NL80211_PMSR_FTM_FAILURE_PEER_BUSY &&
		    res->ftm.busy_retry_time &&
		    nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME,
				res->ftm.busy_retry_time))
			goto error;

		return 0;
	}
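
/*
 * Helper macros: PUT/PUT_U64 add one attribute or jump to the error label;
 * the *OPT variants only add the attribute when the matching
 * res->ftm.<field>_valid flag is set.
 */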
#define PUT(tp, attr, val)					\
	do {							\
		if (nla_put_##tp(msg,				\
				 NL80211_PMSR_FTM_RESP_ATTR_##attr,	\
				 res->ftm.val))			\
			goto error;				\
	} while (0)

#define PUTOPT(tp, attr, val)					\
	do {							\
		if (res->ftm.val##_valid)			\
			PUT(tp, attr, val);			\
	} while (0)

#define PUT_U64(attr, val)					\
	do {							\
		if (nla_put_u64_64bit(msg,			\
				      NL80211_PMSR_FTM_RESP_ATTR_##attr,\
				      res->ftm.val,		\
				      NL80211_PMSR_FTM_RESP_ATTR_PAD))	\
			goto error;				\
	} while (0)

#define PUTOPT_U64(attr, val)					\
	do {							\
		if (res->ftm.val##_valid)			\
			PUT_U64(attr, val);			\
	} while (0)

	if (res->ftm.burst_index >= 0)
		PUT(u32, BURST_INDEX, burst_index);
	PUTOPT(u32, NUM_FTMR_ATTEMPTS, num_ftmr_attempts);
	PUTOPT(u32, NUM_FTMR_SUCCESSES, num_ftmr_successes);
	PUT(u8, NUM_BURSTS_EXP, num_bursts_exp);
	PUT(u8, BURST_DURATION, burst_duration);
	PUT(u8, FTMS_PER_BURST, ftms_per_burst);
	PUTOPT(s32, RSSI_AVG, rssi_avg);
	PUTOPT(s32, RSSI_SPREAD, rssi_spread);
	if (res->ftm.tx_rate_valid &&
	    !nl80211_put_sta_rate(msg, &res->ftm.tx_rate,
				  NL80211_PMSR_FTM_RESP_ATTR_TX_RATE))
		goto error;
	if (res->ftm.rx_rate_valid &&
	    !nl80211_put_sta_rate(msg, &res->ftm.rx_rate,
				  NL80211_PMSR_FTM_RESP_ATTR_RX_RATE))
		goto error;
	PUTOPT_U64(RTT_AVG, rtt_avg);
	PUTOPT_U64(RTT_VARIANCE, rtt_variance);
	PUTOPT_U64(RTT_SPREAD, rtt_spread);
	PUTOPT_U64(DIST_AVG, dist_avg);
	PUTOPT_U64(DIST_VARIANCE, dist_variance);
	PUTOPT_U64(DIST_SPREAD, dist_spread);
	if (res->ftm.lci && res->ftm.lci_len &&
	    nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_LCI,
		    res->ftm.lci_len, res->ftm.lci))
		goto error;
	if (res->ftm.civicloc && res->ftm.civicloc_len &&
	    nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
		    res->ftm.civicloc_len, res->ftm.civicloc))
		goto error;
#undef PUT
#undef PUTOPT
#undef PUT_U64
#undef PUTOPT_U64

	return 0;
error:
	return -ENOSPC;
}
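
/*
 * Build the nested result for one peer:
 * PEER_MEASUREMENTS -> PEERS -> peer -> RESP -> DATA -> <type specific>.
 */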
static int nl80211_pmsr_send_result(struct sk_buff *msg,
				    struct cfg80211_pmsr_result *res)
{
	struct nlattr *pmsr, *peers, *peer, *resp, *data, *typedata;

	pmsr = nla_nest_start_noflag(msg, NL80211_ATTR_PEER_MEASUREMENTS);
	if (!pmsr)
		goto error;

	peers = nla_nest_start_noflag(msg, NL80211_PMSR_ATTR_PEERS);
	if (!peers)
		goto error;

	peer = nla_nest_start_noflag(msg, 1);
	if (!peer)
		goto error;

	if (nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, ETH_ALEN, res->addr))
		goto error;

	resp = nla_nest_start_noflag(msg, NL80211_PMSR_PEER_ATTR_RESP);
	if (!resp)
		goto error;

	if (nla_put_u32(msg, NL80211_PMSR_RESP_ATTR_STATUS, res->status) ||
	    nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_HOST_TIME,
			      res->host_time, NL80211_PMSR_RESP_ATTR_PAD))
		goto error;

	if (res->ap_tsf_valid &&
	    nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF,
			      res->ap_tsf, NL80211_PMSR_RESP_ATTR_PAD))
		goto error;

	if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL))
		goto error;

	data = nla_nest_start_noflag(msg, NL80211_PMSR_RESP_ATTR_DATA);
	if (!data)
		goto error;

	typedata = nla_nest_start_noflag(msg, res->type);
	if (!typedata)
		goto error;

	switch (res->type) {
	case NL80211_PMSR_TYPE_FTM:
		if (nl80211_pmsr_send_ftm_res(msg, res))
			goto error;
		break;
	default:
		break;
	}

	nla_nest_end(msg, typedata);
	nla_nest_end(msg, data);
	nla_nest_end(msg, resp);
	nla_nest_end(msg, peer);
	nla_nest_end(msg, peers);
	nla_nest_end(msg, pmsr);

	return 0;
error:
	return -ENOSPC;
}
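
/*
 * Driver API: report one measurement result for @req. Builds an
 * NL80211_CMD_PEER_MEASUREMENT_RESULT message and unicasts it to the
 * portid that started the measurement.
 */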
void cfg80211_pmsr_report(struct wireless_dev *wdev,
			  struct cfg80211_pmsr_request *req,
			  struct cfg80211_pmsr_result *result,
			  gfp_t gfp)
{
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct sk_buff *msg;
	void *hdr;
	int err;

	trace_cfg80211_pmsr_report(wdev->wiphy, wdev, req->cookie,
				   result->addr);

	/*
	 * Currently, the only variable-length items are LCI and civic
	 * location, both of which are reasonably short, so we don't need
	 * to worry about them for the allocation here.
	 */
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!msg)
		return;

	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_RESULT);
	if (!hdr)
		goto free;

	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
			      NL80211_ATTR_PAD))
		goto free;

	if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
			      NL80211_ATTR_PAD))
		goto free;

	err = nl80211_pmsr_send_result(msg, result);
	if (err) {
		pr_err_ratelimited("peer measurement result: message didn't fit!");
		goto free;
	}

	genlmsg_end(msg, hdr);
	genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
	return;
free:
	nlmsg_free(msg);
}
EXPORT_SYMBOL_GPL(cfg80211_pmsr_report);
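
/*
 * Abort and free every request whose owning socket has gone away
 * (nl_portid == 0): collect them under the spinlock, then notify the
 * driver and free them outside of it.
 */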
static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev)
{
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct cfg80211_pmsr_request *req, *tmp;
	LIST_HEAD(free_list);

	lockdep_assert_held(&wdev->mtx);

	spin_lock_bh(&wdev->pmsr_lock);
	list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
		if (req->nl_portid)
			continue;
		list_move_tail(&req->list, &free_list);
	}
	spin_unlock_bh(&wdev->pmsr_lock);

	list_for_each_entry_safe(req, tmp, &free_list, list) {
		rdev_abort_pmsr(rdev, wdev, req);

		kfree(req);
	}
}

void cfg80211_pmsr_free_wk(struct work_struct *work)
{
	struct wireless_dev *wdev = container_of(work, struct wireless_dev,
						 pmsr_free_wk);

	wiphy_lock(wdev->wiphy);
	wdev_lock(wdev);
	cfg80211_pmsr_process_abort(wdev);
	wdev_unlock(wdev);
	wiphy_unlock(wdev->wiphy);
}
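
/*
 * Called when the wdev goes down: clear the owning portid on every pending
 * request and abort them all synchronously.
 */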
void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
{
	struct cfg80211_pmsr_request *req;
	bool found = false;

	spin_lock_bh(&wdev->pmsr_lock);
	list_for_each_entry(req, &wdev->pmsr_list, list) {
		found = true;
		req->nl_portid = 0;
	}
	spin_unlock_bh(&wdev->pmsr_lock);

	if (found)
		cfg80211_pmsr_process_abort(wdev);

	WARN_ON(!list_empty(&wdev->pmsr_list));
}
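
/*
 * The netlink socket that started these measurements went away: orphan its
 * requests and let the pmsr_free_wk worker abort and free them, since that
 * cannot be done under the pmsr_lock held here.
 */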
void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid)
{
	struct cfg80211_pmsr_request *req;

	spin_lock_bh(&wdev->pmsr_lock);
	list_for_each_entry(req, &wdev->pmsr_list, list) {
		if (req->nl_portid == portid) {
			req->nl_portid = 0;
			schedule_work(&wdev->pmsr_free_wk);
		}
	}
	spin_unlock_bh(&wdev->pmsr_lock);
}